diff --git a/README.md b/README.md
index bda7b1a..b5c765a 100644
--- a/README.md
+++ b/README.md
@@ -31,3 +31,141 @@ Options:
 
 ![Harmony Core Architecture](docs/diagrams/Harmony_Core_Architecture.drawio.svg)
 ````
+## Supporting a new field in OPNSense `config.xml`
+
+Two steps:
+- Supporting the field in `opnsense-config-xml`
+- Enabling Harmony to control the field
+
+We'll use the `filename` field in the `dhcpd` section of the file as an example.
+
+### Supporting the field
+
+As type checking is enforced, every field from `config.xml` must be known to the code. Each subsection of `config.xml` has its own `.rs` file. For the `dhcpd` section, we'll modify `opnsense-config-xml/src/data/dhcpd.rs`.
+
+When a new field appears in the XML file, an error like this is thrown and Harmony panics:
+```
+     Running `/home/stremblay/nt/dir/harmony/target/debug/example-nanodc`
+Found unauthorized element filename
+thread 'main' panicked at opnsense-config-xml/src/data/opnsense.rs:54:14:
+OPNSense received invalid string, should be full XML: ()
+
+```
+
+Define the missing field (`filename`) in the `DhcpInterface` struct of `opnsense-config-xml/src/data/dhcpd.rs`:
+```
+pub struct DhcpInterface {
+    ...
+    pub filename: Option<String>,
+```
+
+Harmony should now build and run again.
+
+### Controlling the field
+
+Define the XML field setter in `opnsense-config/src/modules/dhcpd.rs`:
+```
+impl<'a> DhcpConfig<'a> {
+    ...
+    pub fn set_filename(&mut self, filename: &str) {
+        self.enable_netboot();
+        self.get_lan_dhcpd().filename = Some(filename.to_string());
+    }
+    ...
+```
+
+Define the value setter in the `DhcpServer` trait in `domain/topology/network.rs`:
+```
+#[async_trait]
+pub trait DhcpServer: Send + Sync {
+    ...
+    async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError>;
+    ...
+```
+
+Implement the value setter in each `DhcpServer` implementation.
+`infra/opnsense/dhcp.rs`:
+```
+#[async_trait]
+impl DhcpServer for OPNSenseFirewall {
+    ...
+    async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> {
+        {
+            let mut writable_opnsense = self.opnsense_config.write().await;
+            writable_opnsense.dhcp().set_filename(filename);
+            debug!("OPNsense dhcp server set filename {filename}");
+        }
+
+        Ok(())
+    }
+    ...
+```
+
+`domain/topology/ha_cluster.rs`:
+```
+#[async_trait]
+impl DhcpServer for DummyInfra {
+    ...
+    async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> {
+        unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA)
+    }
+    ...
+```
+
+Add the new field to the `DhcpScore` struct in `modules/dhcp.rs`:
+```
+pub struct DhcpScore {
+    ...
+    pub filename: Option<String>,
+```
+
+Set its value in the `OKDDhcpScore` constructor in `modules/okd/dhcp.rs`:
+```
+impl OKDDhcpScore {
+    ...
+        Self {
+            dhcp_score: DhcpScore {
+                ...
+                filename: Some("undionly.kpxe".to_string()),
+```
+
+And in `modules/okd/bootstrap_dhcp.rs`:
+```
+impl OKDDhcpScore {
+    ...
+        Self {
+            dhcp_score: DhcpScore::new(
+                ...
+                Some("undionly.kpxe".to_string()),
+```
+
+Update the interpret in `modules/dhcp.rs` (the function called by the interpret's `execute` fn) so it also applies the `filename` value:
+```
+impl DhcpInterpret {
+    ...
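+    // Context (illustrative sketch, not verbatim code): the lines below live
+    // inside the interpret's `execute` fn. `next_server_outcome` and
+    // `boot_filename_outcome` are assumed to be built earlier in that same fn
+    // for the pre-existing fields, using the same match-on-Option pattern as
+    // the new `filename_outcome`, e.g.:
+    //
+    //     let boot_filename_outcome = match &self.score.boot_filename {
+    //         Some(boot_filename) => { /* call the DhcpServer setter, then SUCCESS */ }
+    //         None => Outcome::noop(),
+    //     };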
+ let filename_outcome = match &self.score.filename { + Some(filename) => { + let dhcp_server = Arc::new(topology.dhcp_server.clone()); + dhcp_server.set_filename(&filename).await?; + Outcome::new( + InterpretStatus::SUCCESS, + format!("Dhcp Interpret Set filename to {filename}"), + ) + } + None => Outcome::noop(), + }; + + if next_server_outcome.status == InterpretStatus::NOOP + && boot_filename_outcome.status == InterpretStatus::NOOP + && filename_outcome.status == InterpretStatus::NOOP + + ... + + Ok(Outcome::new( + InterpretStatus::SUCCESS, + format!( + "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}]", + self.score.boot_filename, self.score.boot_filename, self.score.filename + ) + ... +``` diff --git a/data/watchguard/pxe-http-files/.gitattributes b/data/watchguard/pxe-http-files/.gitattributes new file mode 100644 index 0000000..b503bee --- /dev/null +++ b/data/watchguard/pxe-http-files/.gitattributes @@ -0,0 +1 @@ +slitaz/* filter=lfs diff=lfs merge=lfs -text diff --git a/data/watchguard/pxe-http-files/boot.ipxe b/data/watchguard/pxe-http-files/boot.ipxe new file mode 100644 index 0000000..5b00e50 --- /dev/null +++ b/data/watchguard/pxe-http-files/boot.ipxe @@ -0,0 +1,6 @@ +#!ipxe + +set base-url http://192.168.33.1:8080 +set hostfile ${base-url}/byMAC/01-${mac:hexhyp}.ipxe + +chain ${hostfile} || chain ${base-url}/default.ipxe diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-01-bc-68.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-01-bc-68.ipxe new file mode 100644 index 0000000..4baf9b6 --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-01-bc-68.ipxe @@ -0,0 +1,35 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Boot to Slitaz - old linux for debugging +choose selected + +goto ${selected} + +:local +exit + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/nvme0n1 +set ignition-file ncd0/master.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-60-fa.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-60-fa.ipxe new file mode 100644 index 0000000..4baf9b6 --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-60-fa.ipxe @@ -0,0 +1,35 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Boot to Slitaz - old linux for debugging +choose selected + +goto ${selected} + +:local +exit + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs 
fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/nvme0n1 +set ignition-file ncd0/master.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-0f.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-0f.ipxe new file mode 100644 index 0000000..0f60c7e --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-0f.ipxe @@ -0,0 +1,35 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Slitaz - an old linux image for debugging +choose selected + +goto ${selected} + +:local +exit + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/sda +set ignition-file ncd0/worker.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-1a.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-1a.ipxe new file mode 100644 index 0000000..4baf9b6 --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-1a.ipxe @@ -0,0 +1,35 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Boot to Slitaz - old linux for debugging +choose selected + +goto ${selected} + +:local +exit + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/nvme0n1 +set ignition-file ncd0/master.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz 
+kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-26.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-26.ipxe new file mode 100644 index 0000000..0f60c7e --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-26.ipxe @@ -0,0 +1,35 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Slitaz - an old linux image for debugging +choose selected + +goto ${selected} + +:local +exit + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/sda +set ignition-file ncd0/worker.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-70.ipxe b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-70.ipxe new file mode 100644 index 0000000..53a71df --- /dev/null +++ b/data/watchguard/pxe-http-files/byMAC/01-c4-62-37-02-61-70.ipxe @@ -0,0 +1,37 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item okdinstallation Install OKD +item slitaz Slitaz - an old linux image for debugging +choose selected + +goto ${selected} + +:local +exit +# This is the bootstrap node +# it will become wk2 + +################################# +# okdinstallation +################################# +:okdinstallation +set base-url http://192.168.33.1:8080 +set kernel-image fcos/fedora-coreos-39.20231101.3.0-live-kernel-x86_64 +set live-rootfs fcos/fedora-coreos-39.20231101.3.0-live-rootfs.x86_64.img +set live-initramfs fcos/fedora-coreos-39.20231101.3.0-live-initramfs.x86_64.img +set install-disk /dev/sda +set ignition-file ncd0/worker.ign + +kernel ${base-url}/${kernel-image} initrd=main coreos.live.rootfs_url=${base-url}/${live-rootfs} coreos.inst.install_dev=${install-disk} coreos.inst.ignition_url=${base-url}/${ignition-file} ip=enp1s0:dhcp +initrd --name main ${base-url}/${live-initramfs} +boot + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot diff --git a/data/watchguard/pxe-http-files/default.ipxe b/data/watchguard/pxe-http-files/default.ipxe new file mode 100644 index 0000000..ce28d70 --- /dev/null +++ b/data/watchguard/pxe-http-files/default.ipxe @@ -0,0 +1,71 @@ +#!ipxe +menu PXE Boot Menu - [${mac}] +item local Boot from Hard Disk +item slitaz Boot slitaz live environment [tux|root:root] +#item ubuntu-server Ubuntu 24.04.1 live server +#item ubuntu-desktop Ubuntu 24.04.1 desktop +#item systemrescue 
System Rescue 11.03 +item memtest memtest +#choose --default local --timeout 5000 selected +choose selected + +goto ${selected} + +:local +exit + +################################# +# slitaz +################################# +:slitaz +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/slitaz +kernel ${base_url}/vmlinuz-2.6.37-slitaz rw root=/dev/null vga=788 initrd=rootfs.gz +initrd ${base_url}/rootfs.gz +boot + +################################# +# Ubuntu Server +################################# +:ubuntu-server +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/ubuntu/live-server-24.04.1 + +kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-live-server-amd64.iso autoinstall ds=nocloud +initrd ${base_url}/initrd +boot + +################################# +# Ubuntu Desktop +################################# +:ubuntu-desktop +set server_ip 192.168.33.1:8080 +set base_url http://${server_ip}/ubuntu/desktop-24.04.1 + +kernel ${base_url}/vmlinuz ip=dhcp url=${base_url}/ubuntu-24.04.1-desktop-amd64.iso autoinstall ds=nocloud +initrd ${base_url}/initrd +boot + +################################# +# System Rescue +################################# +:systemrescue +set base-url http://192.168.33.1:8080/systemrescue + +kernel ${base-url}/vmlinuz initrd=sysresccd.img boot=systemrescue docache +initrd ${base-url}/sysresccd.img +boot + +################################# +# MemTest86 (BIOS/UEFI) +################################# +:memtest +iseq ${platform} efi && goto memtest_efi || goto memtest_bios + +:memtest_efi +kernel http://192.168.33.1:8080/memtest/memtest64.efi +boot + +:memtest_bios +kernel http://192.168.33.1:8080/memtest/memtest64.bin +boot diff --git a/data/watchguard/pxe-http-files/memtest86/memtest32.bin b/data/watchguard/pxe-http-files/memtest86/memtest32.bin new file mode 100644 index 0000000..b9d706f Binary files /dev/null and b/data/watchguard/pxe-http-files/memtest86/memtest32.bin differ diff --git a/data/watchguard/pxe-http-files/memtest86/memtest32.efi b/data/watchguard/pxe-http-files/memtest86/memtest32.efi new file mode 100644 index 0000000..b7e8341 Binary files /dev/null and b/data/watchguard/pxe-http-files/memtest86/memtest32.efi differ diff --git a/data/watchguard/pxe-http-files/memtest86/memtest64.bin b/data/watchguard/pxe-http-files/memtest86/memtest64.bin new file mode 100644 index 0000000..1430539 Binary files /dev/null and b/data/watchguard/pxe-http-files/memtest86/memtest64.bin differ diff --git a/data/watchguard/pxe-http-files/memtest86/memtest64.efi b/data/watchguard/pxe-http-files/memtest86/memtest64.efi new file mode 100644 index 0000000..0ba3605 Binary files /dev/null and b/data/watchguard/pxe-http-files/memtest86/memtest64.efi differ diff --git a/data/watchguard/pxe-http-files/memtest86/memtestla64.efi b/data/watchguard/pxe-http-files/memtest86/memtestla64.efi new file mode 100644 index 0000000..c055cd3 Binary files /dev/null and b/data/watchguard/pxe-http-files/memtest86/memtestla64.efi differ diff --git a/data/watchguard/pxe-http-files/paul b/data/watchguard/pxe-http-files/paul deleted file mode 100644 index 00cd64a..0000000 --- a/data/watchguard/pxe-http-files/paul +++ /dev/null @@ -1 +0,0 @@ -hey i am paul diff --git a/data/watchguard/pxe-http-files/slitaz/rootfs.gz b/data/watchguard/pxe-http-files/slitaz/rootfs.gz new file mode 100644 index 0000000..cd7225a --- /dev/null +++ b/data/watchguard/pxe-http-files/slitaz/rootfs.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:2f6eab9e52b7fb445da51b21a325853692570a8ab15790b0b5b11f31356433ea +size 41743919 diff --git a/data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz b/data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz new file mode 100644 index 0000000..96953ef --- /dev/null +++ b/data/watchguard/pxe-http-files/slitaz/vmlinuz-2.6.37-slitaz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4458a8186da1b1fc407064886b678ea94e0f1f3529f88419b4dff8fc6bf6f32f +size 2492912 diff --git a/data/watchguard/tftpboot/ipxe.efi b/data/watchguard/tftpboot/ipxe.efi new file mode 100644 index 0000000..24a9510 Binary files /dev/null and b/data/watchguard/tftpboot/ipxe.efi differ diff --git a/data/watchguard/tftpboot/undionly.kpxe b/data/watchguard/tftpboot/undionly.kpxe new file mode 100644 index 0000000..a265f30 Binary files /dev/null and b/data/watchguard/tftpboot/undionly.kpxe differ diff --git a/examples/lamp/src/main.rs b/examples/lamp/src/main.rs index 1aaca90..f26daaa 100644 --- a/examples/lamp/src/main.rs +++ b/examples/lamp/src/main.rs @@ -8,7 +8,7 @@ use harmony::{ #[tokio::main] async fn main() { - // This here is the whole configuration to + // This here is the whole configuration to // - setup a local K3D cluster // - Build a docker image with the PHP project builtin and production grade settings // - Deploy a mariadb database using a production grade helm chart diff --git a/examples/nanodc/rook-cephcluster/install-rook-cephcluster.sh b/examples/nanodc/rook-cephcluster/install-rook-cephcluster.sh new file mode 100644 index 0000000..2ded569 --- /dev/null +++ b/examples/nanodc/rook-cephcluster/install-rook-cephcluster.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +helm install --create-namespace --namespace rook-ceph rook-ceph-cluster \ + --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f values.yaml diff --git a/examples/nanodc/rook-cephcluster/values.yaml b/examples/nanodc/rook-cephcluster/values.yaml new file mode 100644 index 0000000..1186020 --- /dev/null +++ b/examples/nanodc/rook-cephcluster/values.yaml @@ -0,0 +1,721 @@ +# Default values for a single rook-ceph cluster +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# -- Namespace of the main rook operator +operatorNamespace: rook-ceph + +# -- The metadata.name of the CephCluster CR +# @default -- The same as the namespace +clusterName: + +# -- Optional override of the target kubernetes version +kubeVersion: + +# -- Cluster ceph.conf override +configOverride: +# configOverride: | +# [global] +# mon_allow_pool_delete = true +# osd_pool_default_size = 3 +# osd_pool_default_min_size = 2 + +# Installs a debugging toolbox deployment +toolbox: + # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md) + enabled: true + # -- Toolbox image, defaults to the image used by the Ceph cluster + image: #quay.io/ceph/ceph:v19.2.2 + # -- Toolbox tolerations + tolerations: [] + # -- Toolbox affinity + affinity: {} + # -- Toolbox container security context + containerSecurityContext: + runAsNonRoot: true + runAsUser: 2016 + runAsGroup: 2016 + capabilities: + drop: ["ALL"] + # -- Toolbox resources + resources: + limits: + memory: "1Gi" + requests: + cpu: "100m" + memory: "128Mi" + # -- Set the priority class for the toolbox if desired + priorityClassName: + +monitoring: + # -- Enable Prometheus integration, will also create necessary RBAC rules to allow Operator to create ServiceMonitors. 
+ # Monitoring requires Prometheus to be pre-installed + enabled: false + # -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled + metricsDisabled: false + # -- Whether to create the Prometheus rules for Ceph alerts + createPrometheusRules: false + # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace. + # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus + # deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions. + rulesNamespaceOverride: + # Monitoring settings for external clusters: + # externalMgrEndpoints: + # externalMgrPrometheusPort: + # Scrape interval for prometheus + # interval: 10s + # allow adding custom labels and annotations to the prometheus rule + prometheusRule: + # -- Labels applied to PrometheusRule + labels: {} + # -- Annotations applied to PrometheusRule + annotations: {} + +# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart. +pspEnable: false + +# imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts. +# imagePullSecrets: +# - name: my-registry-secret + +# All values below are taken from the CephCluster CRD +# -- Cluster configuration. +# @default -- See [below](#ceph-cluster-spec) +cephClusterSpec: + # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally, + # as in the host-based example (cluster.yaml). For a different configuration such as a + # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml), + # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec` + # with the specs from those examples. + + # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/ + cephVersion: + # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). + # v18 is Reef, v19 is Squid + # RECOMMENDATION: In production, use a specific version tag instead of the general v18 flag, which pulls the latest release and could result in different + # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. + # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.2-20250409 + # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities + image: quay.io/ceph/ceph:v19.2.2 + # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported. + # Future versions such as Tentacle (v20) would require this to be set to `true`. + # Do not set to true in production. + allowUnsupported: false + + # The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster. + # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. + # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 
+ dataDirHostPath: /var/lib/rook + + # Whether or not upgrade should continue even if a check fails + # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise + # Use at your OWN risk + # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/ + skipUpgradeChecks: false + + # Whether or not continue if PGs are not clean during an upgrade + continueUpgradeAfterChecksEvenIfNotHealthy: false + + # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart. + # If the timeout exceeds and OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one + # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then operator would + # continue with the upgrade of an OSD even if its not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`. + # The default wait timeout is 10 minutes. + waitTimeoutForHealthyOSDInMinutes: 10 + + # Whether or not requires PGs are clean before an OSD upgrade. If set to `true` OSD upgrade process won't start until PGs are healthy. + # This configuration will be ignored if `skipUpgradeChecks` is `true`. + # Default is false. + upgradeOSDRequiresHealthyPGs: false + + mon: + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. + count: 3 + # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason. + # Mons should only be allowed on the same node for test environments where data loss is acceptable. + allowMultiplePerNode: false + + mgr: + # When higher availability of the mgr is needed, increase the count to 2. + # In that case, one mgr will be active and one in standby. When Ceph updates which + # mgr is active, Rook will update the mgr services to match the active mgr. + count: 2 + allowMultiplePerNode: false + modules: + # List of modules to optionally enable or disable. + # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR. + # - name: rook + # enabled: true + + # enable the ceph dashboard for viewing cluster status + dashboard: + enabled: true + # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) + # urlPrefix: /ceph-dashboard + # serve the dashboard at the given port. + # port: 8443 + # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set + # the corresponding "backend protocol" annotation(s) for your ingress controller of choice) + ssl: true + + # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings + network: + connections: + # Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network. + # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons will be encrypted. + # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check. + # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. 
Alternatively for testing only, + # you can set the "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class. + # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes. + encryption: + enabled: false + # Whether to compress the data in transit across the wire. The default is false. + # The kernel requirements above for encryption also apply to compression. + compression: + enabled: false + # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled + # and clients will be required to connect to the Ceph cluster with the v2 port (3300). + # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer). + requireMsgr2: false + # # enable host networking + # provider: host + # # EXPERIMENTAL: enable the Multus network provider + # provider: multus + # selectors: + # # The selector keys are required to be `public` and `cluster`. + # # Based on the configuration, the operator will do the following: + # # 1. if only the `public` selector key is specified both public_network and cluster_network Ceph settings will listen on that interface + # # 2. if both `public` and `cluster` selector keys are specified the first one will point to 'public_network' flag and the second one to 'cluster_network' + # # + # # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus + # # + # # public: public-conf --> NetworkAttachmentDefinition object name in Multus + # # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus + # # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4 + # ipFamily: "IPv6" + # # Ceph daemons to listen on both IPv4 and Ipv6 networks + # dualStack: false + + # enable the crash collector for ceph daemon crash collection + crashCollector: + disable: false + # Uncomment daysToRetain to prune ceph crash entries older than the + # specified number of days. + # daysToRetain: 30 + + # enable log collector, daemons will log on files and rotate + logCollector: + enabled: true + periodicity: daily # one of: hourly, daily, weekly, monthly + maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M. + + # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction. + cleanupPolicy: + # Since cluster cleanup is destructive to data, confirmation is required. + # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data". + # This value should only be set when the cluster is about to be deleted. After the confirmation is set, + # Rook will immediately stop configuring the cluster and only wait for the delete command. + # If the empty string is set, Rook will not destroy any data on hosts during uninstall. 
+ confirmation: "" + # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion + sanitizeDisks: + # method indicates if the entire disk should be sanitized or simply ceph's metadata + # in both case, re-install is possible + # possible choices are 'complete' or 'quick' (default) + method: quick + # dataSource indicate where to get random bytes from to write on the disk + # possible choices are 'zero' (default) or 'random' + # using random sources will consume entropy from the system and will take much more time then the zero source + dataSource: zero + # iteration overwrite N times instead of the default (1) + # takes an integer value + iteration: 1 + # allowUninstallWithVolumes defines how the uninstall should be performed + # If set to true, cephCluster deletion does not wait for the PVs to be deleted. + allowUninstallWithVolumes: false + + # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. + # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and + # tolerate taints with a key of 'storage-node'. + # placement: + # all: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: role + # operator: In + # values: + # - storage-node + # podAffinity: + # podAntiAffinity: + # topologySpreadConstraints: + # tolerations: + # - key: storage-node + # operator: Exists + # # The above placement information can also be specified for mon, osd, and mgr components + # mon: + # # Monitor deployments may contain an anti-affinity rule for avoiding monitor + # # collocation on the same node. This is a required rule when host network is used + # # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a + # # preferred rule with weight: 50. + # osd: + # mgr: + # cleanup: + + # annotations: + # all: + # mon: + # osd: + # cleanup: + # prepareosd: + # # If no mgr annotations are set, prometheus scrape annotations will be set by default. + # mgr: + # dashboard: + + # labels: + # all: + # mon: + # osd: + # cleanup: + # mgr: + # prepareosd: + # # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator. + # # These labels can be passed as LabelSelector to Prometheus + # monitoring: + # dashboard: + + resources: + mgr: + limits: + memory: "1Gi" + requests: + cpu: "500m" + memory: "512Mi" + mon: + limits: + memory: "2Gi" + requests: + cpu: "1000m" + memory: "1Gi" + osd: + limits: + memory: "4Gi" + requests: + cpu: "1000m" + memory: "4Gi" + prepareosd: + # limits: It is not recommended to set limits on the OSD prepare job + # since it's a one-time burst for memory that must be allowed to + # complete without an OOM kill. Note however that if a k8s + # limitRange guardrail is defined external to Rook, the lack of + # a limit here may result in a sync failure, in which case a + # limit should be added. 1200Mi may suffice for up to 15Ti + # OSDs ; for larger devices 2Gi may be required. + # cf. 
https://github.com/rook/rook/pull/11103 + requests: + cpu: "500m" + memory: "50Mi" + mgr-sidecar: + limits: + memory: "100Mi" + requests: + cpu: "100m" + memory: "40Mi" + crashcollector: + limits: + memory: "60Mi" + requests: + cpu: "100m" + memory: "60Mi" + logcollector: + limits: + memory: "1Gi" + requests: + cpu: "100m" + memory: "100Mi" + cleanup: + limits: + memory: "1Gi" + requests: + cpu: "500m" + memory: "100Mi" + exporter: + limits: + memory: "128Mi" + requests: + cpu: "50m" + memory: "50Mi" + + # The option to automatically remove OSDs that are out and are safe to destroy. + removeOSDsIfOutAndSafeToRemove: false + + # priority classes to apply to ceph resources + priorityClassNames: + mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + + storage: # cluster level storage configuration and selection + useAllNodes: true + useAllDevices: true + # deviceFilter: + # config: + # crushRoot: "custom-root" # specify a non-default root label for the CRUSH map + # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore. + # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB + # osdsPerDevice: "1" # this value can be overridden at the node or device level + # encryptedDevice: "true" # the default value for this option is "false" + # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named + # # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label. + # nodes: + # - name: "172.17.4.201" + # devices: # specific devices to use for storage can be specified for each node + # - name: "sdb" + # - name: "nvme01" # multiple osds can be created on high performance devices + # config: + # osdsPerDevice: "5" + # - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths + # config: # configuration can be specified at the node level which overrides the cluster level config + # - name: "172.17.4.301" + # deviceFilter: "^sd." + + # The section for configuring management of daemon disruptions during upgrade or fencing. + disruptionManagement: + # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically + # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will + # block eviction of OSDs by default and unblock them safely when drains are detected. + managePodBudgets: true + # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the + # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes. + osdMaintenanceTimeout: 30 + + # Configure the healthcheck and liveness probes for ceph pods. + # Valid values for daemons are 'mon', 'osd', 'status' + healthCheck: + daemonHealth: + mon: + disabled: false + interval: 45s + osd: + disabled: false + interval: 60s + status: + disabled: false + interval: 60s + # Change pod liveness probe, it works for all mon, mgr, and osd pods. 
+ livenessProbe: + mon: + disabled: false + mgr: + disabled: false + osd: + disabled: false + +ingress: + # -- Enable an ingress for the ceph-dashboard + dashboard: + # {} + # labels: + # external-dns/private: "true" + annotations: + "route.openshift.io/termination": "passthrough" + # external-dns.alpha.kubernetes.io/hostname: dashboard.example.com + # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2 + # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/server-snippet: | + # proxy_ssl_verify off; + host: + name: ceph.apps.ncd0.harmony.mcd + path: null # TODO the chart does not allow removing the path, and it causes openshift to fail creating a route, because path is not supported with termination mode passthrough + pathType: ImplementationSpecific + tls: + - {} + # secretName: testsecret-tls + # Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time + # to set the ingress class + # ingressClassName: openshift-default + # labels: + # external-dns/private: "true" + # annotations: + # external-dns.alpha.kubernetes.io/hostname: dashboard.example.com + # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2 + # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly + # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + # nginx.ingress.kubernetes.io/server-snippet: | + # proxy_ssl_verify off; + # host: + # name: dashboard.example.com + # path: "/ceph-dashboard(/|$)(.*)" + # pathType: Prefix + # tls: + # - hosts: + # - dashboard.example.com + # secretName: testsecret-tls + ## Note: Only one of ingress class annotation or the `ingressClassName:` can be used at a time + ## to set the ingress class + # ingressClassName: nginx + +# -- A list of CephBlockPool configurations to deploy +# @default -- See [below](#ceph-block-pools) +cephBlockPools: + - name: ceph-blockpool + # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration + spec: + failureDomain: host + replicated: + size: 3 + # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false. + # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics + # enableRBDStats: true + storageClass: + enabled: true + name: ceph-block + annotations: {} + labels: {} + isDefault: true + reclaimPolicy: Delete + allowVolumeExpansion: true + volumeBindingMode: "Immediate" + mountOptions: [] + # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies + allowedTopologies: [] + # - matchLabelExpressions: + # - key: rook-ceph-role + # values: + # - storage-node + # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration + parameters: + # (optional) mapOptions is a comma-separated list of map options. + # For krbd options refer + # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options + # mapOptions: lock_on_read,queue_depth=1024 + + # (optional) unmapOptions is a comma-separated list of unmap options. 
+ # For krbd options refer + # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options + # For nbd options refer + # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options + # unmapOptions: force + + # RBD image format. Defaults to "2". + imageFormat: "2" + + # RBD image features, equivalent to OR'd bitfield value: 63 + # Available for imageFormat: "2". Older releases of CSI RBD + # support only the `layering` feature. The Linux kernel (KRBD) supports the + # full feature complement as of 5.4 + imageFeatures: layering + + # These secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node + csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}" + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 + +# -- A list of CephFileSystem configurations to deploy +# @default -- See [below](#ceph-file-systems) +cephFileSystems: + - name: ceph-filesystem + # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration + spec: + metadataPool: + replicated: + size: 3 + dataPools: + - failureDomain: host + replicated: + size: 3 + # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools + name: data0 + metadataServer: + activeCount: 1 + activeStandby: true + resources: + limits: + memory: "4Gi" + requests: + cpu: "1000m" + memory: "4Gi" + priorityClassName: system-cluster-critical + storageClass: + enabled: true + isDefault: false + name: ceph-filesystem + # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default + pool: data0 + reclaimPolicy: Delete + allowVolumeExpansion: true + volumeBindingMode: "Immediate" + annotations: {} + labels: {} + mountOptions: [] + # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration + parameters: + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}" + # Specify the filesystem type of the volume. If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. 
+ csi.storage.k8s.io/fstype: ext4 + +# -- Settings for the filesystem snapshot class +# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots) +cephFileSystemVolumeSnapshotClass: + enabled: false + name: ceph-filesystem + isDefault: true + deletionPolicy: Delete + annotations: {} + labels: {} + # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration + parameters: {} + +# -- Settings for the block pool snapshot class +# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots) +cephBlockPoolsVolumeSnapshotClass: + enabled: false + name: ceph-block + isDefault: false + deletionPolicy: Delete + annotations: {} + labels: {} + # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration + parameters: {} + +# -- A list of CephObjectStore configurations to deploy +# @default -- See [below](#ceph-object-stores) +cephObjectStores: + - name: ceph-objectstore + # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration + spec: + metadataPool: + failureDomain: host + replicated: + size: 3 + dataPool: + failureDomain: host + erasureCoded: + dataChunks: 2 + codingChunks: 1 + parameters: + bulk: "true" + preservePoolsOnDelete: true + gateway: + port: 80 + resources: + limits: + memory: "2Gi" + requests: + cpu: "1000m" + memory: "1Gi" + # securePort: 443 + # sslCertificateRef: + instances: 1 + priorityClassName: system-cluster-critical + # opsLogSidecar: + # resources: + # limits: + # memory: "100Mi" + # requests: + # cpu: "100m" + # memory: "40Mi" + storageClass: + enabled: true + name: ceph-bucket + reclaimPolicy: Delete + volumeBindingMode: "Immediate" + annotations: {} + labels: {} + # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration + parameters: + # note: objectStoreNamespace and objectStoreName are configured by the chart + region: us-east-1 + ingress: + # Enable an ingress for the ceph-objectstore + enabled: true + # The ingress port by default will be the object store's "securePort" (if set), or the gateway "port". + # To override those defaults, set this ingress port to the desired port. + # port: 80 + # annotations: {} + host: + name: objectstore.apps.ncd0.harmony.mcd + path: / + pathType: Prefix + # tls: + # - hosts: + # - objectstore.example.com + # secretName: ceph-objectstore-tls + # ingressClassName: nginx +## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it +## For erasure coded a replicated metadata pool is required. +## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded +#cephECBlockPools: +# - name: ec-pool +# spec: +# metadataPool: +# replicated: +# size: 2 +# dataPool: +# failureDomain: osd +# erasureCoded: +# dataChunks: 2 +# codingChunks: 1 +# deviceClass: hdd +# +# parameters: +# # clusterID is the namespace where the rook cluster is running +# # If you change this namespace, also change the namespace below where the secret namespaces are defined +# clusterID: rook-ceph # namespace:cluster +# # (optional) mapOptions is a comma-separated list of map options. 
+# # For krbd options refer +# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options +# # For nbd options refer +# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options +# # mapOptions: lock_on_read,queue_depth=1024 +# +# # (optional) unmapOptions is a comma-separated list of unmap options. +# # For krbd options refer +# # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options +# # For nbd options refer +# # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options +# # unmapOptions: force +# +# # RBD image format. Defaults to "2". +# imageFormat: "2" +# +# # RBD image features, equivalent to OR'd bitfield value: 63 +# # Available for imageFormat: "2". Older releases of CSI RBD +# # support only the `layering` feature. The Linux kernel (KRBD) supports the +# # full feature complement as of 5.4 +# # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock +# imageFeatures: layering +# +# storageClass: +# provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name +# enabled: true +# name: rook-ceph-block +# isDefault: false +# annotations: { } +# labels: { } +# allowVolumeExpansion: true +# reclaimPolicy: Delete + +# -- CSI driver name prefix for cephfs, rbd and nfs. +# @default -- `namespace name where rook-ceph operator is deployed` +csiDriverNamePrefix: diff --git a/examples/nanodc/rook-operator/install-rook-operator.sh b/examples/nanodc/rook-operator/install-rook-operator.sh new file mode 100644 index 0000000..77a32b7 --- /dev/null +++ b/examples/nanodc/rook-operator/install-rook-operator.sh @@ -0,0 +1,3 @@ +#!/bin/bash +helm repo add rook-release https://charts.rook.io/release +helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -f values.yaml diff --git a/examples/nanodc/rook-operator/values.yaml b/examples/nanodc/rook-operator/values.yaml new file mode 100644 index 0000000..a6980a3 --- /dev/null +++ b/examples/nanodc/rook-operator/values.yaml @@ -0,0 +1,674 @@ +# Default values for rook-ceph-operator +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + # -- Image + repository: docker.io/rook/ceph + # -- Image tag + # @default -- `master` + tag: v1.17.1 + # -- Image pull policy + pullPolicy: IfNotPresent + +crds: + # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be + # managed independently with deploy/examples/crds.yaml. + # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED. + # If the CRDs are deleted in this case, see + # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion) + # to restore them. + enabled: true + +# -- Pod resource requests & limits +resources: + limits: + memory: 512Mi + requests: + cpu: 200m + memory: 128Mi + +# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment. +nodeSelector: {} +# Constraint rook-ceph-operator Deployment to nodes with label `disktype: ssd`. +# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +# disktype: ssd + +# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment. 
+tolerations: [] + +# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override +# the Kubernetes default of 5 minutes +unreachableNodeTolerationSeconds: 5 + +# -- Whether the operator should watch cluster CRD in its own namespace or not +currentNamespaceOnly: false + +# -- Custom pod labels for the operator +operatorPodLabels: {} + +# -- Pod annotations +annotations: {} + +# -- Global log level for the operator. +# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG` +logLevel: INFO + +# -- If true, create & use RBAC resources +rbacEnable: true + +rbacAggregate: + # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims + enableOBCs: false + +# -- If true, create & use PSP resources +pspEnable: false + +# -- Set the priority class for the rook operator deployment if desired +priorityClassName: + +# -- Set the container security context for the operator +containerSecurityContext: + runAsNonRoot: true + runAsUser: 2016 + runAsGroup: 2016 + capabilities: + drop: ["ALL"] +# -- If true, loop devices are allowed to be used for osds in test clusters +allowLoopDevices: false + +# Settings for whether to disable the drivers or other daemons if they are not +# needed +csi: + # -- Enable Ceph CSI RBD driver + enableRbdDriver: true + # -- Enable Ceph CSI CephFS driver + enableCephfsDriver: true + # -- Disable the CSI driver. + disableCsiDriver: "false" + + # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary + # in some network configurations where the SDN does not provide access to an external cluster or + # there is significant drop in read/write performance + enableCSIHostNetwork: true + # -- Enable Snapshotter in CephFS provisioner pod + enableCephfsSnapshotter: true + # -- Enable Snapshotter in NFS provisioner pod + enableNFSSnapshotter: true + # -- Enable Snapshotter in RBD provisioner pod + enableRBDSnapshotter: true + # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins + enablePluginSelinuxHostMount: false + # -- Enable Ceph CSI PVC encryption support + enableCSIEncryption: false + + # -- Enable volume group snapshot feature. This feature is + # enabled by default as long as the necessary CRDs are available in the cluster. + enableVolumeGroupSnapshot: true + # -- PriorityClassName to be set on csi driver plugin pods + pluginPriorityClassName: system-node-critical + + # -- PriorityClassName to be set on csi driver provisioner pods + provisionerPriorityClassName: system-cluster-critical + + # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted. + # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + rbdFSGroupPolicy: "File" + + # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted. + # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + cephFSFSGroupPolicy: "File" + + # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted. + # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + nfsFSGroupPolicy: "File" + + # -- OMAP generator generates the omap mapping between the PV name and the RBD image + # which helps CSI to identify the rbd images for CSI operations. 
+ # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature. + # By default OMAP generator is disabled and when enabled, it will be deployed as a + # sidecar with CSI provisioner pod, to enable set it to true. + enableOMAPGenerator: false + + # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options. + # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR + cephFSKernelMountOptions: + + # -- Enable adding volume metadata on the CephFS subvolumes and RBD images. + # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images. + # Hence enable metadata is false by default + enableMetadata: false + + # -- Set replicas for csi provisioner deployment + provisionerReplicas: 2 + + # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful + # in cases like for example, when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster + clusterName: + + # -- Set logging level for cephCSI containers maintained by the cephCSI. + # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity. + logLevel: 0 + + # -- Set logging level for Kubernetes-csi sidecar containers. + # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. + # @default -- `0` + sidecarLogLevel: + + # -- CSI driver name prefix for cephfs, rbd and nfs. + # @default -- `namespace name where rook-ceph operator is deployed` + csiDriverNamePrefix: + + # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate + # @default -- `RollingUpdate` + rbdPluginUpdateStrategy: + + # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy. + # @default -- `1` + rbdPluginUpdateStrategyMaxUnavailable: + + # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate + # @default -- `RollingUpdate` + cephFSPluginUpdateStrategy: + + # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy. + # @default -- `1` + cephFSPluginUpdateStrategyMaxUnavailable: + + # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate + # @default -- `RollingUpdate` + nfsPluginUpdateStrategy: + + # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150 + grpcTimeoutInSeconds: 150 + + # -- Burst to use while communicating with the kubernetes apiserver. + kubeApiBurst: + + # -- QPS to use while communicating with the kubernetes apiserver. 
+ kubeApiQPS: + + # -- The volume of the CephCSI RBD plugin DaemonSet + csiRBDPluginVolume: + # - name: lib-modules + # hostPath: + # path: /run/booted-system/kernel-modules/lib/modules/ + # - name: host-nix + # hostPath: + # path: /nix + + # -- The volume mounts of the CephCSI RBD plugin DaemonSet + csiRBDPluginVolumeMount: + # - name: host-nix + # mountPath: /nix + # readOnly: true + + # -- The volume of the CephCSI CephFS plugin DaemonSet + csiCephFSPluginVolume: + # - name: lib-modules + # hostPath: + # path: /run/booted-system/kernel-modules/lib/modules/ + # - name: host-nix + # hostPath: + # path: /nix + + # -- The volume mounts of the CephCSI CephFS plugin DaemonSet + csiCephFSPluginVolumeMount: + # - name: host-nix + # mountPath: /nix + # readOnly: true + + # -- CEPH CSI RBD provisioner resource requirement list + # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true` + # @default -- see values.yaml + csiRBDProvisionerResource: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-resizer + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-attacher + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-snapshotter + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-rbdplugin + resource: + requests: + memory: 512Mi + limits: + memory: 1Gi + - name : csi-omap-generator + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + + # -- CEPH CSI RBD plugin resource requirement list + # @default -- see values.yaml + csiRBDPluginResource: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + - name : csi-rbdplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + + # -- CEPH CSI CephFS provisioner resource requirement list + # @default -- see values.yaml + csiCephFSProvisionerResource: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-resizer + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-attacher + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-snapshotter + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + - name : csi-cephfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + + # -- CEPH CSI CephFS plugin resource requirement list + # @default -- see values.yaml + csiCephFSPluginResource: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + - name : csi-cephfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + - name : liveness-prometheus + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + + # -- CEPH CSI NFS provisioner resource requirement list + # @default -- see values.yaml + csiNFSProvisionerResource: | + - name : csi-provisioner + resource: + requests: + memory: 128Mi + cpu: 100m + limits: + memory: 256Mi + 
- name : csi-nfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + - name : csi-attacher + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + + # -- CEPH CSI NFS plugin resource requirement list + # @default -- see values.yaml + csiNFSPluginResource: | + - name : driver-registrar + resource: + requests: + memory: 128Mi + cpu: 50m + limits: + memory: 256Mi + - name : csi-nfsplugin + resource: + requests: + memory: 512Mi + cpu: 250m + limits: + memory: 1Gi + + # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod. + # The CSI provisioner would be best to start on the same nodes as other ceph daemons. + + # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment + provisionerTolerations: + # - key: key + # operator: Exists + # effect: NoSchedule + + # -- The node labels for affinity of the CSI provisioner deployment [^1] + provisionerNodeAffinity: #key1=value1,value2; key2=value3 + # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods. + # The CSI plugins need to be started on all the nodes where the clients need to mount the storage. + + # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet + pluginTolerations: + # - key: key + # operator: Exists + # effect: NoSchedule + + # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1] + pluginNodeAffinity: # key1=value1,value2; key2=value3 + + # -- Enable Ceph CSI Liveness sidecar deployment + enableLiveness: false + + # -- CSI CephFS driver metrics port + # @default -- `9081` + cephfsLivenessMetricsPort: + + # -- CSI Addons server port + # @default -- `9070` + csiAddonsPort: + # -- CSI Addons server port for the RBD provisioner + # @default -- `9070` + csiAddonsRBDProvisionerPort: + # -- CSI Addons server port for the Ceph FS provisioner + # @default -- `9070` + csiAddonsCephFSProvisionerPort: + + # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS + # you may want to disable this setting. However, this will cause an issue during upgrades + # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html) + forceCephFSKernelClient: true + + # -- Ceph CSI RBD driver metrics port + # @default -- `8080` + rbdLivenessMetricsPort: + + serviceMonitor: + # -- Enable ServiceMonitor for Ceph CSI drivers + enabled: false + # -- Service monitor scrape interval + interval: 10s + # -- ServiceMonitor additional labels + labels: {} + # -- Use a different namespace for the ServiceMonitor + namespace: + + # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag) + # @default -- `/var/lib/kubelet` + kubeletDirPath: + + # -- Duration in seconds that non-leader candidates will wait to force acquire leadership. + # @default -- `137s` + csiLeaderElectionLeaseDuration: + + # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up. + # @default -- `107s` + csiLeaderElectionRenewDeadline: + + # -- Retry period in seconds the LeaderElector clients should wait between tries of actions. 
+ # @default -- `26s` + csiLeaderElectionRetryPeriod: + + cephcsi: + # -- Ceph CSI image repository + repository: quay.io/cephcsi/cephcsi + # -- Ceph CSI image tag + tag: v3.14.0 + + registrar: + # -- Kubernetes CSI registrar image repository + repository: registry.k8s.io/sig-storage/csi-node-driver-registrar + # -- Registrar image tag + tag: v2.13.0 + + provisioner: + # -- Kubernetes CSI provisioner image repository + repository: registry.k8s.io/sig-storage/csi-provisioner + # -- Provisioner image tag + tag: v5.1.0 + + snapshotter: + # -- Kubernetes CSI snapshotter image repository + repository: registry.k8s.io/sig-storage/csi-snapshotter + # -- Snapshotter image tag + tag: v8.2.0 + + attacher: + # -- Kubernetes CSI Attacher image repository + repository: registry.k8s.io/sig-storage/csi-attacher + # -- Attacher image tag + tag: v4.8.0 + + resizer: + # -- Kubernetes CSI resizer image repository + repository: registry.k8s.io/sig-storage/csi-resizer + # -- Resizer image tag + tag: v1.13.1 + + # -- Image pull policy + imagePullPolicy: IfNotPresent + + # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods + cephfsPodLabels: #"key1=value1,key2=value2" + + # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods + nfsPodLabels: #"key1=value1,key2=value2" + + # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods + rbdPodLabels: #"key1=value1,key2=value2" + + csiAddons: + # -- Enable CSIAddons + enabled: false + # -- CSIAddons sidecar image repository + repository: quay.io/csiaddons/k8s-sidecar + # -- CSIAddons sidecar image tag + tag: v0.12.0 + + nfs: + # -- Enable the nfs csi driver + enabled: false + + topology: + # -- Enable topology based provisioning + enabled: false + # NOTE: the value here serves as an example and needs to be + # updated with node labels that define domains of interest + # -- domainLabels define which node labels to use as domains + # for CSI nodeplugins to advertise their domains + domainLabels: + # - kubernetes.io/hostname + # - topology.kubernetes.io/zone + # - topology.rook.io/rack + + # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details + # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object). + # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation + # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for + # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details. + cephFSAttachRequired: true + # -- Whether to skip any attach operation altogether for RBD PVCs. See more details + # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object). + # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast. + # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption. + # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set + # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on. + # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details. + rbdAttachRequired: true + # -- Whether to skip any attach operation altogether for NFS PVCs. See more details + # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object). 
+ # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation + # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for + # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details. + nfsAttachRequired: true + +# -- Enable discovery daemon +enableDiscoveryDaemon: false +# -- Set the discovery daemon device discovery interval (default to 60m) +discoveryDaemonInterval: 60m + +# -- The timeout for ceph commands in seconds +cephCommandsTimeoutSeconds: "15" + +# -- If true, run rook operator on the host network +useOperatorHostNetwork: + +# -- If true, scale down the rook operator. +# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling +# to deploy your helm charts. +scaleDownOperator: false + +## Rook Discover configuration +## toleration: NoSchedule, PreferNoSchedule or NoExecute +## tolerationKey: Set this to the specific key of the taint to tolerate +## tolerations: Array of tolerations in YAML format which will be added to agent deployment +## nodeAffinity: Set to labels of the node to match + +discover: + # -- Toleration for the discover pods. + # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute` + toleration: + # -- The specific key of the taint to tolerate + tolerationKey: + # -- Array of tolerations in YAML format which will be added to discover deployment + tolerations: + # - key: key + # operator: Exists + # effect: NoSchedule + # -- The node labels for affinity of `discover-agent` [^1] + nodeAffinity: + # key1=value1,value2; key2=value3 + # + # or + # + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: storage-node + # operator: Exists + # -- Labels to add to the discover pods + podLabels: # "key1=value1,key2=value2" + # -- Add resources to discover daemon pods + resources: + # - limits: + # memory: 512Mi + # - requests: + # cpu: 100m + # memory: 128Mi + +# -- Custom label to identify node hostname. If not set `kubernetes.io/hostname` will be used +customHostnameLabel: + +# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions. +hostpathRequiresPrivileged: false + +# -- Whether to create all Rook pods to run on the host network, for example in environments where a CNI is not enabled +enforceHostNetwork: false + +# -- Disable automatic orchestration when new devices are discovered. +disableDeviceHotplug: false + +# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10. +revisionHistoryLimit: + +# -- Blacklist certain disks according to the regex provided. +discoverDaemonUdev: + +# -- imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts. +imagePullSecrets: +# - name: my-registry-secret + +# -- Whether the OBC provisioner should watch on the operator namespace or not, if not the namespace of the cluster will be used +enableOBCWatchOperatorNamespace: true + +# -- Specify the prefix for the OBC provisioner in place of the cluster namespace +# @default -- `ceph cluster namespace` +obcProvisionerNamePrefix: + +# -- Many OBC additional config fields may be risky for administrators to allow users control over. +# The safe and default-allowed fields are 'maxObjects' and 'maxSize'. +# Other fields should be considered risky. 
To allow all additional configs, use this value: +# "maxObjects,maxSize,bucketMaxObjects,bucketMaxSize,bucketPolicy,bucketLifecycle,bucketOwner" +# @default -- "maxObjects,maxSize" +obcAllowAdditionalConfigFields: "maxObjects,maxSize" + +monitoring: + # -- Enable monitoring. Requires Prometheus to be pre-installed. + # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors + enabled: false diff --git a/examples/nanodc/src/main.rs b/examples/nanodc/src/main.rs index 27e6849..31c5ba3 100644 --- a/examples/nanodc/src/main.rs +++ b/examples/nanodc/src/main.rs @@ -1,26 +1,145 @@ +use std::{ + net::{IpAddr, Ipv4Addr}, + sync::Arc, +}; + +use cidr::Ipv4Cidr; use harmony::{ + hardware::{FirewallGroup, HostCategory, Location, PhysicalHost, SwitchGroup}, + infra::opnsense::OPNSenseManagementInterface, inventory::Inventory, maestro::Maestro, - modules::dummy::{ErrorScore, PanicScore, SuccessScore}, - topology::HAClusterTopology, + modules::{ + http::HttpScore, + ipxe::IpxeScore, + okd::{ + bootstrap_dhcp::OKDBootstrapDhcpScore, + bootstrap_load_balancer::OKDBootstrapLoadBalancerScore, dhcp::OKDDhcpScore, + dns::OKDDnsScore, + }, + tftp::TftpScore, + }, + topology::{LogicalHost, UnmanagedRouter, Url}, }; +use harmony_macros::{ip, mac_address}; #[tokio::main] async fn main() { - let inventory = Inventory::autoload(); - let topology = HAClusterTopology::autoload(); - let mut maestro = Maestro::initialize(inventory, topology).await.unwrap(); + let firewall = harmony::topology::LogicalHost { + ip: ip!("192.168.33.1"), + name: String::from("fw0"), + }; + let opnsense = Arc::new( + harmony::infra::opnsense::OPNSenseFirewall::new(firewall, None, "root", "opnsense").await, + ); + let lan_subnet = Ipv4Addr::new(192, 168, 33, 0); + let gateway_ipv4 = Ipv4Addr::new(192, 168, 33, 1); + let gateway_ip = IpAddr::V4(gateway_ipv4); + let topology = harmony::topology::HAClusterTopology { + domain_name: "ncd0.harmony.mcd".to_string(), // TODO this must be set manually correctly + // when setting up the opnsense firewall + router: Arc::new(UnmanagedRouter::new( + gateway_ip, + Ipv4Cidr::new(lan_subnet, 24).unwrap(), + )), + load_balancer: opnsense.clone(), + firewall: opnsense.clone(), + tftp_server: opnsense.clone(), + http_server: opnsense.clone(), + dhcp_server: opnsense.clone(), + dns_server: opnsense.clone(), + control_plane: vec![ + LogicalHost { + ip: ip!("192.168.33.20"), + name: "cp0".to_string(), + }, + LogicalHost { + ip: ip!("192.168.33.21"), + name: "cp1".to_string(), + }, + LogicalHost { + ip: ip!("192.168.33.22"), + name: "cp2".to_string(), + }, + ], + bootstrap_host: LogicalHost { + ip: ip!("192.168.33.66"), + name: "bootstrap".to_string(), + }, + workers: vec![ + LogicalHost { + ip: ip!("192.168.33.30"), + name: "wk0".to_string(), + }, + LogicalHost { + ip: ip!("192.168.33.31"), + name: "wk1".to_string(), + }, + LogicalHost { + ip: ip!("192.168.33.32"), + name: "wk2".to_string(), + }, + ], + switch: vec![], + }; + + let inventory = Inventory { + location: Location::new("I am mobile".to_string(), "earth".to_string()), + switch: SwitchGroup::from([]), + firewall: FirewallGroup::from([PhysicalHost::empty(HostCategory::Firewall) + .management(Arc::new(OPNSenseManagementInterface::new()))]), + storage_host: vec![], + worker_host: vec![ + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:02:61:0F")), + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:02:61:26")), + // thisone + // Then create the ipxe file + // set the dns 
static leases + // bootstrap nodes + // start ceph cluster + // try installation of lampscore + // bingo? + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:02:61:70")), + ], + control_plane_host: vec![ + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:02:60:FA")), + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:02:61:1A")), + PhysicalHost::empty(HostCategory::Server) + .mac_address(mac_address!("C4:62:37:01:BC:68")), + ], + }; + + // TODO regroup smaller scores in a larger one such as this + // let okd_boostrap_preparation(); + + let bootstrap_dhcp_score = OKDBootstrapDhcpScore::new(&topology, &inventory); + let bootstrap_load_balancer_score = OKDBootstrapLoadBalancerScore::new(&topology); + let dhcp_score = OKDDhcpScore::new(&topology, &inventory); + let dns_score = OKDDnsScore::new(&topology); + let load_balancer_score = + harmony::modules::okd::load_balancer::OKDLoadBalancerScore::new(&topology); + + let tftp_score = TftpScore::new(Url::LocalFolder("./data/watchguard/tftpboot".to_string())); + let http_score = HttpScore::new(Url::LocalFolder( + "./data/watchguard/pxe-http-files".to_string(), + )); + let ipxe_score = IpxeScore::new(); + let mut maestro = Maestro::initialize(inventory, topology).await.unwrap(); maestro.register_all(vec![ - // ADD scores : - // 1. OPNSense setup scores - // 2. Bootstrap node setup - // 3. Control plane setup - // 4. Workers setup - // 5. Various tools and apps setup - Box::new(SuccessScore {}), - Box::new(ErrorScore {}), - Box::new(PanicScore {}), + Box::new(dns_score), + Box::new(bootstrap_dhcp_score), + Box::new(bootstrap_load_balancer_score), + Box::new(load_balancer_score), + Box::new(tftp_score), + Box::new(http_score), + Box::new(ipxe_score), + Box::new(dhcp_score), ]); harmony_tui::init(maestro).await.unwrap(); } diff --git a/harmony/src/domain/config.rs b/harmony/src/domain/config.rs index 0fa059f..53d7446 100644 --- a/harmony/src/domain/config.rs +++ b/harmony/src/domain/config.rs @@ -6,8 +6,8 @@ lazy_static! 
{ .unwrap() .data_dir() .join("harmony"); - pub static ref REGISTRY_URL: String = std::env::var("HARMONY_REGISTRY_URL") - .unwrap_or_else(|_| "hub.nationtech.io".to_string()); + pub static ref REGISTRY_URL: String = + std::env::var("HARMONY_REGISTRY_URL").unwrap_or_else(|_| "hub.nationtech.io".to_string()); pub static ref REGISTRY_PROJECT: String = std::env::var("HARMONY_REGISTRY_PROJECT").unwrap_or_else(|_| "harmony".to_string()); } diff --git a/harmony/src/domain/hardware/mod.rs b/harmony/src/domain/hardware/mod.rs index 8e24768..41576fc 100644 --- a/harmony/src/domain/hardware/mod.rs +++ b/harmony/src/domain/hardware/mod.rs @@ -138,7 +138,8 @@ impl ManagementInterface for ManualManagementInterface { } fn get_supported_protocol_names(&self) -> String { - todo!() + // todo!() + "none".to_string() } } diff --git a/harmony/src/domain/interpret/mod.rs b/harmony/src/domain/interpret/mod.rs index 789edc6..d0e00d6 100644 --- a/harmony/src/domain/interpret/mod.rs +++ b/harmony/src/domain/interpret/mod.rs @@ -15,6 +15,7 @@ pub enum InterpretName { LoadBalancer, Tftp, Http, + Ipxe, Dummy, Panic, OPNSense, @@ -29,6 +30,7 @@ impl std::fmt::Display for InterpretName { InterpretName::LoadBalancer => f.write_str("LoadBalancer"), InterpretName::Tftp => f.write_str("Tftp"), InterpretName::Http => f.write_str("Http"), + InterpretName::Ipxe => f.write_str("iPXE"), InterpretName::Dummy => f.write_str("Dummy"), InterpretName::Panic => f.write_str("Panic"), InterpretName::OPNSense => f.write_str("OPNSense"), diff --git a/harmony/src/domain/topology/ha_cluster.rs b/harmony/src/domain/topology/ha_cluster.rs index 4e94f49..6ee1844 100644 --- a/harmony/src/domain/topology/ha_cluster.rs +++ b/harmony/src/domain/topology/ha_cluster.rs @@ -168,6 +168,16 @@ impl DhcpServer for HAClusterTopology { async fn commit_config(&self) -> Result<(), ExecutorError> { self.dhcp_server.commit_config().await } + + async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> { + self.dhcp_server.set_filename(filename).await + } + async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError> { + self.dhcp_server.set_filename64(filename64).await + } + async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> { + self.dhcp_server.set_filenameipxe(filenameipxe).await + } } #[async_trait] @@ -293,6 +303,15 @@ impl DhcpServer for DummyInfra { async fn set_boot_filename(&self, _boot_filename: &str) -> Result<(), ExecutorError> { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } + async fn set_filename(&self, _filename: &str) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + async fn set_filename64(&self, _filename: &str) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } + async fn set_filenameipxe(&self, _filenameipxe: &str) -> Result<(), ExecutorError> { + unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) + } fn get_ip(&self) -> IpAddress { unimplemented!("{}", UNIMPLEMENTED_DUMMY_INFRA) } diff --git a/harmony/src/domain/topology/network.rs b/harmony/src/domain/topology/network.rs index ce6ec1e..42ff8c3 100644 --- a/harmony/src/domain/topology/network.rs +++ b/harmony/src/domain/topology/network.rs @@ -53,6 +53,9 @@ pub trait DhcpServer: Send + Sync + std::fmt::Debug { async fn list_static_mappings(&self) -> Vec<(MacAddress, IpAddress)>; async fn set_next_server(&self, ip: IpAddress) -> Result<(), ExecutorError>; async fn set_boot_filename(&self, boot_filename: &str) -> Result<(), ExecutorError>; + async fn 
set_filename(&self, filename: &str) -> Result<(), ExecutorError>; + async fn set_filename64(&self, filename64: &str) -> Result<(), ExecutorError>; + async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError>; fn get_ip(&self) -> IpAddress; fn get_host(&self) -> LogicalHost; async fn commit_config(&self) -> Result<(), ExecutorError>; diff --git a/harmony/src/infra/opnsense/dhcp.rs b/harmony/src/infra/opnsense/dhcp.rs index 028921f..bea44fe 100644 --- a/harmony/src/infra/opnsense/dhcp.rs +++ b/harmony/src/infra/opnsense/dhcp.rs @@ -69,4 +69,34 @@ impl DhcpServer for OPNSenseFirewall { Ok(()) } + + async fn set_filename(&self, filename: &str) -> Result<(), ExecutorError> { + { + let mut writable_opnsense = self.opnsense_config.write().await; + writable_opnsense.dhcp().set_filename(filename); + debug!("OPNsense dhcp server set filename {filename}"); + } + + Ok(()) + } + + async fn set_filename64(&self, filename: &str) -> Result<(), ExecutorError> { + { + let mut writable_opnsense = self.opnsense_config.write().await; + writable_opnsense.dhcp().set_filename64(filename); + debug!("OPNsense dhcp server set filename {filename}"); + } + + Ok(()) + } + + async fn set_filenameipxe(&self, filenameipxe: &str) -> Result<(), ExecutorError> { + { + let mut writable_opnsense = self.opnsense_config.write().await; + writable_opnsense.dhcp().set_filenameipxe(filenameipxe); + debug!("OPNsense dhcp server set filenameipxe {filenameipxe}"); + } + + Ok(()) + } } diff --git a/harmony/src/infra/opnsense/http.rs b/harmony/src/infra/opnsense/http.rs index a06fe5b..2650717 100644 --- a/harmony/src/infra/opnsense/http.rs +++ b/harmony/src/infra/opnsense/http.rs @@ -61,7 +61,7 @@ impl HttpServer for OPNSenseFirewall { info!("Adding custom caddy config files"); config .upload_files( - "../../../watchguard/caddy_config", + "./data/watchguard/caddy_config", "/usr/local/etc/caddy/caddy.d/", ) .await diff --git a/harmony/src/infra/opnsense/load_balancer.rs b/harmony/src/infra/opnsense/load_balancer.rs index dd32a03..cae414a 100644 --- a/harmony/src/infra/opnsense/load_balancer.rs +++ b/harmony/src/infra/opnsense/load_balancer.rs @@ -370,10 +370,13 @@ mod tests { let result = get_servers_for_backend(&backend, &haproxy); // Check the result - assert_eq!(result, vec![BackendServer { - address: "192.168.1.1".to_string(), - port: 80, - },]); + assert_eq!( + result, + vec![BackendServer { + address: "192.168.1.1".to_string(), + port: 80, + },] + ); } #[test] fn test_get_servers_for_backend_no_linked_servers() { @@ -430,15 +433,18 @@ mod tests { // Call the function let result = get_servers_for_backend(&backend, &haproxy); // Check the result - assert_eq!(result, vec![ - BackendServer { - address: "some-hostname.test.mcd".to_string(), - port: 80, - }, - BackendServer { - address: "192.168.1.2".to_string(), - port: 8080, - }, - ]); + assert_eq!( + result, + vec![ + BackendServer { + address: "some-hostname.test.mcd".to_string(), + port: 80, + }, + BackendServer { + address: "192.168.1.2".to_string(), + port: 8080, + }, + ] + ); } } diff --git a/harmony/src/modules/dhcp.rs b/harmony/src/modules/dhcp.rs index 6ef0c3d..0112b41 100644 --- a/harmony/src/modules/dhcp.rs +++ b/harmony/src/modules/dhcp.rs @@ -17,6 +17,9 @@ pub struct DhcpScore { pub host_binding: Vec, pub next_server: Option, pub boot_filename: Option, + pub filename: Option, + pub filename64: Option, + pub filenameipxe: Option, } impl Score for DhcpScore { @@ -117,8 +120,44 @@ impl DhcpInterpret { None => Outcome::noop(), }; + let filename_outcome = 
match &self.score.filename {
+ Some(filename) => {
+ dhcp_server.set_filename(&filename).await?;
+ Outcome::new(
+ InterpretStatus::SUCCESS,
+ format!("Dhcp Interpret Set filename to {filename}"),
+ )
+ }
+ None => Outcome::noop(),
+ };
+
+ let filename64_outcome = match &self.score.filename64 {
+ Some(filename64) => {
+ dhcp_server.set_filename64(&filename64).await?;
+ Outcome::new(
+ InterpretStatus::SUCCESS,
+ format!("Dhcp Interpret Set filename64 to {filename64}"),
+ )
+ }
+ None => Outcome::noop(),
+ };
+
+ let filenameipxe_outcome = match &self.score.filenameipxe {
+ Some(filenameipxe) => {
+ dhcp_server.set_filenameipxe(&filenameipxe).await?;
+ Outcome::new(
+ InterpretStatus::SUCCESS,
+ format!("Dhcp Interpret Set filenameipxe to {filenameipxe}"),
+ )
+ }
+ None => Outcome::noop(),
+ };
+
 if next_server_outcome.status == InterpretStatus::NOOP && boot_filename_outcome.status == InterpretStatus::NOOP
+ && filename_outcome.status == InterpretStatus::NOOP
+ && filename64_outcome.status == InterpretStatus::NOOP
+ && filenameipxe_outcome.status == InterpretStatus::NOOP
 { return Ok(Outcome::noop()); }
@@ -126,8 +165,12 @@ impl DhcpInterpret {
 Ok(Outcome::new(
 InterpretStatus::SUCCESS,
 format!(
- "Dhcp Interpret Set next boot to {:?} and boot_filename to {:?}",
- self.score.boot_filename, self.score.boot_filename
+ "Dhcp Interpret Set next boot to [{:?}], boot_filename to [{:?}], filename to [{:?}], filename64 to [{:?}], filenameipxe to [{:?}]",
+ self.score.next_server,
+ self.score.boot_filename,
+ self.score.filename,
+ self.score.filename64,
+ self.score.filenameipxe
 ),
 ))
 }
diff --git a/harmony/src/modules/ipxe.rs b/harmony/src/modules/ipxe.rs
new file mode 100644
index 0000000..f9e8ed3
--- /dev/null
+++ b/harmony/src/modules/ipxe.rs
@@ -0,0 +1,66 @@
+use async_trait::async_trait;
+use derive_new::new;
+use serde::Serialize;
+
+use crate::{
+ data::{Id, Version},
+ interpret::{Interpret, InterpretError, InterpretName, InterpretStatus, Outcome},
+ inventory::Inventory,
+ score::Score,
+ topology::Topology,
+};
+
+#[derive(Debug, new, Clone, Serialize)]
+pub struct IpxeScore {
+ //files_to_serve: Url,
+}
+
+impl Score for IpxeScore {
+ fn create_interpret(&self) -> Box> {
+ Box::new(IpxeInterpret::new(self.clone()))
+ }
+
+ fn name(&self) -> String {
+ "IpxeScore".to_string()
+ }
+}
+
+#[derive(Debug, new, Clone)]
+pub struct IpxeInterpret {
+ _score: IpxeScore,
+}
+
+#[async_trait]
+impl Interpret for IpxeInterpret {
+ async fn execute(
+ &self,
+ _inventory: &Inventory,
+ _topology: &T,
+ ) -> Result {
+ /*
+ let http_server = &topology.http_server;
+ http_server.ensure_initialized().await?;
+ Ok(Outcome::success(format!(
+ "Http Server running and serving files from {}",
+ self.score.files_to_serve
+ )))
+ */
+ todo!();
+ }
+
+ fn get_name(&self) -> InterpretName {
+ InterpretName::Ipxe
+ }
+
+ fn get_version(&self) -> Version {
+ todo!()
+ }
+
+ fn get_status(&self) -> InterpretStatus {
+ todo!()
+ }
+
+ fn get_children(&self) -> Vec {
+ todo!()
+ }
+}
diff --git a/harmony/src/modules/k8s/deployment.rs b/harmony/src/modules/k8s/deployment.rs
index 55f581f..019a7ac 100644
--- a/harmony/src/modules/k8s/deployment.rs
+++ b/harmony/src/modules/k8s/deployment.rs
@@ -1,5 +1,4 @@
-use k8s_openapi::{DeepMerge, api::apps::v1::Deployment};
-use log::debug;
+use k8s_openapi::api::apps::v1::Deployment;
 use serde::Serialize;
 use serde_json::json;
diff --git a/harmony/src/modules/mod.rs b/harmony/src/modules/mod.rs
index a578ada..6a615c5 100644
--- a/harmony/src/modules/mod.rs
+++ 
b/harmony/src/modules/mod.rs @@ -3,6 +3,7 @@ pub mod dns; pub mod dummy; pub mod helm; pub mod http; +pub mod ipxe; pub mod k3d; pub mod k8s; pub mod lamp; diff --git a/harmony/src/modules/okd/bootstrap_dhcp.rs b/harmony/src/modules/okd/bootstrap_dhcp.rs index 2e3dd6f..c133ecf 100644 --- a/harmony/src/modules/okd/bootstrap_dhcp.rs +++ b/harmony/src/modules/okd/bootstrap_dhcp.rs @@ -36,13 +36,20 @@ impl OKDBootstrapDhcpScore { .expect("Should have at least one worker to be used as bootstrap node") .clone(), }); + // TODO refactor this so it is not copy pasted from dhcp.rs Self { dhcp_score: DhcpScore::new( host_binding, // TODO : we should add a tftp server to the topology instead of relying on the // router address, this is leaking implementation details Some(topology.router.get_gateway()), - Some("bootx64.efi".to_string()), + None, // To allow UEFI boot we cannot provide a legacy file + Some("undionly.kpxe".to_string()), + Some("ipxe.efi".to_string()), + Some(format!( + "http://{}:8080/boot.ipxe", + topology.router.get_gateway() + )), ), } } diff --git a/harmony/src/modules/okd/dhcp.rs b/harmony/src/modules/okd/dhcp.rs index a060b31..3386592 100644 --- a/harmony/src/modules/okd/dhcp.rs +++ b/harmony/src/modules/okd/dhcp.rs @@ -15,7 +15,7 @@ pub struct OKDDhcpScore { impl OKDDhcpScore { pub fn new(topology: &HAClusterTopology, inventory: &Inventory) -> Self { - let host_binding = topology + let mut host_binding: Vec = topology .control_plane .iter() .enumerate() @@ -28,13 +28,35 @@ impl OKDDhcpScore { .clone(), }) .collect(); + + topology + .workers + .iter() + .enumerate() + .for_each(|(index, topology_entry)| { + host_binding.push(HostBinding { + logical_host: topology_entry.clone(), + physical_host: inventory + .worker_host + .get(index) + .expect("There should be enough worker hosts to fill topology") + .clone(), + }) + }); + Self { // TODO : we should add a tftp server to the topology instead of relying on the // router address, this is leaking implementation details dhcp_score: DhcpScore { host_binding, next_server: Some(topology.router.get_gateway()), - boot_filename: Some("bootx64.efi".to_string()), + boot_filename: None, + filename: Some("undionly.kpxe".to_string()), + filename64: Some("ipxe.efi".to_string()), + filenameipxe: Some(format!( + "http://{}:8080/boot.ipxe", + topology.router.get_gateway() + )), }, } } diff --git a/opnsense-config-xml/src/data/caddy.rs b/opnsense-config-xml/src/data/caddy.rs index c836d44..b4ca0fc 100644 --- a/opnsense-config-xml/src/data/caddy.rs +++ b/opnsense-config-xml/src/data/caddy.rs @@ -40,7 +40,11 @@ pub struct CaddyGeneral { #[yaserde(rename = "TlsDnsOptionalField4")] pub tls_dns_optional_field4: MaybeString, #[yaserde(rename = "TlsDnsPropagationTimeout")] - pub tls_dns_propagation_timeout: MaybeString, + pub tls_dns_propagation_timeout: Option, + #[yaserde(rename = "TlsDnsPropagationTimeoutPeriod")] + pub tls_dns_propagation_timeout_period: Option, + #[yaserde(rename = "TlsDnsPropagationDelay")] + pub tls_dns_propagation_delay: Option, #[yaserde(rename = "TlsDnsPropagationResolvers")] pub tls_dns_propagation_resolvers: MaybeString, pub accesslist: MaybeString, @@ -82,4 +86,8 @@ pub struct CaddyGeneral { pub auth_to_tls: Option, #[yaserde(rename = "AuthToUri")] pub auth_to_uri: MaybeString, + #[yaserde(rename = "ClientIpHeaders")] + pub client_ip_headers: MaybeString, + #[yaserde(rename = "CopyHeaders")] + pub copy_headers: MaybeString, } diff --git a/opnsense-config-xml/src/data/dhcpd.rs b/opnsense-config-xml/src/data/dhcpd.rs index 
5b06610..f334a9a 100644 --- a/opnsense-config-xml/src/data/dhcpd.rs +++ b/opnsense-config-xml/src/data/dhcpd.rs @@ -14,6 +14,8 @@ pub struct DhcpInterface { pub netboot: Option, pub nextserver: Option, pub filename64: Option, + pub filename: Option, + pub filenameipxe: Option, #[yaserde(rename = "ddnsdomainalgorithm")] pub ddns_domain_algorithm: Option, #[yaserde(rename = "numberoptions")] diff --git a/opnsense-config-xml/src/data/opnsense.rs b/opnsense-config-xml/src/data/opnsense.rs index aa39621..be3e9af 100644 --- a/opnsense-config-xml/src/data/opnsense.rs +++ b/opnsense-config-xml/src/data/opnsense.rs @@ -45,6 +45,7 @@ pub struct OPNsense { #[yaserde(rename = "Pischem")] pub pischem: Option, pub ifgroups: Ifgroups, + pub dnsmasq: Option, } impl From for OPNsense { @@ -166,7 +167,7 @@ pub struct Sysctl { pub struct SysctlItem { pub descr: MaybeString, pub tunable: String, - pub value: String, + pub value: MaybeString, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] @@ -279,6 +280,7 @@ pub struct User { pub scope: String, pub groupname: Option, pub password: String, + pub pwd_changed_at: Option, pub uid: u32, pub disabled: Option, pub landing_page: Option, @@ -540,6 +542,8 @@ pub struct GeneralIpsec { preferred_oldsa: Option, disablevpnrules: Option, passthrough_networks: Option, + user_source: Option, + local_group: Option, } #[derive(Debug, YaSerialize, YaDeserialize, PartialEq)] @@ -1219,6 +1223,7 @@ pub struct Host { pub rr: String, pub mxprio: MaybeString, pub mx: MaybeString, + pub ttl: Option, pub server: String, pub description: Option, } @@ -1233,6 +1238,7 @@ impl Host { rr, server, mxprio: MaybeString::default(), + ttl: Some(MaybeString::default()), mx: MaybeString::default(), description: None, } @@ -1421,7 +1427,7 @@ pub struct VirtualIp { #[yaserde(attribute = true)] pub version: String, #[yaserde(rename = "vip")] - pub vip: Vip, + pub vip: Option, } #[derive(Default, PartialEq, Debug, YaSerialize, YaDeserialize)] diff --git a/opnsense-config/src/modules/dhcp.rs b/opnsense-config/src/modules/dhcp.rs index 2c06304..3ab9ba1 100644 --- a/opnsense-config/src/modules/dhcp.rs +++ b/opnsense-config/src/modules/dhcp.rs @@ -179,7 +179,21 @@ impl<'a> DhcpConfig<'a> { pub fn set_boot_filename(&mut self, boot_filename: &str) { self.enable_netboot(); - self.get_lan_dhcpd().filename64 = Some(boot_filename.to_string()); self.get_lan_dhcpd().bootfilename = Some(boot_filename.to_string()); } + + pub fn set_filename(&mut self, filename: &str) { + self.enable_netboot(); + self.get_lan_dhcpd().filename = Some(filename.to_string()); + } + + pub fn set_filename64(&mut self, filename64: &str) { + self.enable_netboot(); + self.get_lan_dhcpd().filename64 = Some(filename64.to_string()); + } + + pub fn set_filenameipxe(&mut self, filenameipxe: &str) { + self.enable_netboot(); + self.get_lan_dhcpd().filenameipxe = Some(filenameipxe.to_string()); + } }