# Default values for a single rook-ceph cluster
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Namespace of the main rook operator
operatorNamespace: rook-ceph

# -- The metadata.name of the CephCluster CR
# @default -- The same as the namespace
clusterName:

# -- Optional override of the target kubernetes version
kubeVersion:

# -- Cluster ceph.conf override
configOverride:
# configOverride: |
#   [global]
#   mon_allow_pool_delete = true
#   osd_pool_default_size = 3
#   osd_pool_default_min_size = 2

# Installs a debugging toolbox deployment
toolbox:
  # -- Enable Ceph debugging pod deployment. See [toolbox](../Troubleshooting/ceph-toolbox.md)
  enabled: true
  # -- Toolbox image, defaults to the image used by the Ceph cluster
  image: #quay.io/ceph/ceph:v19.2.2
  # -- Toolbox tolerations
  tolerations: []
  # -- Toolbox affinity
  affinity: {}
  # -- Toolbox container security context
  containerSecurityContext:
    runAsNonRoot: true
    runAsUser: 2016
    runAsGroup: 2016
    capabilities:
      drop: ["ALL"]
  # -- Toolbox resources
  resources:
    limits:
      memory: "1Gi"
    requests:
      cpu: "100m"
      memory: "128Mi"
  # -- Set the priority class for the toolbox if desired
  priorityClassName:
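  # Hypothetical usage note: with the toolbox enabled, Ceph commands can typically
  # be run by exec'ing into the toolbox deployment, for example:
  #   kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph status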

monitoring:
  # -- Enable Prometheus integration; this will also create the necessary RBAC rules to allow the Operator to create ServiceMonitors.
  # Monitoring requires Prometheus to be pre-installed.
  enabled: false
  # -- Whether to disable the metrics reported by Ceph. If false, the prometheus mgr module and Ceph exporter are enabled
  metricsDisabled: false
  # -- Whether to create the Prometheus rules for Ceph alerts
  createPrometheusRules: false
  # -- The namespace in which to create the prometheus rules, if different from the rook cluster namespace.
  # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, the namespace where prometheus
  # is deployed) to set rulesNamespaceOverride for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
  rulesNamespaceOverride:
  # Monitoring settings for external clusters:
  # externalMgrEndpoints: <list of endpoints>
  # externalMgrPrometheusPort: <port>
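  # A hypothetical sketch of the external-cluster settings above (the IP and port
  # below are placeholders, not values from this cluster):
  # externalMgrEndpoints:
  #   - ip: "192.168.100.10"
  # externalMgrPrometheusPort: 9283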
  # Scrape interval for prometheus
  # interval: 10s
  # allow adding custom labels and annotations to the prometheus rule
  prometheusRule:
    # -- Labels applied to PrometheusRule
    labels: {}
    # -- Annotations applied to PrometheusRule
    annotations: {}
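    # As an illustrative (hypothetical) example, these labels are often set to match
    # a Prometheus Operator ruleSelector; the release value below is a placeholder:
    # labels:
    #   release: kube-prometheus-stack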

# -- Create & use PSP resources. Set this to the same value as the rook-ceph chart.
pspEnable: false

# The imagePullSecrets option allows pulling docker images from a private docker registry. It will be passed to all service accounts.
# imagePullSecrets:
# - name: my-registry-secret

# All values below are taken from the CephCluster CRD
# -- Cluster configuration.
# @default -- See [below](#ceph-cluster-spec)
cephClusterSpec:
  # This cluster spec example is for a converged cluster where all the Ceph daemons are running locally,
  # as in the host-based example (cluster.yaml). For a different configuration such as a
  # PVC-based cluster (cluster-on-pvc.yaml), external cluster (cluster-external.yaml),
  # or stretch cluster (cluster-stretched.yaml), replace this entire `cephClusterSpec`
  # with the specs from those examples.

  # For more details, check https://rook.io/docs/rook/v1.10/CRDs/Cluster/ceph-cluster-crd/
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v18 is Reef, v19 is Squid
    # RECOMMENDATION: In production, use a specific version tag instead of the general v19 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such as quay.io/ceph/ceph:v19.2.2-20250409
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: quay.io/ceph/ceph:v19.2.2
    # Whether to allow unsupported versions of Ceph. Currently Reef and Squid are supported.
    # Future versions such as Tentacle (v20) would require this to be set to `true`.
    # Do not set to true in production.
    allowUnsupported: false

  # The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook

  # Whether or not the upgrade should continue even if a check fails.
  # This means Ceph's status could be degraded and we don't recommend upgrading, but you might decide otherwise.
  # Use at your OWN risk.
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/v1.10/Upgrade/ceph-upgrade/
  skipUpgradeChecks: false

  # Whether or not to continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false

  # WaitTimeoutForHealthyOSDInMinutes defines the time (in minutes) the operator would wait before an OSD can be stopped for upgrade or restart.
  # If the timeout is exceeded and the OSD is not ok to stop, then the operator would skip upgrade for the current OSD and proceed with the next one
  # if `continueUpgradeAfterChecksEvenIfNotHealthy` is `false`. If `continueUpgradeAfterChecksEvenIfNotHealthy` is `true`, then the operator would
  # continue with the upgrade of an OSD even if it's not ok to stop after the timeout. This timeout won't be applied if `skipUpgradeChecks` is `true`.
  # The default wait timeout is 10 minutes.
  waitTimeoutForHealthyOSDInMinutes: 10

  # Whether or not to require that PGs are clean before an OSD upgrade. If set to `true`, the OSD upgrade process won't start until PGs are healthy.
  # This configuration will be ignored if `skipUpgradeChecks` is `true`.
  # Default is false.
  upgradeOSDRequiresHealthyPGs: false

  mon:
    # Set the number of mons to be started. Generally recommended to be 3.
    # For highest availability, an odd number of mons should be specified.
    count: 3
    # The mons should be on unique nodes. For production, at least 3 nodes are recommended for this reason.
    # Mons should only be allowed on the same node for test environments where data loss is acceptable.
    allowMultiplePerNode: false

  mgr:
    # When higher availability of the mgr is needed, increase the count to 2.
    # In that case, one mgr will be active and one in standby. When Ceph updates which
    # mgr is active, Rook will update the mgr services to match the active mgr.
    count: 2
    allowMultiplePerNode: false
    modules:
      # List of modules to optionally enable or disable.
      # Note the "dashboard" and "monitoring" modules are already configured by other settings in the cluster CR.
      # - name: rook
      #   enabled: true

  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # port: 8443
    # Serve the dashboard using SSL (if using ingress to expose the dashboard and `ssl: true` you need to set
    # the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
    ssl: true

  # Network configuration, see: https://github.com/rook/rook/blob/master/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
  network:
    connections:
      # Whether to encrypt the data in transit across the wire to prevent eavesdropping on the data on the network.
      # The default is false. When encryption is enabled, all communication between clients and Ceph daemons, or between Ceph daemons, will be encrypted.
      # When encryption is not enabled, clients still establish a strong initial authentication and data integrity is still validated with a crc check.
      # IMPORTANT: Encryption requires the 5.11 kernel for the latest nbd and cephfs drivers. Alternatively for testing only,
      # you can set "mounter: rbd-nbd" in the rbd storage class, or "mounter: fuse" in the cephfs storage class.
      # The nbd and fuse drivers are *not* recommended in production since restarting the csi driver pod will disconnect the volumes.
      encryption:
        enabled: false
      # Whether to compress the data in transit across the wire. The default is false.
      # The kernel requirements above for encryption also apply to compression.
      compression:
        enabled: false
      # Whether to require communication over msgr2. If true, the msgr v1 port (6789) will be disabled
      # and clients will be required to connect to the Ceph cluster with the v2 port (3300).
      # Requires a kernel that supports msgr v2 (kernel 5.11 or CentOS 8.4 or newer).
      requireMsgr2: false
  #   # enable host networking
  #   provider: host
  #   # EXPERIMENTAL: enable the Multus network provider
  #   provider: multus
  #   selectors:
  #     # The selector keys are required to be `public` and `cluster`.
  #     # Based on the configuration, the operator will do the following:
  #     #   1. if only the `public` selector key is specified, both the public_network and cluster_network Ceph settings will listen on that interface
  #     #   2. if both `public` and `cluster` selector keys are specified, the first one will point to the 'public_network' flag and the second one to 'cluster_network'
  #     #
  #     # In order to work, each selector value must match a NetworkAttachmentDefinition object in Multus
  #     #
  #     # public: public-conf --> NetworkAttachmentDefinition object name in Multus
  #     # cluster: cluster-conf --> NetworkAttachmentDefinition object name in Multus
  #   # Provide internet protocol version. IPv6, IPv4 or empty string are valid options. Empty string would mean IPv4
  #   ipFamily: "IPv6"
  #   # Ceph daemons to listen on both IPv4 and IPv6 networks
  #   dualStack: false

  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
    # Uncomment daysToRetain to prune ceph crash entries older than the
    # specified number of days.
    # daysToRetain: 30

  # enable the log collector; daemons will log to files, which will be rotated
  logCollector:
    enabled: true
    periodicity: daily # one of: hourly, daily, weekly, monthly
    maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

  # automate [data cleanup process](https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
  cleanupPolicy:
    # Since cluster cleanup is destructive to data, confirmation is required.
    # To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
    # This value should only be set when the cluster is about to be deleted. After the confirmation is set,
    # Rook will immediately stop configuring the cluster and only wait for the delete command.
    # If the empty string is set, Rook will not destroy any data on hosts during uninstall.
    confirmation: ""
    # sanitizeDisks represents settings for sanitizing OSD disks on cluster deletion
    sanitizeDisks:
      # method indicates if the entire disk should be sanitized or simply ceph's metadata
      # in both cases, re-install is possible
      # possible choices are 'complete' or 'quick' (default)
      method: quick
      # dataSource indicates where to get random bytes from to write on the disk
      # possible choices are 'zero' (default) or 'random'
      # using random sources will consume entropy from the system and will take much more time than the zero source
      dataSource: zero
      # iteration: overwrite N times instead of the default (1)
      # takes an integer value
      iteration: 1
    # allowUninstallWithVolumes defines how the uninstall should be performed
    # If set to true, cephCluster deletion does not wait for the PVs to be deleted.
    allowUninstallWithVolumes: false

  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  # placement:
  #   all:
  #     nodeAffinity:
  #       requiredDuringSchedulingIgnoredDuringExecution:
  #         nodeSelectorTerms:
  #           - matchExpressions:
  #             - key: role
  #               operator: In
  #               values:
  #               - storage-node
  #     podAffinity:
  #     podAntiAffinity:
  #     topologySpreadConstraints:
  #     tolerations:
  #     - key: storage-node
  #       operator: Exists
  #   # The above placement information can also be specified for mon, osd, and mgr components
  #   mon:
  #   # Monitor deployments may contain an anti-affinity rule for avoiding monitor
  #   # collocation on the same node. This is a required rule when host network is used
  #   # or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
  #   # preferred rule with weight: 50.
  #   osd:
  #   mgr:
  #   cleanup:

  # annotations:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   prepareosd:
  #   # If no mgr annotations are set, prometheus scrape annotations will be set by default.
  #   mgr:
  #   dashboard:

  # labels:
  #   all:
  #   mon:
  #   osd:
  #   cleanup:
  #   mgr:
  #   prepareosd:
  #   # monitoring is a list of key-value pairs. It is injected into all the monitoring resources created by operator.
  #   # These labels can be passed as LabelSelector to Prometheus
  #   monitoring:
  #   dashboard:

  resources:
    mgr:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "512Mi"
    mon:
      limits:
        memory: "2Gi"
      requests:
        cpu: "1000m"
        memory: "1Gi"
    osd:
      limits:
        memory: "4Gi"
      requests:
        cpu: "1000m"
        memory: "4Gi"
    prepareosd:
      # limits: It is not recommended to set limits on the OSD prepare job
      #         since it's a one-time burst for memory that must be allowed to
      #         complete without an OOM kill.  Note however that if a k8s
      #         limitRange guardrail is defined external to Rook, the lack of
      #         a limit here may result in a sync failure, in which case a
      #         limit should be added.  1200Mi may suffice for up to 15Ti
      #         OSDs; for larger devices 2Gi may be required.
      #         cf. https://github.com/rook/rook/pull/11103
      requests:
        cpu: "500m"
        memory: "50Mi"
    mgr-sidecar:
      limits:
        memory: "100Mi"
      requests:
        cpu: "100m"
        memory: "40Mi"
    crashcollector:
      limits:
        memory: "60Mi"
      requests:
        cpu: "100m"
        memory: "60Mi"
    logcollector:
      limits:
        memory: "1Gi"
      requests:
        cpu: "100m"
        memory: "100Mi"
    cleanup:
      limits:
        memory: "1Gi"
      requests:
        cpu: "500m"
        memory: "100Mi"
    exporter:
      limits:
        memory: "128Mi"
      requests:
        cpu: "50m"
        memory: "50Mi"

  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false

  # priority classes to apply to ceph resources
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical

  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: true
    # deviceFilter:
    # config:
    #   crushRoot: "custom-root" # specify a non-default root label for the CRUSH map
    #   metadataDevice: "md0" # specify a non-rotational storage device so ceph-volume will use it as the block db device of bluestore.
    #   databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
    #   osdsPerDevice: "1" # this value can be overridden at the node or device level
    #   encryptedDevice: "true" # the default value for this option is "false"
    # # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # # nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
    # nodes:
    #   - name: "172.17.4.201"
    #     devices: # specific devices to use for storage can be specified for each node
    #       - name: "sdb"
    #       - name: "nvme01" # multiple osds can be created on high performance devices
    #         config:
    #           osdsPerDevice: "5"
    #       - name: "/dev/disk/by-id/ata-ST4000DM004-XXXX" # devices can be specified using full udev paths
    #     config: # configuration can be specified at the node level which overrides the cluster level config
    #   - name: "172.17.4.301"
    #     deviceFilter: "^sd."

  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: true
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30

  # Configure the healthcheck and liveness probes for ceph pods.
  # Valid values for daemons are 'mon', 'osd', 'status'
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    # Change the pod liveness probe; it works for all mon, mgr, and osd pods.
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false

ingress:
  # -- Enable an ingress for the ceph-dashboard
  dashboard:
    # {}
    # labels:
    #   external-dns/private: "true"
    annotations:
      "route.openshift.io/termination": "passthrough"
      # external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
      # nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
      # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
      # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
      # nginx.ingress.kubernetes.io/server-snippet: |
      #   proxy_ssl_verify off;
    host:
      name: ceph.apps.ncd0.harmony.mcd
      path: null # TODO: the chart does not allow removing the path, which causes OpenShift to fail creating a route because a path is not supported with termination mode passthrough
      pathType: ImplementationSpecific
    tls:
    - {}
    #   secretName: testsecret-tls
    # Note: Only one of the ingress class annotation or `ingressClassName:` can be used at a time
    # to set the ingress class
    # ingressClassName: openshift-default

    # The chart's commented defaults are kept below for reference:
    # labels:
    #   external-dns/private: "true"
    # annotations:
    #   external-dns.alpha.kubernetes.io/hostname: dashboard.example.com
    #   nginx.ingress.kubernetes.io/rewrite-target: /ceph-dashboard/$2
    # If the dashboard has ssl: true the following will make sure the NGINX Ingress controller can expose the dashboard correctly
    #   nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    #   nginx.ingress.kubernetes.io/server-snippet: |
    #     proxy_ssl_verify off;
    # host:
    #   name: dashboard.example.com
    #   path: "/ceph-dashboard(/|$)(.*)"
    #   pathType: Prefix
    # tls:
    # - hosts:
    #     - dashboard.example.com
    #   secretName: testsecret-tls
    ## Note: Only one of the ingress class annotation or `ingressClassName:` can be used at a time
    ## to set the ingress class
    # ingressClassName: nginx

# -- A list of CephBlockPool configurations to deploy
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
  - name: ceph-blockpool
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
    spec:
      failureDomain: host
      replicated:
        size: 3
      # Enables collecting RBD per-image IO statistics by enabling dynamic OSD performance counters. Defaults to false.
      # For reference: https://docs.ceph.com/docs/latest/mgr/prometheus/#rbd-io-statistics
      # enableRBDStats: true
    storageClass:
      enabled: true
      name: ceph-block
      annotations: {}
      labels: {}
      isDefault: true
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      mountOptions: []
      # see https://kubernetes.io/docs/concepts/storage/storage-classes/#allowed-topologies
      allowedTopologies: []
      #        - matchLabelExpressions:
      #            - key: rook-ceph-role
      #              values:
      #                - storage-node
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
      parameters:
        # (optional) mapOptions is a comma-separated list of map options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # mapOptions: lock_on_read,queue_depth=1024

        # (optional) unmapOptions is a comma-separated list of unmap options.
        # For krbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
        # For nbd options refer
        # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
        # unmapOptions: force

        # RBD image format. Defaults to "2".
        imageFormat: "2"

        # RBD image features, equivalent to OR'd bitfield value: 63
        # Available for imageFormat: "2". Older releases of CSI RBD
        # support only the `layering` feature. The Linux kernel (KRBD) supports the
        # full feature complement as of 5.4
        imageFeatures: layering

        # These secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4
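
# An illustrative (hypothetical) PVC that would consume the "ceph-block" StorageClass
# defined above; the claim name and size are placeholders:
# ---
# apiVersion: v1
# kind: PersistentVolumeClaim
# metadata:
#   name: rbd-pvc
# spec:
#   accessModes:
#     - ReadWriteOnce
#   resources:
#     requests:
#       storage: 1Gi
#   storageClassName: ceph-block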

# -- A list of CephFileSystem configurations to deploy
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
  - name: ceph-filesystem
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
    spec:
      metadataPool:
        replicated:
          size: 3
      dataPools:
        - failureDomain: host
          replicated:
            size: 3
          # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
          name: data0
      metadataServer:
        activeCount: 1
        activeStandby: true
        resources:
          limits:
            memory: "4Gi"
          requests:
            cpu: "1000m"
            memory: "4Gi"
        priorityClassName: system-cluster-critical
    storageClass:
      enabled: true
      isDefault: false
      name: ceph-filesystem
      # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default
      pool: data0
      reclaimPolicy: Delete
      allowVolumeExpansion: true
      volumeBindingMode: "Immediate"
      annotations: {}
      labels: {}
      mountOptions: []
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
      parameters:
        # The secrets contain Ceph admin credentials.
        csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
        csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
        csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
        csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
        # Specify the filesystem type of the volume. If not specified, csi-provisioner
        # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock
        # in hyperconverged settings where the volume is mounted on the same node as the osds.
        csi.storage.k8s.io/fstype: ext4

# -- Settings for the filesystem snapshot class
# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
cephFileSystemVolumeSnapshotClass:
  enabled: false
  name: ceph-filesystem
  isDefault: true
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
  parameters: {}

# -- Settings for the block pool snapshot class
# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
cephBlockPoolsVolumeSnapshotClass:
  enabled: false
  name: ceph-block
  isDefault: false
  deletionPolicy: Delete
  annotations: {}
  labels: {}
  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
  parameters: {}
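
# An illustrative (hypothetical) VolumeSnapshot that would use the "ceph-block"
# snapshot class above once it is enabled; the names below are placeholders:
# ---
# apiVersion: snapshot.storage.k8s.io/v1
# kind: VolumeSnapshot
# metadata:
#   name: rbd-pvc-snapshot
# spec:
#   volumeSnapshotClassName: ceph-block
#   source:
#     persistentVolumeClaimName: rbd-pvc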

# -- A list of CephObjectStore configurations to deploy
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
  - name: ceph-objectstore
    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
    spec:
      metadataPool:
        failureDomain: host
        replicated:
          size: 3
      dataPool:
        failureDomain: host
        erasureCoded:
          dataChunks: 2
          codingChunks: 1
        parameters:
          bulk: "true"
      preservePoolsOnDelete: true
      gateway:
        port: 80
        resources:
          limits:
            memory: "2Gi"
          requests:
            cpu: "1000m"
            memory: "1Gi"
        # securePort: 443
        # sslCertificateRef:
        instances: 1
        priorityClassName: system-cluster-critical
        # opsLogSidecar:
        #   resources:
        #     limits:
        #       memory: "100Mi"
        #     requests:
        #       cpu: "100m"
        #       memory: "40Mi"
    storageClass:
      enabled: true
      name: ceph-bucket
      reclaimPolicy: Delete
      volumeBindingMode: "Immediate"
      annotations: {}
      labels: {}
      # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
      parameters:
        # note: objectStoreNamespace and objectStoreName are configured by the chart
        region: us-east-1
    ingress:
      # Enable an ingress for the ceph-objectstore
      enabled: true
      # The ingress port by default will be the object store's "securePort" (if set), or the gateway "port".
      # To override those defaults, set this ingress port to the desired port.
      # port: 80
      # annotations: {}
      host:
        name: objectstore.apps.ncd0.harmony.mcd
        path: /
        pathType: Prefix
      # tls:
      # - hosts:
      #     - objectstore.example.com
      #   secretName: ceph-objectstore-tls
      # ingressClassName: nginx
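
# An illustrative (hypothetical) ObjectBucketClaim that would request a bucket
# through the "ceph-bucket" StorageClass above; the names are placeholders:
# ---
# apiVersion: objectbucket.io/v1alpha1
# kind: ObjectBucketClaim
# metadata:
#   name: ceph-bucket-claim
# spec:
#   generateBucketName: ceph-bkt
#   storageClassName: ceph-bucket
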
## cephECBlockPools are disabled by default, please remove the comments and set desired values to enable it
## For erasure coding, a replicated metadata pool is required.
## https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
#cephECBlockPools:
#  - name: ec-pool
#    spec:
#      metadataPool:
#        replicated:
#          size: 2
#      dataPool:
#        failureDomain: osd
#        erasureCoded:
#          dataChunks: 2
#          codingChunks: 1
#        deviceClass: hdd
#
#    parameters:
#      # clusterID is the namespace where the rook cluster is running
#      # If you change this namespace, also change the namespace below where the secret namespaces are defined
#      clusterID: rook-ceph # namespace:cluster
#      # (optional) mapOptions is a comma-separated list of map options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # mapOptions: lock_on_read,queue_depth=1024
#
#      # (optional) unmapOptions is a comma-separated list of unmap options.
#      # For krbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
#      # For nbd options refer
#      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
#      # unmapOptions: force
#
#      # RBD image format. Defaults to "2".
#      imageFormat: "2"
#
#      # RBD image features, equivalent to OR'd bitfield value: 63
#      # Available for imageFormat: "2". Older releases of CSI RBD
#      # support only the `layering` feature. The Linux kernel (KRBD) supports the
#      # full feature complement as of 5.4
#      # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
#      imageFeatures: layering
#
#    storageClass:
#      provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
#      enabled: true
#      name: rook-ceph-block
#      isDefault: false
#      annotations: { }
#      labels: { }
#      allowVolumeExpansion: true
#      reclaimPolicy: Delete

# -- CSI driver name prefix for cephfs, rbd and nfs.
# @default -- `namespace name where rook-ceph operator is deployed`
csiDriverNamePrefix:
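# As a hypothetical illustration, leaving the prefix at its default (the operator
# namespace name, e.g. rook-ceph) yields driver names such as rook-ceph.rbd.csi.ceph.com:
# csiDriverNamePrefix: rook-ceph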