feat: initial codebase

This commit is contained in:
Med Mouine 2024-09-03 10:22:37 -04:00
commit 412789abb2
24 changed files with 2879 additions and 0 deletions

70
.gitignore vendored Normal file
View File

@ -0,0 +1,70 @@
### dotenv template
.env
### OSX template
# General
.DS_Store
.AppleDouble
.LSOverride
.vscode/
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Example user template template
### Example user template
# IntelliJ project files
.idea
*.iml
out
gen
### Go template
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
### SSH template
**/.ssh/id_*
**/.ssh/*_id_*
**/.ssh/known_hosts
### Helm template
# Chart dependencies
**/charts/*.tgz

7
.tool-versions Normal file
View File

@ -0,0 +1,7 @@
kubectl 1.26.7
minikube 1.29.0
helm 3.11.0
stern 1.23.0
yq 4.34.2
gomplate v3.11.5
vale 3.6.1

11
README.md Normal file
View File

@ -0,0 +1,11 @@
# Cluster management template project
This project is based on a GitLab [Project Template](https://docs.gitlab.com/ee/gitlab-basics/create-project.html).
For more information, see [the documentation for this template](https://docs.gitlab.com/ee/user/clusters/management_project_template.html).
Improvements can be proposed in the [original project](https://gitlab.com/gitlab-org/project-templates/cluster-management).
## Supported Kubernetes versions
The project should be used with a [supported version of Kubernetes cluster](https://docs.gitlab.com/ee/user/clusters/agent/#supported-cluster-versions).

View File

@ -0,0 +1,12 @@
---
repositories:
  - name: gitea-charts
    url: https://dl.gitea.io/charts/
releases:
  - name: gitea
    namespace: nt
    chart: gitea-charts/gitea
    version: 8.3.0
    values:
      # NOTE(review): values.yaml contains `{{ env ... }}` go-template
      # expressions, but helmfile only renders templates in values files
      # named `*.gotmpl` — presumably this should reference
      # `values.yaml.gotmpl`; confirm before relying on the env lookups.
      - values.yaml

View File

@ -0,0 +1,71 @@
# Gitea Helm values
#
# NOTE(review): this file uses `{{ env ... }}` go-template expressions.
# Helmfile renders templates only in values files named `*.gotmpl`; as a
# plain values.yaml these are passed to the chart as literal strings —
# confirm the file is wired up as a .gotmpl values file.
gitea:
  admin:
    username: gitea_admin
    # Fix: the original `"{{ env "..." }}"` form is invalid YAML — the inner
    # double quote terminates the scalar early. Single quotes keep the whole
    # template expression as one scalar and render to the same value.
    password: '{{ env "GITEA_ADMIN_PASSWORD" }}'
    email: "admin@example.com"
  config:
    APP_NAME: "Gitea: Git with a cup of tea"
    RUN_MODE: prod
    server:
      SSH_DOMAIN: gitea.nationtech.io
      DOMAIN: gitea.nationtech.io
      ROOT_URL: https://gitea.nationtech.io
      DISABLE_SSH: false
      START_SSH_SERVER: true
      # NOTE(review): binding port 22 inside the (typically non-root) gitea
      # container usually fails; the chart default listen port is 2222 with a
      # Service mapping to 22 — confirm this deployment can actually bind 22.
      SSH_PORT: 22
      SSH_LISTEN_PORT: 22
    database:
      DB_TYPE: postgres
      HOST: postgresql:5432
      NAME: gitea
      USER: gitea
      # Same quoting fix as above.
      PASSWD: '{{ env "GITEA_DB_PASSWORD" }}'
    service:
      DISABLE_REGISTRATION: true
    security:
      INSTALL_LOCK: true
      # Same quoting fix as above.
      SECRET_KEY: '{{ env "GITEA_SECRET_KEY" }}'
    webhook:
      ALLOWED_HOST_LIST: woodpecker.nationtech.io
persistence:
  enabled: true
  size: 10Gi
ingress:
  enabled: true
  annotations:
    kubernetes.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: gitea.nationtech.io
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: gitea-tls
      hosts:
        - gitea.nationtech.io
postgresql:
  enabled: true
  global:
    postgresql:
      auth:
        username: gitea
        # Same quoting fix as above; must match gitea.config.database.PASSWD.
        password: '{{ env "GITEA_DB_PASSWORD" }}'
        database: gitea
  primary:
    persistence:
      enabled: true
      size: 8Gi
      storageClass: "managed-premium"
metrics:
  enabled: true
  serviceMonitor:
    enabled: true
    namespace: monitoring
    interval: 1m

View File

@ -0,0 +1,12 @@
---
# Fix: added the `---` document-start marker for consistency with the other
# helmfiles in this repo (yamllint `document-start`).
repositories:
  - name: gitlab
    url: https://charts.gitlab.io
releases:
  - name: runner
    namespace: gitlab-managed-apps
    chart: gitlab/gitlab-runner
    version: 0.44.0
    installed: true
    values:
      - values.yaml.gotmpl

View File

@ -0,0 +1,80 @@
## REQUIRED VALUES
gitlabUrl: {{ requiredEnv "CI_SERVER_URL" | quote }}
runnerRegistrationToken: {{ requiredEnv "GITLAB_RUNNER_REGISTRATION_TOKEN" | quote }}
## Configure the maximum number of concurrent jobs
## - Documentation: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
## - Default value: 10
## - Auto-scaling is currently not supported.
concurrent: 4
## Defines in seconds how often to check GitLab for new builds
## - Documentation: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
## - Default value: 3
checkInterval: 3
## For RBAC support
rbac:
create: true
clusterWideAccess: false
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["pods/exec"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["pods/attach"]
verbs: ["create"]
- apiGroups: [""]
resources: ["secrets", "configmaps"]
verbs: ["create", "update", "delete"]
- apiGroups: [""]
resources: ["services"]
verbs: ["create"]
## Configuration for the Pods that the runner launches for each new job
runners:
image: ubuntu:20.04
builds: {}
services: {}
helpers: {}
## Specify the tags associated with the runner. Comma-separated list of tags.
## - Documentation: https://docs.gitlab.com/ce/ci/runners/#using-tags
tags: kubernetes,cluster
## Determine whether the runner should also run jobs without tags.
## - Documentation: https://docs.gitlab.com/ee/ci/runners/configure_runners.html#set-a-runner-to-run-untagged-jobs
# runUntagged: true
## Run all containers with the privileged flag enabled
## This will allow the docker:dind image to run if you need to run Docker
## commands. Please read the docs before turning this on:
## - Documentation: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind
privileged: true
## Kubernetes related options to control which nodes executors use
## - Documentation: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
# nodeSelector:
# myLabel: myValue
#
## Documentation: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
# nodeTolerations:
# - key: myTaint
# operator: Equal
# value: myValue
# effect: NoSchedule
## If you can't find a setting you think should be here this may help:
##
## The gitlab-runner chart uses `templates/configmap.yaml` to configure runners
## `configmap.yaml`'s `data.register-the-runner` transforms this file into runner CLI options
## `configmap.yaml`'s `data.config.toml` and `data.config.template.toml` transform this file into the runner's config.toml
##
## - Source code for `configmap.yaml` https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/main/templates/configmap.yaml
## - Documentation for `config.toml` https://docs.gitlab.com/runner/executors/kubernetes.html#the-available-configtoml-settings
## - Source code for runner CLI options (see `KubernetesConfig` struct) https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/common/config.go
resources: {}

View File

@ -0,0 +1,19 @@
---
repositories:
  - name: gitlab
    url: https://charts.gitlab.io/
  # Fix: the `grafana` release below uses chart `grafana/grafana`, but the
  # `grafana` repository was never declared — helmfile fails to resolve the
  # chart. Declare the repository here (same URL as the logging helmfile).
  - name: grafana
    url: https://grafana.github.io/helm-charts
releases:
  - name: gitlab
    namespace: gitlab
    chart: gitlab/gitlab
    version: 8.4.0
    installed: true
    values:
      - values.yaml
  - name: grafana
    namespace: logging
    chart: grafana/grafana
    version: 6.50.7
    values:
      - applications/logging/values/grafana-values.yaml

View File

@ -0,0 +1,600 @@
---
serviceAccount:
enabled: true
certificates:
customCAs: []
image:
repository: registry.gitlab.com/gitlab-org/build/cng/certificates
certmanager:
install: false
installCRDs: false
nameOverride: certmanager
rbac:
create: true
certmanager-issuer:
email: security@nationtech.io
common:
labels: {}
deployment:
annotations: {}
envVars:
- name: CI_JOB_ID
value: $CI_JOB_ID
geo:
enabled: false
gitlab:
gitlab-exporter:
enabled: true
gitlab-pages:
ingress:
tls:
secretName: gitlab-pages-tls
enabled: true
gitlab-shell:
enabled: true
kas:
enabled: true
mailroom:
enabled: true
migrations:
enabled: true
sidekiq:
enabled: true
toolbox:
antiAffinityLabels:
matchLabels:
app: gitaly
enabled: true
replicas: 1
webservice:
enabled: true
ingress:
tls:
secretName: gitlab-tls
gitlab-zoekt:
gateway:
basicAuth:
enabled: true
secretName: '{{ include "gitlab.zoekt.gateway.basicAuth.secretName" $ }}'
indexer:
internalApi:
enabled: true
gitlabUrl: '{{ include "gitlab.zoekt.indexer.internalApi.gitlabUrl" $ }}'
secretKey: '{{ include "gitlab.zoekt.indexer.internalApi.secretKey" $ }}'
secretName: '{{ include "gitlab.zoekt.indexer.internalApi.secretName" $ }}'
install: true
global:
edition: ee
affinity:
nodeAffinity:
key: topology.kubernetes.io/zone
values: []
podAntiAffinity:
topologyKey: kubernetes.io/hostname
antiAffinity: soft
gitaly:
enabled: true
replicas: 2
praefect:
enabled: false
redis:
cluster:
enabled: false
appConfig:
resources:
requests:
cpu: 200m
memory: 1Gi
limits:
cpu: 1
memory: 1Gi
smartcard:
enabled: false
kerberos:
dedicatedPort:
enabled: false
https: true
port: 8443
enabled: false
keytab:
key: keytab
simpleLdapLinkingAllowedRealms: []
kubectl:
image:
repository: registry.gitlab.com/gitlab-org/build/cng/kubectl
securityContext:
fsGroup: 65534
runAsUser: 65534
ldap:
preventSignin: false
servers: {}
lfs:
bucket: git-lfs
connection: {}
enabled: true
proxy_download: true
maxRequestDurationSeconds: null
microsoft_graph_mailer:
enabled: false
minio:
enabled: true
monitoring:
enabled: true
object_store:
enabled: true
proxy_download: true
omniauth:
enabled: false
packages:
enabled: true
proxy_download: true
bucket: gitlab-packages
connection: {}
pages:
enabled: true
host: pages.gitlab.nationtech.io
namespaceInPath: true
accessControl: true
artifactsServer: true
https: null
objectStore:
bucket: gitlab-pages
connection: {}
enabled: true
applicationSettingsCacheSeconds: 60
artifacts:
bucket: gitlab-artifacts
enabled: true
proxy_download: true
backups:
bucket: gitlab-backups
tmpBucket: tmp
ciSecureFiles:
bucket: gitlab-ci-secure-files
connection: {}
enabled: false
contentSecurityPolicy:
enabled: false
report_only: true
cron_jobs: {}
defaultProjectsFeatures:
builds: true
issues: true
mergeRequests: true
snippets: true
wiki: true
dependencyProxy:
bucket: gitlab-dependency-proxy
connection: {}
enabled: true
proxy_download: true
duoAuth:
enabled: false
enableImpersonation: false
enableSeatLink: true
enableUsagePing: true
externalDiffs:
bucket: gitlab-mr-diffs
connection: {}
enabled: false
proxy_download: true
extra:
bizible:
googleAnalyticsId: null
googleTagManagerNonceId: null
matomoDisableCookies: null
matomoSiteId: null
matomoUrl: null
oneTrustId: null
gitlab_docs:
enabled: false
kas:
enabled: true
service:
apiExternalPort: 8153
tls:
enabled: false
verify: true
graphQlTimeout: null
gravatar:
plainUrl: null
sslUrl: null
hosts:
domain: brizo.nationtech.io
externalIP: null
https: true
gitlab:
name: gitlab.nationtech.io
minio:
name: minio.gitlab.nationtech.io
registry:
name: registry.gitlab.nationtech.io
protocol: https
ssh: gitlab.nationtech.io
incomingEmail:
enabled: false
ingress:
annotations:
cert-manager.io/issuer: letsencrypt-prod
kubernetes.io/tls-acme: 'true'
nginx.ingress.kubernetes.io/proxy-body-size: 10000m
class: nginx
configureCertmanager: false
enabled: true
path: /
pathType: Prefix
provider: nginx
tls:
enabled: true
secretName: gitlab-tls
useNewIngressForCerts: false
initialDefaults: {}
initialRootPassword: {}
issueClosingPattern: null
job:
nameSuffixOverride: null
keda:
enabled: false
psql:
ci: {}
connectTimeout: null
database: gitlabhq_production
keepalives: null
keepalivesCount: null
keepalivesIdle: null
keepalivesInterval: null
main: {}
password:
key: postgres-password
secret: gitlab-postgres
useSecret: true
tcpUserTimeout: null
username: gitlab
registry:
enabled: true
host: registry.gitlab.nationtech.io
port: 433
tokenIssuer: gitlab-issuer
api:
protocol: http
serviceName: registry
port: 5000
tls:
enabled: true
secretName: gitlab-registry-tls
sentry:
clientside_dsn: null
dsn: null
enabled: false
environment: null
serviceDeskEmail:
enabled: false
shell:
authToken:
secret: gitlab-gitlab-shell-auth-token
hostKeys:
secret: gitlab-gitlab-shell-host-keys
sidekiq:
routingRules: []
smtp:
enabled: false
uploads:
bucket: gitlab-uploads
enabled: true
proxy_download: true
usernameChangingEnabled: true
webhookTimeout: null
webservice:
tls:
enabled: true
workerTimeout: 300
workhorse:
tls:
enabled: false
serviceName: webservice-default
minio:
install: true
nginx-ingress:
class: nginx
enabled: false
tcpExternalConfig: 'true'
nginx-ingress-geo:
enabled: false
rbac:
create: true
scope: false
serviceAccount:
create: true
tcpExternalConfig: 'true'
postgresql:
auth:
existingSecret: '{{ include "gitlab.psql.password.secret" . }}'
password: bogus-satisfy-upgrade
postgresPassword: bogus-satisfy-upgrade
replicationPassword: ""
replicationUsername: repl_user
secretKeys:
adminPasswordKey: postgresql-postgres-password
replicationPasswordKey: replication-password
userPasswordKey: '{{ include "gitlab.psql.password.key" $ }}'
usePasswordFiles: false
image:
tag: 14.10.0
install: true
metrics:
enabled: true
service:
annotations:
gitlab.com/prometheus_port: '9187'
gitlab.com/prometheus_scrape: 'true'
prometheus.io/port: '9187'
prometheus.io/scrape: 'true'
primary:
extraVolumeMounts:
- mountPath: /docker-entrypoint-preinitdb.d/init_revision.sh
name: custom-init-scripts
subPath: init_revision.sh
initdb:
scriptsConfigMap: '{{ include "gitlab.psql.initdbscripts" $}}'
podAnnotations:
postgresql.gitlab/init-revision: '1'
prometheus:
install: false
alertmanager:
enabled: true
config:
global:
resolve_timeout: 5m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'null'
routes:
- match:
alertname: Watchdog
receiver: 'null'
receivers:
- name: 'null'
kubeStateMetrics:
enabled: true
nodeExporter:
enabled: true
pushgateway:
enabled: true
rbac:
create: true
server:
image:
tag: v2.38.0
retention: 15d
strategy:
type: Recreate
serverFiles:
prometheus.yml:
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- localhost:9090
- bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
job_name: kubernetes-apiservers
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- action: keep
regex: default;kubernetes;https
source_labels:
- __meta_kubernetes_namespace
- __meta_kubernetes_service_name
- __meta_kubernetes_endpoint_port_name
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
insecure_skip_verify: true
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_gitlab_com_prometheus_scrape
- action: replace
regex: (https?)
source_labels:
- __meta_kubernetes_pod_annotation_gitlab_com_prometheus_scheme
target_label: __scheme__
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_gitlab_com_prometheus_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_gitlab_com_prometheus_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
- job_name: kubernetes-service-endpoints
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_service_annotation_gitlab_com_prometheus_scrape
- action: replace
regex: (https?)
source_labels:
- __meta_kubernetes_service_annotation_gitlab_com_prometheus_scheme
target_label: __scheme__
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_service_annotation_gitlab_com_prometheus_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_service_annotation_gitlab_com_prometheus_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: kubernetes_node
- job_name: kubernetes-services
kubernetes_sd_configs:
- role: service
metrics_path: /probe
params:
module:
- http_2xx
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_service_annotation_gitlab_com_prometheus_probe
- source_labels:
- __address__
target_label: __param_target
- replacement: blackbox
target_label: __address__
- source_labels:
- __param_target
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- source_labels:
- __meta_kubernetes_service_name
target_label: kubernetes_name
rbac:
create: true
redis:
architecture: standalone
auth:
existingSecret: gitlab-redis-secret
existingSecretKey: redis-password
usePasswordFiles: true
cluster:
enabled: false
install: true
metrics:
enabled: true
registry:
enabled: true
database:
enabled: true
name: registry # must match the database name you created above
user: registry # must match the database username you created above
password:
secret: gitlab-registry-postgresql # must match the secret name
key: password # must match the secret key to read the password from
sslmode: verify-full
ssl:
secret: gitlab-registry-postgresql-ssl # you will need to create this secret manually
clientKey: client-key.pem
clientCertificate: client-cert.pem
serverCA: server-ca.pem
migrations:
enabled: true # this option will execute the schema migration as part of the registry deployment
tls:
enabled: true
secretName: gitlab-registry-tls
shared-secrets:
enabled: true
env: production
rbac:
create: true
resources:
requests:
cpu: 50m
securityContext:
fsGroup: 65534
runAsUser: 65534
selfsign:
caSubject: GitLab Helm Chart
expiry: 3650d
image:
repository: registry.gitlab.com/gitlab-org/build/cng/cfssl-self-sign
keyAlgorithm: rsa
keySize: '4096'
serviceAccount:
create: true
enabled: true
gitlab-runner:
install: true
rbac:
create: true
runners:
locked: false
# Set secret to an arbitrary value because the runner chart renders the gitlab-runner.secret template only if it is not empty.
# The parent/GitLab chart overrides the template to render the actual secret name.
secret: "nonempty"
privileged: true
config: |
[[runners]]
[runners.kubernetes]
privileged = true
image = "ubuntu:22.04"
{{- if .Values.global.minio.enabled }}
[runners.cache]
Type = "s3"
Path = "gitlab-runner"
Shared = true
[runners.cache.s3]
ServerAddress = {{ include "gitlab-runner.cache-tpl.s3ServerAddress" . }}
BucketName = "runner-cache"
BucketLocation = "us-east-1"
Insecure = true
{{ end }}
podAnnotations:
gitlab.com/prometheus_scrape: "true"
gitlab.com/prometheus_port: 9252
upgradeCheck:
annotations: {}
configMapAnnotations: {}
enabled: true
image: {}
priorityClassName: ''
resources:
requests:
cpu: 50m
securityContext:
fsGroup: 65534
runAsUser: 65534
tolerations: []

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,19 @@
---
repositories:
  - name: harbor
    url: https://helm.goharbor.io
releases:
  - name: harbor
    namespace: harbor
    chart: harbor/harbor
    version: 1.15.1
    values:
      - values.yaml
    set:
      # NOTE(review): these fallbacks re-state Harbor's publicly-known default
      # credentials ("Harbor12345", "changeme"). If the env vars are unset the
      # deployment ships with well-known passwords — consider `requiredEnv`
      # (fail hard) instead of `default`.
      - name: harborAdminPassword
        value: '{{ env "HARBOR_ADMIN_PASSWORD" | default "Harbor12345" }}'
      - name: database.internal.password
        value: '{{ env "HARBOR_DB_PASSWORD" | default "changeme" }}'
      - name: redis.internal.password
        value: '{{ env "HARBOR_REDIS_PASSWORD" | default "changeme" }}'

View File

@ -0,0 +1,152 @@
harborAdminPassword: "Harbor12345"
tlsSecretName: "harbor.nationtech.io-tls"
expose:
type: ingress
tls:
enabled: true
certSource: secret
secret:
secretName: "harbor.nationtech.io-tls"
ingress:
hosts:
core: harbor.nationtech.io
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
className: "nginx"
ports:
httpPort: 80
persistence:
enabled: true
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
subPath: "registry"
accessMode: ReadWriteOnce
size: 20Gi
jobservice:
jobLog:
subPath: "jobservice"
accessMode: ReadWriteOnce
size: 2Gi
database:
subPath: "db"
accessMode: ReadWriteOnce
size: 10Gi
redis:
subPath: "redis"
accessMode: ReadWriteOnce
size: 2Gi
trivy:
subPath: "trivy"
accessMode: ReadWriteOnce
size: 2Gi
imageChartStorage:
disableredirect: false
type: filesystem
filesystem:
rootdirectory: /storage
# Enable Prometheus metrics
metrics:
enabled: true
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
release: prometheus
interval: 15s
metricRelabelings: []
relabelings: []
# Disable tracing as we're not using Jaeger
trace:
enabled: false
# Enable internal TLS
internalTLS:
enabled: true
strong_ssl_ciphers: true
certSource: "auto"
# Use internal database for simplicity
database:
type: internal
internal:
password: "changeme"
# Use internal Redis for simplicity
redis:
type: internal
internal:
password: "changeme"
# Enable Trivy scanner
trivy:
  enabled: true
  image:
    repository: goharbor/trivy-adapter-photon
    # NOTE(review): 'dev' is a floating, unstable tag — pin this to the
    # adapter release matching harbor chart 1.15.1, or drop the override so
    # the chart's pinned default tag is used. Confirm before production use.
    tag: dev
  replicas: 2
  resources:
    requests:
      cpu: 200m
      memory: 512Mi
    limits:
      cpu: 1
      memory: 1Gi
  vulnType: "os,library"
  severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
  ignoreUnfixed: false
  insecure: false
  skipUpdate: false
  skipJavaDBUpdate: false
  offlineScan: false
  securityCheck: "vuln"
  timeout: 5m0s
# High Availability settings
portal:
replicas: 2
core:
replicas: 2
jobservice:
replicas: 2
registry:
replicas: 2
# NOTE(review): ChartMuseum was removed from Harbor (and its Helm chart) as of
# Harbor v2.8 — this key appears to be dead configuration for chart 1.15.1 and
# is presumably ignored; confirm and delete.
chartmuseum:
  replicas: 2
# Logging configuration
log:
level: info
local:
rotateCount: 50
rotateSize: 200M
location: /var/log/harbor
external:
enabled: true
endpoint: http://fluentd.logging:24224
index: harbor
type: fluentd

View File

@ -0,0 +1,46 @@
repositories:
- name: grafana
url: https://grafana.github.io/helm-charts
- name: elastic
url: https://helm.elastic.co
releases:
- name: elasticsearch
namespace: {{ .Values.namespace | default "logging" }}
chart: elastic/elasticsearch
version: 7.17.10
values:
- values/elasticsearch-values.yaml
- ../common-config.yaml
- name: filebeat
namespace: {{ .Values.namespace | default "logging" }}
chart: elastic/filebeat
version: 7.17.10
values:
- values/filebeat-values.yaml
- ../common-config.yaml
- name: loki
namespace: {{ .Values.namespace | default "logging" }}
chart: grafana/loki
version: 5.8.3
values:
- values/loki-values.yaml
- ../common-config.yaml
- name: promtail
namespace: {{ .Values.namespace | default "logging" }}
chart: grafana/promtail
version: 6.11.3
values:
- values/promtail-values.yaml
- ../common-config.yaml
- name: grafana
namespace: {{ .Values.namespace | default "logging" }}
chart: grafana/grafana
version: 6.50.7
values:
- values/grafana-values.yaml
- ../common-config.yaml

View File

@ -0,0 +1,16 @@
replicas: 3
minimumMasterNodes: 2
resources:
requests:
cpu: "100m"
memory: "1Gi"
limits:
cpu: "1000m"
memory: "2Gi"
volumeClaimTemplate:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 30Gi

View File

@ -0,0 +1,12 @@
daemonset:
enabled: true
filebeatConfig:
filebeat.yml: |
filebeat.inputs:
- type: container
paths:
- /var/log/containers/*.log
output.elasticsearch:
hosts: ['${ELASTICSEARCH_HOST:elasticsearch-master:9200}']

View File

@ -0,0 +1,62 @@
persistence:
enabled: true
size: 10Gi
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      # NOTE(review): Helm does NOT render templates inside values files —
      # the `{{ .Release.Namespace }}` below is passed through literally,
      # producing an invalid URL. Either hardcode the namespace (this release
      # is installed into `logging`) or feed these values through a helmfile
      # `*.gotmpl` values file. Confirm before deploying.
      - name: Loki
        type: loki
        url: http://loki.{{ .Release.Namespace }}.svc.cluster.local:3100
        access: proxy
        isDefault: false
      - name: Elasticsearch
        type: elasticsearch
        url: http://elasticsearch-master.{{ .Release.Namespace }}.svc.cluster.local:9200
        access: proxy
        isDefault: true
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- name: 'default'
orgId: 1
folder: ''
type: file
disableDeletion: false
editable: true
options:
path: /var/lib/grafana/dashboards
dashboards:
default:
loki-logs:
gnetId: 12611
revision: 1
datasource: Loki
elasticsearch-logs:
gnetId: 4358
revision: 1
datasource: Elasticsearch
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- grafana.{{ .Release.Namespace }}.{{ .Values.global.hosts.domain }}
tls:
- secretName: grafana-tls
hosts:
- grafana.{{ .Release.Namespace }}.{{ .Values.global.hosts.domain }}
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 128Mi

View File

@ -0,0 +1,33 @@
config:
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
storage:
type: filesystem
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 400m
memory: 512Mi
service:
port: 3100
persistence:
enabled: true
size: 10Gi
serviceMonitor:
enabled: true
namespace: logging
interval: 30s

View File

@ -0,0 +1,74 @@
config:
lokiAddress: "http://loki.{{ .Release.Namespace }}:3100/loki/api/v1/push"
snippets:
extraScrapeConfigs: |
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
- job_name: kubernetes-pods-logs
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: {{ .Release.Namespace }}
source_labels:
- __meta_kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: pod
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_container_name
target_label: container
- replacement: /var/log/pods/*$1/*.log
separator: /
source_labels:
- __meta_kubernetes_pod_uid
- __meta_kubernetes_pod_container_name
target_label: __path__
resources:
limits:
cpu: 200m
memory: 256Mi
requests:
cpu: 100m
memory: 128Mi
tolerations:
- effect: NoSchedule
operator: Exists
updateStrategy:
type: RollingUpdate
serviceMonitor:
enabled: true
namespace: {{.Release.Namespace}}
interval: 30s

View File

@ -0,0 +1,13 @@
repositories:
- name: vault-helm
url: https://helm.releases.hashicorp.com
releases:
- name: vault
namespace: {{ .Values.namespace | default "vault" }}
chart: vault-helm/vault
version: 0.28.1
installed: true
values:
- values.yaml
- ../common-config.yaml

View File

@ -0,0 +1,2 @@
ui:
enabled: true

View File

@ -0,0 +1,12 @@
---
repositories:
- name: woodpecker
url: https://woodpecker-ci.org/helm-charts
releases:
- name: woodpecker
namespace: ci
chart: woodpecker/woodpecker
version: 1.0.3
values:
- values.yaml

View File

@ -0,0 +1,108 @@
---
# Woodpecker server configuration
woodpecker:
server:
image:
repository: woodpeckerci/woodpecker-server
tag: v1.0.3 # Use a specific version instead of 'latest'
replicaCount: 2 # Run multiple replicas for high availability
service:
type: ClusterIP
port: 8000
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: woodpecker.example.com
paths:
- path: /
tls:
- secretName: woodpecker-tls
hosts:
- woodpecker.example.com
    env:
      # NOTE(review): `{{ .Env.X }}` is gomplate/Drone syntax, not helmfile
      # gotmpl (`{{ env "X" }}`), and this file is referenced as a plain
      # values.yaml — helmfile will not render it, so these placeholders are
      # passed to the chart as literal strings. Confirm and convert.
      WOODPECKER_OPEN: "false" # Disable open registration for production
      # NOTE(review): woodpecker.example.com looks like a placeholder — the
      # rest of this repo uses *.nationtech.io; confirm the real hostname.
      WOODPECKER_HOST: "https://woodpecker.example.com" # Use HTTPS
      WOODPECKER_GITHUB: "true"
      WOODPECKER_GITHUB_CLIENT: "{{ .Env.WOODPECKER_GITHUB_CLIENT }}"
      WOODPECKER_GITHUB_SECRET: "{{ .Env.WOODPECKER_GITHUB_SECRET }}"
      WOODPECKER_AGENT_SECRET: "{{ .Env.WOODPECKER_AGENT_SECRET }}"
      WOODPECKER_GRPC_SECRET: "{{ .Env.WOODPECKER_GRPC_SECRET }}"
      WOODPECKER_GRPC_ADDR: ":9000"
      WOODPECKER_SERVER_ADDR: ":8000"
      WOODPECKER_METRICS_SERVER_ADDR: ":9001"
      WOODPECKER_ADMIN: "{{ .Env.WOODPECKER_ADMIN }}"
      WOODPECKER_DATABASE_DRIVER: "postgres"
      WOODPECKER_DATABASE_DATASOURCE: "postgres://{{ .Env.POSTGRES_USER }}:{{ .Env.POSTGRES_PASSWORD }}@postgresql:5432/woodpecker?sslmode=require"
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
# Woodpecker agent configuration
agent:
image:
repository: woodpeckerci/woodpecker-agent
tag: v1.0.3 # Use a specific version instead of 'latest'
replicaCount: 3 # Run multiple agents for better parallelism
env:
WOODPECKER_SERVER: "woodpecker-server:9000"
WOODPECKER_AGENT_SECRET: "{{ .Env.WOODPECKER_AGENT_SECRET }}"
WOODPECKER_BACKEND: "kubernetes"
WOODPECKER_BACKEND_K8S_NAMESPACE: "ci"
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: "20Gi" # Increased volume size
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: "managed-premium" # Use a production-grade storage class
WOODPECKER_BACKEND_K8S_STORAGE_RWX: "true"
WOODPECKER_BACKEND_K8S_POD_LABELS: '{"app":"woodpecker-job"}'
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: '{"prometheus.io/scrape":"true","prometheus.io/port":"9000"}'
WOODPECKER_BACKEND_K8S_POD_NODE_SELECTOR: '{"kubernetes.io/os":"linux"}'
WOODPECKER_BACKEND_K8S_SECCTX_NONROOT: "true"
WOODPECKER_BACKEND_K8S_PULL_SECRET_NAMES: "woodpecker-pull-secret"
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 1
memory: 1Gi
# PostgreSQL configuration
postgresql:
enabled: true
postgresqlUsername: "{{ .Env.POSTGRES_USER }}"
postgresqlPassword: "{{ .Env.POSTGRES_PASSWORD }}"
postgresqlDatabase: "woodpecker"
persistence:
enabled: true
size: 20Gi
storageClass: "managed-premium" # Use a production-grade storage class
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 1Gi
# Prometheus integration
metrics:
serviceMonitor:
enabled: true
namespace: monitoring
interval: 15s
scrapeTimeout: 14s
selector:
release: prometheus
# Logging integration
logging:
fluentd:
enabled: true
config:
logLevel: info
fluentdAddress: fluentd.logging:24224

25
common-config.yaml Normal file
View File

@ -0,0 +1,25 @@
---
# Common configurations for all applications
# Ingress configurations
ingress:
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: "letsencrypt-prod"
tls:
enabled: true
# Persistence configurations
persistence:
storageClass: "ceph-block"
# Monitoring configurations
monitoring:
enabled: true
namespace: monitoring
# Logging configurations
logging:
enabled: true
fluentd:
address: fluentd.logging:24224

45
helmfile.yaml Normal file
View File

@ -0,0 +1,45 @@
---
helmDefaults:
  atomic: true
  wait: true
repositories:
  - name: gitea-charts
    url: https://dl.gitea.io/charts/
  - name: woodpecker
    url: https://woodpecker-ci.org/helm-charts
  - name: harbor
    url: https://helm.goharbor.io
# NOTE(review): the gitea, woodpecker and harbor releases declared inline here
# are ALSO declared by the included sub-helmfiles below
# (applications/{gitea,woodpecker,harbor}/helmfile.yaml) — each release ends
# up managed twice. Keep exactly one definition per release.
releases:
  - name: gitea
    namespace: nt
    chart: gitea-charts/gitea
    version: 8.3.0
    values:
      - applications/gitea/values.yaml
  - name: woodpecker
    namespace: ci
    chart: woodpecker/woodpecker
    version: 1.0.3
    values:
      - applications/woodpecker/values.yaml
  - name: harbor
    namespace: harbor
    chart: harbor/harbor
    version: 1.15.1
    values:
      - applications/harbor/values.yaml
helmfiles:
  - path: applications/vault/helmfile.yaml
  - path: applications/logging/helmfile.yaml
  - path: applications/gitea/helmfile.yaml
  - path: applications/woodpecker/helmfile.yaml
  - path: applications/harbor/helmfile.yaml
# Common configurations
# NOTE(review): `commonConfig` is not a helmfile key — unknown top-level keys
# are rejected/ignored, so this block has no effect. If the intent is shared
# values, add common-config.yaml to each release's `values:` list instead.
commonConfig:
  - common-config.yaml