harbor + woodpecker #1

Open
med wants to merge 2 commits from harbor into main
6 changed files with 109 additions and 295 deletions

View File

@@ -1,7 +1,8 @@
kubectl 1.26.7
minikube 1.29.0
helm 3.11.0
stern 1.23.0
yq 4.34.2
gomplate v3.11.5
vale 3.6.1
kubectl 1.26.7
helm 3.15.4
stern 1.23.0
yq 4.34.2
gomplate v3.11.5
vale 3.6.1
helmfile 0.167.1
helm-diff 3.9.10

View File

@@ -7,7 +7,7 @@ releases:
- name: harbor
namespace: harbor
chart: harbor/harbor
version: 1.15.1
version: 1.11.4
values:
- values.yaml
set:
@@ -15,5 +15,5 @@ releases:
value: '{{ env "HARBOR_ADMIN_PASSWORD" | default "Harbor12345" }}'
- name: database.internal.password
value: '{{ env "HARBOR_DB_PASSWORD" | default "changeme" }}'
- name: redis.internal.password
value: '{{ env "HARBOR_REDIS_PASSWORD" | default "changeme" }}'
- name: hostname
value: '{{ env "HARBOR_HOSTNAME" | default "hub.nationtech.io" }}'

View File

@@ -1,152 +1,52 @@
harborAdminPassword: "Harbor12345"  # SECURITY(review): Harbor's well-known default admin password committed in plaintext — override via HARBOR_ADMIN_PASSWORD / a Kubernetes Secret
tlsSecretName: "harbor.nationtech.io-tls"
chartVersion: 1.11.4
chartmuseum:
enabled: true
database:
internal:
password: {{ .Values.database.internal.password }}
type: internal
expose:
type: ingress
ingress:
annotations:
cert-manager.io/issuer: letsencrypt-prod
kubernetes.io/tls-acme: "true"
hosts:
core: {{ .Values.hostname }}
className: nginx
tls:
enabled: true
certSource: secret
secret:
secretName: "harbor.nationtech.io-tls"
ingress:
hosts:
core: harbor.nationtech.io
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
className: "nginx"
ports:
httpPort: 80
persistence:
enabled: true
resourcePolicy: "keep"
persistentVolumeClaim:
registry:
subPath: "registry"
accessMode: ReadWriteOnce
size: 20Gi
jobservice:
jobLog:
subPath: "jobservice"
accessMode: ReadWriteOnce
size: 2Gi
database:
subPath: "db"
accessMode: ReadWriteOnce
size: 10Gi
redis:
subPath: "redis"
accessMode: ReadWriteOnce
size: 2Gi
trivy:
subPath: "trivy"
accessMode: ReadWriteOnce
size: 2Gi
imageChartStorage:
disableredirect: false
type: filesystem
filesystem:
rootdirectory: /storage
# Enable Prometheus metrics
metrics:
enabled: true
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
serviceMonitor:
enabled: true
namespace: monitoring
additionalLabels:
release: prometheus
interval: 15s
metricRelabelings: []
relabelings: []
# Disable tracing as we're not using Jaeger
trace:
enabled: false
# Enable internal TLS
internalTLS:
enabled: true
strong_ssl_ciphers: true
certSource: "auto"
# Use internal database for simplicity
database:
type: internal
internal:
password: "changeme"
# Use internal Redis for simplicity
redis:
type: internal
internal:
password: "changeme"
# Enable Trivy scanner
trivy:
enabled: true
image:
repository: goharbor/trivy-adapter-photon
tag: dev
replicas: 2
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1
memory: 1Gi
vulnType: "os,library"
severity: "UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL"
ignoreUnfixed: false
insecure: false
skipUpdate: false
skipJavaDBUpdate: false
offlineScan: false
securityCheck: "vuln"
timeout: 5m0s
# High Availability settings
portal:
replicas: 2
core:
replicas: 2
secretName: {{ .Values.hostname }}-tls
type: ingress
externalURL: https://{{ .Values.hostname }}
jobservice:
replicas: 2
registry:
replicas: 2
chartmuseum:
replicas: 2
# Logging configuration
log:
level: info
local:
rotateCount: 50
rotateSize: 200M
location: /var/log/harbor
external:
enabled: true
endpoint: http://fluentd.logging:24224
index: harbor
type: fluentd
notary:
enabled: false
persistence:
persistentVolumeClaim:
chartmuseum:
size: 10Gi
storageClass: ceph-block
database:
size: 10Gi
storageClass: ceph-block
jobservice:
jobLog:
size: 2Gi
storageClass: ceph-block
redis:
size: 2Gi
storageClass: ceph-block
registry:
size: 10Gi
storageClass: ceph-block
trivy:
size: 5Gi
storageClass: ceph-block
redis:
enabled: true
trivy:
enabled: true

View File

@@ -1,12 +1,12 @@
---
repositories:
- name: woodpecker
url: https://woodpecker-ci.org/helm-charts
url: https://woodpecker-ci.org/
releases:
- name: woodpecker
namespace: ci
namespace: woodpecker
chart: woodpecker/woodpecker
version: 1.0.3
version: 1.5.1
values:
- values.yaml

View File

@@ -1,108 +1,55 @@
---
# Woodpecker server configuration
woodpecker:
server:
image:
repository: woodpeckerci/woodpecker-server
tag: v1.0.3 # Use a specific version instead of 'latest'
replicaCount: 2 # Run multiple replicas for high availability
service:
type: ClusterIP
port: 8000
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:
- host: woodpecker.example.com
paths:
- path: /
tls:
- secretName: woodpecker-tls
hosts:
- woodpecker.example.com
env:
WOODPECKER_OPEN: "false" # Disable open registration for production
WOODPECKER_HOST: "https://woodpecker.example.com" # Use HTTPS
WOODPECKER_GITHUB: "true"
WOODPECKER_GITHUB_CLIENT: "{{ .Env.WOODPECKER_GITHUB_CLIENT }}"
WOODPECKER_GITHUB_SECRET: "{{ .Env.WOODPECKER_GITHUB_SECRET }}"
WOODPECKER_AGENT_SECRET: "{{ .Env.WOODPECKER_AGENT_SECRET }}"
WOODPECKER_GRPC_SECRET: "{{ .Env.WOODPECKER_GRPC_SECRET }}"
WOODPECKER_GRPC_ADDR: ":9000"
WOODPECKER_SERVER_ADDR: ":8000"
WOODPECKER_METRICS_SERVER_ADDR: ":9001"
WOODPECKER_ADMIN: "{{ .Env.WOODPECKER_ADMIN }}"
WOODPECKER_DATABASE_DRIVER: "postgres"
WOODPECKER_DATABASE_DATASOURCE: "postgres://{{ .Env.POSTGRES_USER }}:{{ .Env.POSTGRES_PASSWORD }}@postgresql:5432/woodpecker?sslmode=require"
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 500m
memory: 512Mi
server:
host: "ci.nationtech.io"
ingress:
enabled: true
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: letsencrypt-prod
kubernetes.io/tls-acme: "true"
hosts:
- host: "ci.nationtech.io"
paths:
- path: "/"
pathType: Prefix
backend:
service:
name: woodpecker-server
port:
number: 80
tls:
- secretName: "ci.nationtech.io-tls"
hosts:
- "ci.nationtech.io"
env:
WOODPECKER_OPEN: "true"
WOODPECKER_ADMIN: "woodpecker,admin,ci,nationtech,med"
WOODPECKER_HOST: "https://ci.nationtech.io"
WOODPECKER_AGENT_SECRET: "woodpecker-secret"  # SECURITY(review): weak hardcoded shared secret — generate a random value and inject from a Secret/env var
WOODPECKER_GRPC_ADDR: ":9000"
WOODPECKER_GITEA: "true"
WOODPECKER_GITEA_URL: "https://git.nationtech.io"
WOODPECKER_GITEA_CLIENT: "2a17849f-7747-44b9-a0d4-c79bc4aeff3d"  # SECURITY(review): OAuth client ID committed in plaintext — prefer sourcing from env/Secret
WOODPECKER_GITEA_SECRET: "gto_5zpyckcvuawq6l2zaja4mt3mptigpyc5o7nibmbd76jd2e5tu3fa"  # SECURITY(review): live Gitea OAuth token committed — rotate this credential immediately and load it from a Kubernetes Secret
# Woodpecker agent configuration
agent:
image:
repository: woodpeckerci/woodpecker-agent
tag: v1.0.3 # Use a specific version instead of 'latest'
replicaCount: 3 # Run multiple agents for better parallelism
replicaCount: 2
env:
WOODPECKER_SERVER: "woodpecker-server:9000"
WOODPECKER_AGENT_SECRET: "{{ .Env.WOODPECKER_AGENT_SECRET }}"
WOODPECKER_AGENT_SECRET: "woodpecker-secret"  # SECURITY(review): hardcoded shared secret (must match the server's value) — inject from a Secret/env var instead of committing it
WOODPECKER_MAX_PROCS: "2"
WOODPECKER_BACKEND: "kubernetes"
WOODPECKER_BACKEND_K8S_NAMESPACE: "ci"
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: "20Gi" # Increased volume size
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: "managed-premium" # Use a production-grade storage class
WOODPECKER_BACKEND_K8S_NAMESPACE: "woodpecker"
WOODPECKER_BACKEND_K8S_STORAGE_CLASS: "ceph-block"
WOODPECKER_BACKEND_K8S_VOLUME_SIZE: "10Gi"
WOODPECKER_BACKEND_K8S_STORAGE_RWX: "true"
WOODPECKER_BACKEND_K8S_POD_LABELS: '{"app":"woodpecker-job"}'
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: '{"prometheus.io/scrape":"true","prometheus.io/port":"9000"}'
WOODPECKER_BACKEND_K8S_POD_NODE_SELECTOR: '{"kubernetes.io/os":"linux"}'
WOODPECKER_BACKEND_K8S_SECCTX_NONROOT: "true"
WOODPECKER_BACKEND_K8S_PULL_SECRET_NAMES: "woodpecker-pull-secret"
resources:
requests:
cpu: 200m
memory: 256Mi
limits:
cpu: 1
memory: 1Gi
WOODPECKER_BACKEND_K8S_POD_LABELS: '{"app.kubernetes.io/name":"agent"}'
WOODPECKER_BACKEND_K8S_POD_ANNOTATIONS: ""
WOODPECKER_CONNECT_RETRY_COUNT: "3"
WOODPECKER_BACKEND_K8S_PULL_SECRET_NAMES: ""
# PostgreSQL configuration
postgresql:
enabled: true
postgresqlUsername: "{{ .Env.POSTGRES_USER }}"
postgresqlPassword: "{{ .Env.POSTGRES_PASSWORD }}"
postgresqlDatabase: "woodpecker"
persistence:
enabled: true
size: 20Gi
storageClass: "managed-premium" # Use a production-grade storage class
resources:
requests:
cpu: 100m
memory: 256Mi
limits:
cpu: 500m
memory: 1Gi
# Prometheus integration
metrics:
serviceMonitor:
enabled: true
namespace: monitoring
interval: 15s
scrapeTimeout: 14s
selector:
release: prometheus
# Logging integration
logging:
fluentd:
enabled: true
config:
logLevel: info
fluentdAddress: fluentd.logging:24224
image:
registry: docker.io
repository: woodpeckerci/woodpecker-agent
pullPolicy: IfNotPresent
tag: "latest"

View File

@@ -3,43 +3,9 @@ helmDefaults:
atomic: true
wait: true
repositories:
- name: gitea-charts
url: https://dl.gitea.io/charts/
- name: woodpecker
url: https://woodpecker-ci.org/helm-charts
- name: harbor
url: https://helm.goharbor.io
releases:
- name: gitea
namespace: nt
chart: gitea-charts/gitea
version: 8.3.0
values:
- applications/gitea/values.yaml
- name: woodpecker
namespace: ci
chart: woodpecker/woodpecker
version: 1.0.3
values:
- applications/woodpecker/values.yaml
- name: harbor
namespace: harbor
chart: harbor/harbor
version: 1.15.1
values:
- applications/harbor/values.yaml
helmfiles:
- path: applications/vault/helmfile.yaml
- path: applications/logging/helmfile.yaml
- path: applications/gitea/helmfile.yaml
# - path: applications/vault/helmfile.yaml
# - path: applications/logging/helmfile.yaml
# - path: applications/gitea/helmfile.yaml
- path: applications/woodpecker/helmfile.yaml
- path: applications/harbor/helmfile.yaml
# Common configurations
commonConfig:
- common-config.yaml
# - path: applications/harbor/helmfile.yaml