# datatracker/helm/values.yaml
# Default values for datatracker.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
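# As a quick illustration (the release name, chart path, and override file below are
# hypothetical, not part of this chart), a small override file such as my-values.yaml:
#
#   datatracker:
#     replicaCount: 2
#     image:
#       tag: "v1.1.0"
#
# could be applied with: helm upgrade --install datatracker ./datatracker -f my-values.yaml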
## Define serviceAccount names for components. Defaults to component's fully qualified name.
##
serviceAccounts:
  datatracker:
    create: true
    name: datatracker
    annotations: {}
  celery:
    create: true
    name: celery
    annotations: {}
  beat:
    create: true
    name: beat
    annotations: {}
  rabbitmq:
    create: true
    name: rabbitmq
    annotations: {}
  memcached:
    create: true
    name: memcached
    annotations: {}
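# A minimal sketch of pointing one component at a pre-existing ServiceAccount instead of
# creating one (the name "existing-datatracker-sa" is a placeholder, and this assumes the
# chart follows the usual Helm convention where create: false skips creation and name
# references an existing account):
#   serviceAccounts:
#     datatracker:
#       create: false
#       name: existing-datatracker-sa
#       annotations: {}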
# -------------------------------------------------------------
# DATATRACKER
# -------------------------------------------------------------
datatracker:
  name: datatracker
  image:
    repository: "ghcr.io/ietf-tools/datatracker"
    pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    # tag: "v1.1.0"
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""
  ingress:
    enabled: false
    className: ""
    annotations: {}
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    hosts:
      - host: datatracker.local
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls: []
    #  - secretName: chart-example-tls
    #    hosts:
    #      - chart-example.local
  # livenessProbe:
  #   httpGet:
  #     # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
  #     path: /submit/tool-instructions/
  #     port: http
  podAnnotations: {}
  podLabels: {}
  # readinessProbe:
  #   httpGet:
  #     # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
  #     path: /submit/tool-instructions/
  #     port: http
  replicaCount: 1
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  podSecurityContext:
    runAsNonRoot: true
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    runAsUser: 1000
    runAsGroup: 1000
  service:
    type: ClusterIP
    port: 80
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  # startupProbe:
  #   initialDelaySeconds: 15
  #   periodSeconds: 5
  #   timeoutSeconds: 5
  #   successThreshold: 1
  #   failureThreshold: 60
  #   httpGet:
  #     # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
  #     path: /submit/tool-instructions/
  #     port: http
  # Additional volumes on the output Deployment definition.
  volumes:
    - name: settings-local-volume
      configMap:
        name: django-configmap
    - name: datatracker-shared-volume
      persistentVolumeClaim:
        claimName: "datatracker-shared-volume-claim"
    - name: datatracker-tmp
      emptyDir:
        sizeLimit: "2Gi"
  # Additional volumeMounts on the output Deployment definition.
  volumeMounts:
    - name: settings-local-volume
      mountPath: /workspace/ietf/settings_local.py
      subPath: settings_local.py
      readOnly: true
    - name: datatracker-shared-volume
      mountPath: /a
    - name: datatracker-tmp
      mountPath: /tmp
  tolerations: []
  nodeSelector: {}
  affinity: {}
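# For reference, an enabled ingress for the datatracker component might look like the
# sketch below (hostname, class name, and TLS secret name are placeholders, not values
# used by this chart):
#   datatracker:
#     ingress:
#       enabled: true
#       className: "nginx"
#       hosts:
#         - host: datatracker.example.org
#           paths:
#             - path: /
#               pathType: Prefix
#       tls:
#         - secretName: datatracker-example-tls
#           hosts:
#             - datatracker.example.org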
# -------------------------------------------------------------
# CELERY
# -------------------------------------------------------------
celery:
  name: celery
  image: {}
    # defaults to datatracker settings if not specified separately
    # repository: "ghcr.io/ietf-tools/datatracker"
    # pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    # tag: "v1.1.0"
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""
  livenessProbe:
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]
    periodSeconds: 30
    timeoutSeconds: 5
  podAnnotations: {}
  podLabels: {}
  replicaCount: 1
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  podSecurityContext:
    runAsNonRoot: true
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    runAsUser: 1000
    runAsGroup: 1000
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  startupProbe:
    initialDelaySeconds: 15
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]
  # Additional volumes on the output Deployment definition.
  volumes:
    - name: settings-local-volume
      configMap:
        name: django-configmap
    - name: datatracker-shared-volume
      persistentVolumeClaim:
        claimName: "datatracker-shared-volume-claim"
    - name: celery-tmp
      emptyDir:
        sizeLimit: "2Gi"
  # Additional volumeMounts on the output Deployment definition.
  volumeMounts:
    - name: settings-local-volume
      mountPath: /workspace/ietf/settings_local.py
      subPath: settings_local.py
      readOnly: true
    - name: datatracker-shared-volume
      mountPath: /a
    - name: celery-tmp
      mountPath: /tmp
  tolerations: []
  nodeSelector: {}
  affinity: {}
# -------------------------------------------------------------
# BEAT
# -------------------------------------------------------------
beat:
  name: beat
  image: {}
    # defaults to datatracker settings if not specified separately
    # repository: "ghcr.io/ietf-tools/datatracker"
    # pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    # tag: "v1.1.0"
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""
  livenessProbe:
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]
    periodSeconds: 30
    timeoutSeconds: 5
  podAnnotations: {}
  podLabels: {}
  replicaCount: 1
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  podSecurityContext:
    runAsNonRoot: true
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    runAsUser: 1000
    runAsGroup: 1000
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  startupProbe:
    initialDelaySeconds: 15
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]
  # Additional volumes on the output Deployment definition.
  volumes:
    - name: settings-local-volume
      configMap:
        name: django-configmap
    - name: datatracker-shared-volume
      persistentVolumeClaim:
        claimName: "datatracker-shared-volume-claim"
    - name: beat-tmp
      emptyDir:
        sizeLimit: "2Gi"
  # Additional volumeMounts on the output Deployment definition.
  volumeMounts:
    - name: settings-local-volume
      mountPath: /workspace/ietf/settings_local.py
      subPath: settings_local.py
      readOnly: true
    - name: datatracker-shared-volume
      mountPath: /a
    - name: beat-tmp
      mountPath: /tmp
  tolerations: []
  nodeSelector: {}
  affinity: {}
# -------------------------------------------------------------
# RABBITMQ
# -------------------------------------------------------------
rabbitmq:
name: "rabbitmq"
image:
repository: "ghcr.io/ietf-tools/datatracker-mq"
pullPolicy: IfNotPresent
tag: "3.12-alpine"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
livenessProbe:
exec:
command: ["rabbitmq-diagnostics", "-q", "ping"]
periodSeconds: 30
timeoutSeconds: 5
podAnnotations: {}
podLabels: {}
replicaCount: 1
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
podSecurityContext:
runAsNonRoot: true
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
# rabbitmq image sets up uid/gid 100/101
runAsUser: 100
runAsGroup: 101
service:
type: ClusterIP
port: 5672
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
startupProbe:
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 60
exec:
command: ["rabbitmq-diagnostics", "-q", "ping"]
# Additional volumes on the output Deployment definition.
volumes:
- name: "rabbitmq-data"
persistentVolumeClaim:
claimName: "rabbitmq-data-claim"
- name: "rabbitmq-config"
configMap:
name: "rabbitmq-configmap"
- name: "rabbitmq-tmp"
emptyDir:
sizeLimit: 50Mi
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts:
- name: "rabbitmq-data"
mountPath: "/var/lib/rabbitmq"
subPath: "rabbitmq"
- name: "rabbitmq-config"
mountPath: "/etc/rabbitmq"
- name: "rabbitmq-tmp"
mountPath: "/tmp"
tolerations: []
nodeSelector: {}
affinity: {}
# -------------------------------------------------------------
# MEMCACHED
# -------------------------------------------------------------
memcached:
  name: memcached
  image:
    repository: "memcached"
    pullPolicy: IfNotPresent
    tag: "1.6-alpine"
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""
  podAnnotations: {}
  podLabels: {}
  replicaCount: 1
  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi
  podSecurityContext:
    runAsNonRoot: true
  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    # memcached image sets up uid/gid 11211
    runAsUser: 11211
    runAsGroup: 11211
  service:
    type: ClusterIP
    port: 11211
  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""
  tolerations: []
  nodeSelector: {}
  affinity: {}
# -------------------------------------------------------------
# SCOUT APM SETTINGS
# -------------------------------------------------------------
# Set this to enable a Scout APM Core Agent sidecar
scoutapm:
  image:
    repository: "scoutapp/scoutapm"
    tag: "version-1.4.0"
# -------------------------------------------------------------
# PERSISTENT VOLUMES
# -------------------------------------------------------------
persistentVolumes:
  datatrackerSharedVolume:
    # Note: This is the /a shared volume
    volumeName: "datatracker-shared-volume"
    accessModes:
      - "ReadWriteMany" # or ReadWriteOnce and force datatracker/celery/beat to a single node
    storageClassName: "" # Empty string means do not use default storage class
    storage: "600Gi" # actual PersistentVolume must be at least this big or the PVC will not bind
  rabbitmqDataVolume:
    volumeName: "rabbitmq-data-volume"
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "" # Empty string means do not use default storage class
    storage: "8Gi" # actual PersistentVolume must be at least this big or the PVC will not bind
# -------------------------------------------------------------
# COMMON
# -------------------------------------------------------------
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
env:
  # n.b., these are debug values / non-secret secrets
  DATATRACKER_SERVER_MODE: "development" # development for staging, production for production
  DATATRACKER_ADMINS: |-
    Robert Sparks <rjsparks@nostrum.com>
    Ryan Cross <rcross@amsl.com>
    Kesara Rathnayake <kesara@staff.ietf.org>
    Jennifer Richards <jennifer@staff.ietf.org>
    Nicolas Giard <nick@staff.ietf.org>
  DATATRACKER_ALLOWED_HOSTS: ".ietf.org" # newline-separated list also allowed
  # DATATRACKER_DATATRACKER_DEBUG: "false"
  # DB access details - needs to be filled in
  # DATATRACKER_DBHOST: "db"
  # DATATRACKER_DBPORT: "5432"
  # DATATRACKER_DBNAME: "datatracker"
  # DATATRACKER_DBUSER: "django" # secret
  # DATATRACKER_DBPASS: "RkTkDPFnKpko" # secret
  DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret
  # Set this to point testing / staging at the production statics server until we
  # sort that out
  # DATATRACKER_STATIC_URL: "https://static.ietf.org/dt/12.10.0/"
  # DATATRACKER_EMAIL_DEBUG: "true"
  # Outgoing email details
  # DATATRACKER_EMAIL_HOST: "localhost" # defaults to localhost
  # DATATRACKER_EMAIL_PORT: "2025" # defaults to 2025
  # The value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_NOMCOM_APP_SECRET_B64: "m9pzMezVoFNJfsvU9XSZxGnXnwup6P5ZgCQeEnROOoQ=" # secret
  DATATRACKER_IANA_SYNC_PASSWORD: "this-is-the-iana-sync-password" # secret
  DATATRACKER_RFC_EDITOR_SYNC_PASSWORD: "this-is-the-rfc-editor-sync-password" # secret
  DATATRACKER_YOUTUBE_API_KEY: "this-is-the-youtube-api-key" # secret
  DATATRACKER_GITHUB_BACKUP_API_KEY: "this-is-the-github-backup-api-key" # secret
  # API key configuration
  DATATRACKER_API_KEY_TYPE: "ES265"
  # secret - value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_API_PUBLIC_KEY_PEM_B64: |-
    Ci0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tCk1Ga3dFd1lIS29aSXpqMENBUVlJS
    29aSXpqMERBUWNEUWdBRXFWb2pzYW9mREpTY3VNSk4rdHNodW15Tk01TUUKZ2Fyel
    ZQcWtWb3ZtRjZ5RTdJSi9kdjRGY1YrUUtDdEovck9TOGUzNlk4WkFFVll1dWtoZXM
    weVoxdz09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=
  # secret - value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_API_PRIVATE_KEY_PEM_B64: |-
    Ci0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLQpNSUdIQWdFQU1CTUdCeXFHU000O
    UFnRUdDQ3FHU000OUF3RUhCRzB3YXdJQkFRUWdvSTZMSmtvcEtxOFhySGk5ClFxR1
    F2RTRBODNURllqcUx6KzhnVUxZZWNzcWhSQU5DQUFTcFdpT3hxaDhNbEp5NHdrMzY
    yeUc2Ykkwemt3U0IKcXZOVStxUldpK1lYcklUc2duOTIvZ1Z4WDVBb0swbitzNUx4
    N2ZwanhrQVJWaTY2U0Y2elRKblgKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
  # DATATRACKER_MEETECHO_API_BASE: "https://meetings.conf.meetecho.com/api/v1/"
  DATATRACKER_MEETECHO_CLIENT_ID: "this-is-the-meetecho-client-id" # secret
  DATATRACKER_MEETECHO_CLIENT_SECRET: "this-is-the-meetecho-client-secret" # secret
  # DATATRACKER_MATOMO_SITE_ID: "7" # must be present to enable Matomo
  # DATATRACKER_MATOMO_DOMAIN_PATH: "analytics.ietf.org"
  CELERY_PASSWORD: "this-is-a-secret" # secret
  DATATRACKER_APP_API_TOKENS_JSON: "{}" # secret
  # use this to override default - one entry per line
  # DATATRACKER_CSRF_TRUSTED_ORIGINS: |-
  #   https://datatracker.staging.ietf.org
  # Scout configuration
  DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
  DATATRACKER_SCOUT_NAME: "StagingDatatracker"
  MEMCACHED_MEM_LIMIT: "1024"
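# A sketch of the environment overrides a staging deployment might supply (the hostnames,
# database host, and credentials below are placeholders, not working values):
#   env:
#     DATATRACKER_ALLOWED_HOSTS: |-
#       datatracker.staging.ietf.org
#       staging.example.org
#     DATATRACKER_DBHOST: "postgres.example.internal"
#     DATATRACKER_DBPORT: "5432"
#     DATATRACKER_DBNAME: "datatracker"
#     DATATRACKER_DBUSER: "django" # secret
#     DATATRACKER_DBPASS: "change-me" # secret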