ci: k8s fixup (#7401)

* ci: remove stray serviceName

* ci: volumeClaimTemplate name -> volumeMount, not volume

* ci: datatracker listens on containerPort 8000

* ci: services/containers have dt- prefix

* chore: adjust indent for k8s yaml

* ci: use a secret for CELERY_PASSWORD

* fix: touched wrong CELERY_PASSWORD setting

* ci: get rid of the celery pw secretGenerator

* ci: use DB_PASS instead of DBPASS (etc) for k8s

* ci: Fill in django-config.yaml from env vars

* ci: add vault-mappings.txt

* ci: use $CELERY_PASSWORD in rabbitmq.yaml

* ci: moving vault-mappings.txt out of this repo

* Revert "ci: Fill in django-config.yaml from env vars"

This reverts commit 75cd181deb390d3ab21d6887b091d66c80e1d18e.

* Revert "ci: use $CELERY_PASSWORD in rabbitmq.yaml"

This reverts commit f251f9920d07c65413f72fd165cc06acd562c2c7.

* ci: parameterize db OPTIONS setting
Jennifer Richards 2024-05-09 15:24:39 -03:00 committed by Nicolas Giard
parent d075404fdb
commit 867360e96f
9 changed files with 294 additions and 289 deletions


@@ -55,4 +55,10 @@ insert_final_newline = false
 # ---------------------------------------------------------
 # Use 2-space indents
 [helm/**.yaml]
+indent_size = 2
+
+# Settings for Kubernetes yaml
+# ---------------------------------------------------------
+# Use 2-space indents
+[k8s/**.yaml]
 indent_size = 2


@@ -58,4 +58,4 @@ spec:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30


@@ -77,4 +77,4 @@ spec:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30


@@ -1,94 +1,94 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: datatracker
 spec:
   replicas: 1
   revisionHistoryLimit: 2
   selector:
     matchLabels:
       app: datatracker
   strategy:
     type: Recreate
   template:
     metadata:
       labels:
         app: datatracker
     spec:
       securityContext:
         runAsNonRoot: true
       containers:
         # -----------------------------------------------------
         # ScoutAPM Container
         # -----------------------------------------------------
         - name: scoutapm
           image: "scoutapp/scoutapm:version-1.4.0"
           imagePullPolicy: IfNotPresent
           livenessProbe:
             exec:
               command:
                 - "sh"
                 - "-c"
                 - "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
           securityContext:
             readOnlyRootFilesystem: true
             runAsUser: 65534 # "nobody" user by default
             runAsGroup: 65534 # "nogroup" group by default
         # -----------------------------------------------------
         # Datatracker Container
         # -----------------------------------------------------
         - name: datatracker
           image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
           imagePullPolicy: Always
           ports:
-            - containerPort: 80
+            - containerPort: 8000
               name: http
               protocol: TCP
           volumeMounts:
             - name: dt-vol
               mountPath: /a
             - name: dt-tmp
               mountPath: /tmp
             - name: dt-cfg
               mountPath: /workspace/ietf/settings_local.py
               subPath: settings_local.py
           env:
             - name: "CONTAINER_ROLE"
               value: "datatracker"
           envFrom:
             - configMapRef:
                 name: django-config
           securityContext:
             allowPrivilegeEscalation: false
             capabilities:
               drop:
                 - ALL
             readOnlyRootFilesystem: true
             runAsUser: 1000
             runAsGroup: 1000
       volumes:
         # To be overriden with the actual shared volume
         - name: dt-vol
         - name: dt-tmp
           emptyDir:
             sizeLimit: "2Gi"
         - name: dt-cfg
           configMap:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: datatracker
 spec:
   type: ClusterIP
   ports:
     - port: 80
       targetPort: http
       protocol: TCP
       name: http
   selector:
     app: datatracker
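
Note that the Service above still exposes port 80; only the pod side changes. The Service's targetPort is the named port "http", and this fix moves that named port from 80 to 8000, the port the datatracker container actually listens on. A condensed view of the wiring, excerpted from the manifest above (the service/container wrapper keys are only for presentation, not real manifest fields):

# Condensed excerpt, not new configuration.
service:
  ports:
    - port: 80          # Service port, unchanged
      targetPort: http  # refers to the container port named "http"
container:
  ports:
    - containerPort: 8000  # was 80; this is what the app listens on
      name: http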


@@ -15,11 +15,11 @@ data:
   # DATATRACKER_DATATRACKER_DEBUG: "false"
   # DB access details - needs to be filled in
-  # DATATRACKER_DBHOST: "db"
-  # DATATRACKER_DBPORT: "5432"
-  # DATATRACKER_DBNAME: "datatracker"
-  # DATATRACKER_DBUSER: "django" # secret
-  # DATATRACKER_DBPASS: "RkTkDPFnKpko" # secret
+  # DATATRACKER_DB_HOST: "db"
+  # DATATRACKER_DB_PORT: "5432"
+  # DATATRACKER_DB_NAME: "datatracker"
+  # DATATRACKER_DB_USER: "django" # secret
+  # DATATRACKER_DB_PASS: "RkTkDPFnKpko" # secret
   DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret
@@ -74,4 +74,4 @@ data:
   # Scout configuration
   DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
   DATATRACKER_SCOUT_NAME: "StagingDatatracker"


@@ -10,4 +10,4 @@ resources:
   - datatracker.yaml
   - django-config.yaml
   - memcached.yaml
   - rabbitmq.yaml
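
The commit messages above note that services and containers get a "dt-" prefix, and the settings file below now reads DT_MEMCACHED_SERVICE_HOST / DT_MEMCACHED_SERVICE_PORT and defaults RABBITMQ_HOSTNAME to "dt-rabbitmq". Kubernetes injects <SERVICE_NAME>_SERVICE_HOST and <SERVICE_NAME>_SERVICE_PORT environment variables for each active Service (uppercased, dashes becoming underscores), so a Service named dt-memcached yields exactly those DT_MEMCACHED_* variables. The excerpt above does not show where the prefix is applied; a common Kustomize way to do it is namePrefix, sketched here as an assumption rather than what this repo necessarily does:

# Assumption / illustration only: namePrefix is one way to get the "dt-" names;
# this commit's kustomization excerpt does not show how the prefix is actually set.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: dt-
resources:
  - datatracker.yaml
  - django-config.yaml
  - memcached.yaml
  - rabbitmq.yaml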


@@ -5,7 +5,6 @@ metadata:
 spec:
   replicas: 1
   revisionHistoryLimit: 2
-  serviceName: memcached
   selector:
     matchLabels:
       app: memcached
@@ -50,4 +49,4 @@ spec:
       protocol: TCP
       name: memcached
   selector:
     app: memcached


@@ -1,176 +1,175 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: rabbitmq
 spec:
   replicas: 1
   revisionHistoryLimit: 2
-  serviceName: rabbitmq
   selector:
     matchLabels:
       app: rabbitmq
   template:
     metadata:
       labels:
         app: rabbitmq
     spec:
       securityContext:
         runAsNonRoot: true
       initContainers:
         # -----------------------------------------------------
         # Init RabbitMQ data
         # -----------------------------------------------------
         - name: init-rabbitmq
           image: busybox:stable
           command:
             - "sh"
             - "-c"
             - "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
           securityContext:
             runAsNonRoot: false
             runAsUser: 0
             readOnlyRootFilesystem: true
           volumeMounts:
             - name: "rabbitmq-data"
               mountPath: "/mnt"
       containers:
         # -----------------------------------------------------
         # RabbitMQ Container
         # -----------------------------------------------------
         - image: "ghcr.io/ietf-tools/datatracker-mq:3.12-alpine"
           imagePullPolicy: Always
           name: rabbitmq
           ports:
             - name: amqp
               containerPort: 5672
               protocol: TCP
           volumeMounts:
             - name: rabbitmq-data
               mountPath: /var/lib/rabbitmq
               subPath: "rabbitmq"
             - name: rabbitmq-tmp
               mountPath: /tmp
             - name: rabbitmq-config
               mountPath: "/etc/rabbitmq"
+          env:
+            - name: "CELERY_PASSWORD"
+              value: "this-is-a-secret"
           livenessProbe:
             exec:
               command: ["rabbitmq-diagnostics", "-q", "ping"]
             periodSeconds: 30
             timeoutSeconds: 5
           startupProbe:
             initialDelaySeconds: 15
             periodSeconds: 5
             timeoutSeconds: 5
             successThreshold: 1
             failureThreshold: 60
             exec:
               command: ["rabbitmq-diagnostics", "-q", "ping"]
           securityContext:
             allowPrivilegeEscalation: false
             capabilities:
               drop:
                 - ALL
             readOnlyRootFilesystem: true
             # rabbitmq image sets up uid/gid 100/101
             runAsUser: 100
             runAsGroup: 101
       volumes:
-        - name: rabbitmq-data
-          persistentVolumeClaim:
-            claimName: "rabbitmq-data-vol"
         - name: rabbitmq-tmp
           emptyDir:
             sizeLimit: "50Mi"
         - name: rabbitmq-config
           configMap:
             name: "rabbitmq-configmap"
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
   volumeClaimTemplates:
     - metadata:
-        name: rabbitmq-data-vol
+        name: rabbitmq-data
       spec:
         accessModes:
           - ReadWriteOnce
         resources:
           requests:
             storage: 8Gi
         # storageClassName: ""
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: rabbitmq-configmap
 data:
   definitions.json: |-
     {
       "permissions": [
         {
           "configure": ".*",
           "read": ".*",
           "user": "datatracker",
           "vhost": "dt",
           "write": ".*"
         }
       ],
       "users": [
         {
           "hashing_algorithm": "rabbit_password_hashing_sha256",
           "limits": {},
           "name": "datatracker",
           "password_hash": "HJxcItcpXtBN+R/CH7dUelfKBOvdUs3AWo82SBw2yLMSguzb",
           "tags": []
         }
       ],
       "vhosts": [
         {
           "limits": [],
           "metadata": {
             "description": "",
             "tags": []
           },
           "name": "dt"
         }
       ]
     }
   rabbitmq.conf: |-
     # prevent guest from logging in over tcp
     loopback_users.guest = true

     # load saved definitions
     load_definitions = /etc/rabbitmq/definitions.json

     # Ensure that enough disk is available to flush to disk. To do this, need to limit the
     # memory available to the container to something reasonable. See
     # https://www.rabbitmq.com/production-checklist.html#monitoring-and-resource-usage
     # for recommendations.

     # 1-1.5 times the memory available to the container is adequate for disk limit
     disk_free_limit.absolute = 6000MB

     # This should be ~40% of the memory available to the container. Use an
     # absolute number because relative will be proprtional to the full machine
     # memory.
     vm_memory_high_watermark.absolute = 1600MB

     # Logging
     log.file = false
     log.console = true
     log.console.level = info
     log.console.formatter = json
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: rabbitmq
 spec:
   type: ClusterIP
   clusterIP: None # headless service
   ports:
     - port: 5672
       targetPort: amqp
       protocol: TCP
       name: amqp
   selector:
     app: rabbitmq
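
Per the commit history above, CELERY_PASSWORD went through a Secret, a secretGenerator, and two reverts before landing as a literal placeholder value in the rabbitmq container's env block. If it is later moved back to a Secret, the usual Kubernetes pattern is a secretKeyRef; the sketch below would replace the env block in the container spec above, and the Secret name and key are purely illustrative:

# Hypothetical follow-up, not part of this commit. The Secret name "dt-secrets"
# and key "celery-password" are made up for illustration.
env:
  - name: "CELERY_PASSWORD"
    valueFrom:
      secretKeyRef:
        name: dt-secrets
        key: celery-password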


@@ -87,12 +87,13 @@ if _allowed_hosts_str is not None:
 DATABASES = {
     "default": {
-        "HOST": os.environ.get("DATATRACKER_DBHOST", "db"),
-        "PORT": os.environ.get("DATATRACKER_DBPORT", "5432"),
-        "NAME": os.environ.get("DATATRACKER_DBNAME", "datatracker"),
+        "HOST": os.environ.get("DATATRACKER_DB_HOST", "db"),
+        "PORT": os.environ.get("DATATRACKER_DB_PORT", "5432"),
+        "NAME": os.environ.get("DATATRACKER_DB_NAME", "datatracker"),
         "ENGINE": "django.db.backends.postgresql",
-        "USER": os.environ.get("DATATRACKER_DBUSER", "django"),
-        "PASSWORD": os.environ.get("DATATRACKER_DBPASS", ""),
+        "USER": os.environ.get("DATATRACKER_DB_USER", "django"),
+        "PASSWORD": os.environ.get("DATATRACKER_DB_PASS", ""),
+        "OPTIONS": json.loads(os.environ.get("DATATRACKER_DB_OPTS_JSON", "{}")),
     },
 }
@@ -111,7 +112,7 @@ _celery_password = os.environ.get("CELERY_PASSWORD", None)
 if _celery_password is None:
     raise RuntimeError("CELERY_PASSWORD must be set")
 CELERY_BROKER_URL = "amqp://datatracker:{password}@{host}/{queue}".format(
-    host=os.environ.get("RABBITMQ_HOSTNAME", "rabbitmq"),
+    host=os.environ.get("RABBITMQ_HOSTNAME", "dt-rabbitmq"),
     password=_celery_password,
     queue=os.environ.get("RABBITMQ_QUEUE", "dt")
 )
@@ -212,8 +213,8 @@ DE_GFM_BINARY = "/usr/local/bin/de-gfm"
 IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"
 # Duplicating production cache from settings.py and using it whether we're in production mode or not
-MEMCACHED_HOST = os.environ.get("MEMCACHED_SERVICE_HOST", "127.0.0.1")
-MEMCACHED_PORT = os.environ.get("MEMCACHED_SERVICE_PORT", "11211")
+MEMCACHED_HOST = os.environ.get("DT_MEMCACHED_SERVICE_HOST", "127.0.0.1")
+MEMCACHED_PORT = os.environ.get("DT_MEMCACHED_SERVICE_PORT", "11211")
 from ietf import __version__
 CACHES = {
     "default": {