ci: merge pull request #7416 from ietf-tools/feat/k8s

ci: k8s deployment files
This commit is contained in:
Robert Sparks 2024-05-16 10:58:53 -05:00 committed by GitHub
commit de8b3b5ce3
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
30 changed files with 1106 additions and 544 deletions

View file

@ -50,3 +50,9 @@ indent_size = 2
[ietf/**.html] [ietf/**.html]
insert_final_newline = false insert_final_newline = false
# Settings for Kubernetes yaml
# ---------------------------------------------------------
# Use 2-space indents
[k8s/**.yaml]
indent_size = 2

View file

@ -220,7 +220,7 @@ jobs:
.devcontainer .devcontainer
.github .github
.vscode .vscode
helm k8s
playwright playwright
svn-history svn-history
docker-compose.yml docker-compose.yml

View file

@ -3,14 +3,36 @@ LABEL maintainer="IETF Tools Team <tools-discuss@ietf.org>"
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
# uid 498 = wwwrun and gid 496 = www on ietfa
RUN groupadd -g 1000 datatracker && \
useradd -c "Datatracker User" -u 1000 -g datatracker -m -s /bin/false datatracker
RUN apt-get purge -y imagemagick imagemagick-6-common
# Install libreoffice (needed via PPT2PDF_COMMAND)
RUN echo "deb http://deb.debian.org/debian bullseye-backports main" > /etc/apt/sources.list.d/bullseye-backports.list && \
apt-get update && \
apt-get -qyt bullseye-backports install libreoffice-nogui
COPY . . COPY . .
COPY ./dev/build/start.sh ./start.sh COPY ./dev/build/start.sh ./start.sh
RUN pip3 --disable-pip-version-check --no-cache-dir install -r requirements.txt COPY ./dev/build/datatracker-start.sh ./datatracker-start.sh
COPY ./dev/build/celery-start.sh ./celery-start.sh
RUN pip3 --disable-pip-version-check --no-cache-dir install -r requirements.txt && \
echo '# empty' > ietf/settings_local.py && \
ietf/manage.py patch_libraries && \
rm -f ietf/settings_local.py
RUN chmod +x start.sh && \ RUN chmod +x start.sh && \
chmod +x datatracker-start.sh && \
chmod +x celery-start.sh && \
chmod +x docker/scripts/app-create-dirs.sh && \ chmod +x docker/scripts/app-create-dirs.sh && \
sh ./docker/scripts/app-create-dirs.sh sh ./docker/scripts/app-create-dirs.sh
VOLUME [ "/assets" ] RUN mkdir -p /a
VOLUME [ "/a" ]
EXPOSE 8000 EXPOSE 8000

22
dev/build/celery-start.sh Normal file
View file

@ -0,0 +1,22 @@
#!/bin/bash
#
# Run a celery worker
#
# All arguments are passed straight through to the `celery` CLI
# (e.g. "--app=ietf worker" or "--app=ietf beat").

# Sanity-check the Django project before starting the worker.
# NOTE(review): a failed check does not abort the script — presumably
# best-effort by design; confirm before adding `set -e`.
echo "Running Datatracker checks..."
./ietf/manage.py check

cleanup () {
    # Cleanly terminate the celery app by sending it a TERM, then waiting for it to exit.
    if [[ -n "${celery_pid}" ]]; then
        echo "Gracefully terminating celery worker. This may take a few minutes if tasks are in progress..."
        kill -TERM "${celery_pid}"
        wait "${celery_pid}"
    fi
}

# On TERM: first disable the TERM handler (so cleanup is not re-entered),
# then forward the shutdown to the celery process.
trap 'trap "" TERM; cleanup' TERM

# start celery in the background so we can trap the TERM signal
celery "$@" &
celery_pid=$!
wait "${celery_pid}"

View file

@ -0,0 +1,17 @@
#!/bin/bash
#
# Start the Datatracker web front end: run Django system checks and
# database migrations, then serve the WSGI app with gunicorn on :8000.
#
# NOTE(review): without `set -e`, a failed check/migrate still starts
# gunicorn — confirm whether fail-fast is wanted here.

echo "Running Datatracker checks..."
./ietf/manage.py check

echo "Running Datatracker migrations..."
./ietf/manage.py migrate --settings=settings_local

echo "Starting Datatracker..."

# Each gunicorn option can be overridden via a DATATRACKER_GUNICORN_*
# environment variable; the :-defaults below apply otherwise.
gunicorn \
          --workers "${DATATRACKER_GUNICORN_WORKERS:-9}" \
          --max-requests "${DATATRACKER_GUNICORN_MAX_REQUESTS:-32768}" \
          --timeout "${DATATRACKER_GUNICORN_TIMEOUT:-180}" \
          --bind :8000 \
          --log-level "${DATATRACKER_GUNICORN_LOG_LEVEL:-info}" \
          ietf.wsgi:application

View file

@ -1,10 +1,20 @@
#!/bin/bash #!/bin/bash
#
echo "Running Datatracker checks..." # Environment config:
./ietf/manage.py check #
# CONTAINER_ROLE - datatracker, celery, or beat (defaults to datatracker)
echo "Running Datatracker migrations..." #
./ietf/manage.py migrate --settings=settings_local case "${CONTAINER_ROLE:-datatracker}" in
datatracker)
echo "Starting Datatracker..." exec ./datatracker-start.sh
./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local ;;
celery)
exec ./celery-start.sh --app=ietf worker
;;
beat)
exec ./celery-start.sh --app=ietf beat
;;
*)
echo "Unknown role '${CONTAINER_ROLE}'"
exit 255
esac

View file

@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View file

@ -1,23 +0,0 @@
apiVersion: v2
name: datatracker
description: The day-to-day front-end to the IETF database for people who work on IETF standards.
home: https://datatracker.ietf.org
sources:
- https://github.com/ietf-tools/datatracker
maintainers:
- name: IETF Tools Team
email: tools-discuss@ietf.org
url: https://github.com/ietf-tools
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0"

View file

@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "datatracker.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "datatracker.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "datatracker.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "datatracker.labels" -}}
helm.sh/chart: {{ include "datatracker.chart" . }}
{{ include "datatracker.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "datatracker.selectorLabels" -}}
app.kubernetes.io/name: {{ include "datatracker.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "datatracker.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "datatracker.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View file

@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "datatracker.fullname" . }}
labels:
{{- include "datatracker.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
revisionHistoryLimit: {{ .Values.revisionHistoryLimit }}
selector:
matchLabels:
{{- include "datatracker.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "datatracker.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "datatracker.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ default "latest" .Values.image.tag }}"
imagePullPolicy: {{ default "IfNotPresent" .Values.image.imagePullPolicy }}
env:
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: 8000
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
startupProbe:
{{- toYaml .Values.startupProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}

View file

@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "datatracker.fullname" . }}
labels:
{{- include "datatracker.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "datatracker.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View file

@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "datatracker.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "datatracker.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View file

@ -1,19 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{include "datatracker.fullname" .}}
labels: {{- include "datatracker.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations:
{{- range $key, $value := . }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
type: {{.Values.service.type}}
ports:
- port: {{ default "80" .Values.service.port}}
targetPort: http
protocol: TCP
name: http
selector: {{- include "datatracker.selectorLabels" . | nindent 4}}

View file

@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "datatracker.serviceAccountName" . }}
labels:
{{- include "datatracker.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end -}}

View file

@ -1,118 +0,0 @@
# Default values for datatracker.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: "ghcr.io/ietf-tools/datatracker"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
# tag: "v1.1.0"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
livenessProbe:
httpGet:
path: /healthz
port: http
readinessProbe:
httpGet:
path: /healthz
port: http
startupProbe:
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 60
httpGet:
path: /healthz
port: http
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: datatracker.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}

View file

@ -1,14 +1,20 @@
import os import os
import scout_apm.celery import scout_apm.celery
from celery import Celery import celery
from scout_apm.api import Config from scout_apm.api import Config
# Disable celery's internal logging configuration, we set it up via Django
@celery.signals.setup_logging.connect
def on_setup_logging(**kwargs):
pass
# Set the default Django settings module for the 'celery' program # Set the default Django settings module for the 'celery' program
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ietf.settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ietf.settings')
app = Celery('ietf') app = celery.Celery('ietf')
# Using a string here means the worker doesn't have to serialize # Using a string here means the worker doesn't have to serialize
# the configuration object to child processes. # the configuration object to child processes.
@ -17,10 +23,13 @@ app = Celery('ietf')
app.config_from_object('django.conf:settings', namespace='CELERY') app.config_from_object('django.conf:settings', namespace='CELERY')
# Turn on Scout APM celery instrumentation if configured in the environment # Turn on Scout APM celery instrumentation if configured in the environment
scout_key = os.environ.get("SCOUT_KEY", "") scout_key = os.environ.get("DATATRACKER_SCOUT_KEY", None)
scout_name = os.environ.get("SCOUT_NAME", "") if scout_key is not None:
scout_core_agent_socket_path = os.environ.get("SCOUT_CORE_AGENT_SOCKET_PATH", "tcp://scoutapm:6590") scout_name = os.environ.get("DATATRACKER_SCOUT_NAME", "Datatracker")
if scout_key and scout_name: scout_core_agent_socket_path = "tcp://{host}:{port}".format(
host=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_HOST", "localhost"),
port=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_PORT", "6590"),
)
Config.set( Config.set(
key=scout_key, key=scout_key,
name=scout_name, name=scout_name,

View file

@ -949,9 +949,9 @@ def post_process(doc):
Does post processing on uploaded file. Does post processing on uploaded file.
- Convert PPT to PDF - Convert PPT to PDF
''' '''
if is_powerpoint(doc) and hasattr(settings, 'SECR_PPT2PDF_COMMAND'): if is_powerpoint(doc) and hasattr(settings, 'PPT2PDF_COMMAND'):
try: try:
cmd = list(settings.SECR_PPT2PDF_COMMAND) # Don't operate on the list actually in settings cmd = list(settings.PPT2PDF_COMMAND) # Don't operate on the list actually in settings
cmd.append(doc.get_file_path()) # outdir cmd.append(doc.get_file_path()) # outdir
cmd.append(os.path.join(doc.get_file_path(), doc.uploaded_filename)) # filename cmd.append(os.path.join(doc.get_file_path(), doc.uploaded_filename)) # filename
subprocess.check_call(cmd) subprocess.check_call(cmd)

View file

@ -3,14 +3,11 @@
import datetime import datetime
import os
import shutil
from pyquery import PyQuery from pyquery import PyQuery
import debug # pyflakes:ignore import debug # pyflakes:ignore
from django.conf import settings
from django.urls import reverse from django.urls import reverse
from django.utils import timezone from django.utils import timezone
@ -27,24 +24,6 @@ from ietf.utils.test_utils import TestCase
class SecrMeetingTestCase(TestCase): class SecrMeetingTestCase(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['AGENDA_PATH'] settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['AGENDA_PATH']
def setUp(self):
super().setUp()
self.bluesheet_dir = self.tempdir('bluesheet')
self.bluesheet_path = os.path.join(self.bluesheet_dir,'blue_sheet.rtf')
self.saved_secr_blue_sheet_path = settings.SECR_BLUE_SHEET_PATH
settings.SECR_BLUE_SHEET_PATH = self.bluesheet_path
# n.b., the bluesheet upload relies on SECR_PROCEEDINGS_DIR being the same
# as AGENDA_PATH. This is probably a bug, but may not be worth fixing if
# the secr app is on the way out.
self.saved_secr_proceedings_dir = settings.SECR_PROCEEDINGS_DIR
settings.SECR_PROCEEDINGS_DIR = settings.AGENDA_PATH
def tearDown(self):
settings.SECR_PROCEEDINGS_DIR = self.saved_secr_proceedings_dir
settings.SECR_BLUE_SHEET_PATH = self.saved_secr_blue_sheet_path
shutil.rmtree(self.bluesheet_dir)
super().tearDown()
def test_main(self): def test_main(self):
"Main Test" "Main Test"

View file

@ -125,6 +125,10 @@ FORM_RENDERER = "django.forms.renderers.DjangoDivFormRenderer"
# In the future (relative to 4.2), the default will become 'django.db.models.BigAutoField.' # In the future (relative to 4.2), the default will become 'django.db.models.BigAutoField.'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
# OIDC configuration
_SITE_URL = os.environ.get("OIDC_SITE_URL", None)
if _SITE_URL is not None:
SITE_URL = _SITE_URL
if SERVER_MODE == 'production': if SERVER_MODE == 'production':
MEDIA_ROOT = '/a/www/www6s/lib/dt/media/' MEDIA_ROOT = '/a/www/www6s/lib/dt/media/'
@ -236,7 +240,7 @@ LOGGING = {
# #
'loggers': { 'loggers': {
'django': { 'django': {
'handlers': ['debug_console', 'mail_admins',], 'handlers': ['debug_console', 'mail_admins'],
'level': 'INFO', 'level': 'INFO',
}, },
'django.request': { 'django.request': {
@ -256,7 +260,11 @@ LOGGING = {
'level': 'DEBUG', 'level': 'DEBUG',
}, },
'datatracker': { 'datatracker': {
'handlers': ['syslog'], 'handlers': ['debug_console'],
'level': 'INFO',
},
'celery': {
'handlers': ['debug_console'],
'level': 'INFO', 'level': 'INFO',
}, },
}, },
@ -281,13 +289,6 @@ LOGGING = {
'class': 'logging.StreamHandler', 'class': 'logging.StreamHandler',
'formatter': 'django.server', 'formatter': 'django.server',
}, },
'syslog': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'facility': 'user',
'formatter': 'plain',
'address': '/dev/log',
},
'mail_admins': { 'mail_admins': {
'level': 'ERROR', 'level': 'ERROR',
'filters': [ 'filters': [
@ -976,7 +977,7 @@ DE_GFM_BINARY = '/usr/bin/de-gfm.ruby2.5'
DAYS_TO_EXPIRE_REGISTRATION_LINK = 3 DAYS_TO_EXPIRE_REGISTRATION_LINK = 3
MINUTES_TO_EXPIRE_RESET_PASSWORD_LINK = 60 MINUTES_TO_EXPIRE_RESET_PASSWORD_LINK = 60
HTPASSWD_COMMAND = "/usr/bin/htpasswd" HTPASSWD_COMMAND = "/usr/bin/htpasswd"
HTPASSWD_FILE = "/www/htpasswd" HTPASSWD_FILE = "/a/www/htpasswd"
# Generation of pdf files # Generation of pdf files
GHOSTSCRIPT_COMMAND = "/usr/bin/gs" GHOSTSCRIPT_COMMAND = "/usr/bin/gs"
@ -987,12 +988,11 @@ BIBXML_BASE_PATH = '/a/ietfdata/derived/bibxml'
# Timezone files for iCalendar # Timezone files for iCalendar
TZDATA_ICS_PATH = BASE_DIR + '/../vzic/zoneinfo/' TZDATA_ICS_PATH = BASE_DIR + '/../vzic/zoneinfo/'
SECR_BLUE_SHEET_PATH = '/a/www/ietf-datatracker/documents/blue_sheet.rtf' DATATRACKER_MAX_UPLOAD_SIZE = 40960000
SECR_BLUE_SHEET_URL = IDTRACKER_BASE_URL + '/documents/blue_sheet.rtf' PPT2PDF_COMMAND = [
SECR_INTERIM_LISTING_DIR = '/a/www/www6/meeting/interim' "/usr/bin/soffice", "--headless", "--convert-to", "pdf:writer_globaldocument_pdf_Export", "--outdir"
SECR_MAX_UPLOAD_SIZE = 40960000 ]
SECR_PROCEEDINGS_DIR = '/a/www/www6s/proceedings/'
SECR_PPT2PDF_COMMAND = ['/usr/bin/soffice','--headless','--convert-to','pdf:writer_globaldocument_pdf_Export','--outdir']
STATS_REGISTRATION_ATTENDEES_JSON_URL = 'https://registration.ietf.org/{number}/attendees/' STATS_REGISTRATION_ATTENDEES_JSON_URL = 'https://registration.ietf.org/{number}/attendees/'
PROCEEDINGS_VERSION_CHANGES = [ PROCEEDINGS_VERSION_CHANGES = [
0, # version 1 0, # version 1
@ -1204,81 +1204,83 @@ else:
MIDDLEWARE += DEV_MIDDLEWARE MIDDLEWARE += DEV_MIDDLEWARE
TEMPLATES[0]['OPTIONS']['context_processors'] += DEV_TEMPLATE_CONTEXT_PROCESSORS TEMPLATES[0]['OPTIONS']['context_processors'] += DEV_TEMPLATE_CONTEXT_PROCESSORS
if 'CACHES' not in locals(): if "CACHES" not in locals():
if SERVER_MODE == 'production': if SERVER_MODE == "production":
MEMCACHED_HOST = os.environ.get("MEMCACHED_SERVICE_HOST", "127.0.0.1")
MEMCACHED_PORT = os.environ.get("MEMCACHED_SERVICE_PORT", "11211")
CACHES = { CACHES = {
'default': { "default": {
'BACKEND': 'ietf.utils.cache.LenientMemcacheCache', "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
'LOCATION': '127.0.0.1:11211', "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
'VERSION': __version__, "VERSION": __version__,
'KEY_PREFIX': 'ietf:dt', "KEY_PREFIX": "ietf:dt",
'KEY_FUNCTION': lambda key, key_prefix, version: ( "KEY_FUNCTION": lambda key, key_prefix, version: (
f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}" f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
), ),
}, },
'sessions': { "sessions": {
'BACKEND': 'ietf.utils.cache.LenientMemcacheCache', "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
'LOCATION': '127.0.0.1:11211', "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
# No release-specific VERSION setting. # No release-specific VERSION setting.
'KEY_PREFIX': 'ietf:dt', "KEY_PREFIX": "ietf:dt",
}, },
'htmlized': { "htmlized": {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
'LOCATION': '/a/cache/datatracker/htmlized', "LOCATION": "/a/cache/datatracker/htmlized",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 100000, # 100,000 "MAX_ENTRIES": 100000, # 100,000
}, },
}, },
'pdfized': { "pdfized": {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
'LOCATION': '/a/cache/datatracker/pdfized', "LOCATION": "/a/cache/datatracker/pdfized",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 100000, # 100,000 "MAX_ENTRIES": 100000, # 100,000
}, },
}, },
'slowpages': { "slowpages": {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
'LOCATION': '/a/cache/datatracker/slowpages', "LOCATION": "/a/cache/datatracker/slowpages",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 5000, "MAX_ENTRIES": 5000,
}, },
}, },
} }
else: else:
CACHES = { CACHES = {
'default': { "default": {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache', "BACKEND": "django.core.cache.backends.dummy.DummyCache",
#'BACKEND': 'ietf.utils.cache.LenientMemcacheCache', #'BACKEND': 'ietf.utils.cache.LenientMemcacheCache',
#'LOCATION': '127.0.0.1:11211', #'LOCATION': '127.0.0.1:11211',
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'VERSION': __version__, "VERSION": __version__,
'KEY_PREFIX': 'ietf:dt', "KEY_PREFIX": "ietf:dt",
}, },
'sessions': { "sessions": {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
}, },
'htmlized': { "htmlized": {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache', "BACKEND": "django.core.cache.backends.dummy.DummyCache",
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/cache/datatracker/htmlized', "LOCATION": "/var/cache/datatracker/htmlized",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 1000, "MAX_ENTRIES": 1000,
}, },
}, },
'pdfized': { "pdfized": {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache', "BACKEND": "django.core.cache.backends.dummy.DummyCache",
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/cache/datatracker/pdfized', "LOCATION": "/var/cache/datatracker/pdfized",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 1000, "MAX_ENTRIES": 1000,
}, },
}, },
'slowpages': { "slowpages": {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache', "BACKEND": "django.core.cache.backends.dummy.DummyCache",
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', #'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/cache/datatracker/', "LOCATION": "/var/cache/datatracker/",
'OPTIONS': { "OPTIONS": {
'MAX_ENTRIES': 5000, "MAX_ENTRIES": 5000,
}, },
}, },
} }

View file

@ -60,3 +60,36 @@ MIDDLEWARE = [ c for c in MIDDLEWARE if not c in DEV_MIDDLEWARE ] # pyflakes:ign
TEMPLATES[0]['OPTIONS']['context_processors'] = [ p for p in TEMPLATES[0]['OPTIONS']['context_processors'] if not p in DEV_TEMPLATE_CONTEXT_PROCESSORS ] # pyflakes:ignore TEMPLATES[0]['OPTIONS']['context_processors'] = [ p for p in TEMPLATES[0]['OPTIONS']['context_processors'] if not p in DEV_TEMPLATE_CONTEXT_PROCESSORS ] # pyflakes:ignore
REQUEST_PROFILE_STORE_ANONYMOUS_SESSIONS = False REQUEST_PROFILE_STORE_ANONYMOUS_SESSIONS = False
# Override loggers with a safer set in case things go to the log during testing. Specifically,
# make sure there are no syslog loggers that might send things to a real syslog.
LOGGING["loggers"] = { # pyflakes:ignore
'django': {
'handlers': ['debug_console'],
'level': 'INFO',
},
'django.request': {
'handlers': ['debug_console'],
'level': 'ERROR',
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
},
'django.security': {
'handlers': ['debug_console', ],
'level': 'INFO',
},
'oidc_provider': {
'handlers': ['debug_console', ],
'level': 'DEBUG',
},
'datatracker': {
'handlers': ['debug_console'],
'level': 'INFO',
},
'celery': {
'handlers': ['debug_console'],
'level': 'INFO',
},
}

View file

@ -0,0 +1,31 @@
# Copyright The IETF Trust 2024, All Rights Reserved
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from pathlib import Path
from ietf.utils import patch
class Command(BaseCommand):
    """Apply IETF patches to libraries"""

    requires_system_checks = tuple()

    def handle(self, *args, **options):
        """Parse each configured patch file and apply it to the installed libraries.

        Raises CommandError if a patch file cannot be parsed or fails to apply.
        Patches that were already applied are reported and treated as success.
        """
        # Directory containing installed packages (parent of the django package).
        site_packages = Path(django.__file__).parent.parent
        # Repository root: one level above BASE_DIR (the ietf/ package directory).
        repo_root = Path(settings.BASE_DIR).parent
        # All patches in settings.CHECKS_LIBRARY_PATCHES_TO_APPLY must have a
        # relative file path starting from the site-packages dir, e.g.
        # 'django/db/models/fields/__init__.py'
        for patch_file in settings.CHECKS_LIBRARY_PATCHES_TO_APPLY:
            parsed = patch.fromfile(repo_root / Path(patch_file))
            if not parsed:
                raise CommandError(f"Could not parse patch file '{patch_file}'")
            if not parsed.apply(root=bytes(site_packages)):
                raise CommandError(f"Could not apply the patch from '{patch_file}'")
            message = (
                f"Patch from '{patch_file}' was already applied"
                if parsed.already_patched
                else f"Applied the patch from '{patch_file}'"
            )
            self.stdout.write(message)

View file

@ -60,6 +60,7 @@ class RegexStringValidator(object):
validate_regular_expression_string = RegexStringValidator() validate_regular_expression_string = RegexStringValidator()
def validate_file_size(file, missing_ok=False): def validate_file_size(file, missing_ok=False):
try: try:
size = file.size size = file.size
@ -69,8 +70,14 @@ def validate_file_size(file, missing_ok=False):
else: else:
raise raise
if size > settings.SECR_MAX_UPLOAD_SIZE: if size > settings.DATATRACKER_MAX_UPLOAD_SIZE:
raise ValidationError('Please keep filesize under %s. Requested upload size was %s' % (filesizeformat(settings.SECR_MAX_UPLOAD_SIZE), filesizeformat(file.size))) raise ValidationError(
"Please keep filesize under {}. Requested upload size was {}".format(
filesizeformat(settings.DATATRACKER_MAX_UPLOAD_SIZE),
filesizeformat(file.size)
)
)
def validate_mime_type(file, valid, missing_ok=False): def validate_mime_type(file, valid, missing_ok=False):
try: try:

61
k8s/beat.yaml Normal file
View file

@ -0,0 +1,61 @@
# Deployment running the celery beat scheduler for the Datatracker.
# Exactly one replica with a Recreate strategy — two concurrent beat
# schedulers would double-fire periodic tasks.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: beat
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: beat
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: beat
    spec:
      securityContext:
        runAsNonRoot: true
      containers:
        - name: beat
          # $APP_IMAGE_TAG is substituted at deploy time — TODO confirm the
          # substitution mechanism (e.g. envsubst/CI templating).
          image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
          imagePullPolicy: Always
          ports:
            - containerPort: 8000
              name: http
              protocol: TCP
          volumeMounts:
            - name: dt-vol
              mountPath: /a
            - name: dt-tmp
              mountPath: /tmp
            # Overlay settings_local.py from the config map onto the app tree.
            - name: dt-cfg
              mountPath: /workspace/ietf/settings_local.py
              subPath: settings_local.py
          env:
            # Selects the "beat" branch of the image's start.sh dispatcher.
            - name: "CONTAINER_ROLE"
              value: "beat"
          envFrom:
            - configMapRef:
                name: django-config
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            # Root FS is read-only; writable paths are the /a and /tmp mounts.
            readOnlyRootFilesystem: true
            runAsUser: 1000
            runAsGroup: 1000
      volumes:
        # To be overridden with the actual shared volume
        - name: dt-vol
        - name: dt-tmp
          emptyDir:
            sizeLimit: "2Gi"
        - name: dt-cfg
          configMap:
            name: files-cfgmap
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      terminationGracePeriodSeconds: 30

80
k8s/celery.yaml Normal file
View file

@ -0,0 +1,80 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: celery
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: celery
strategy:
type: Recreate
template:
metadata:
labels:
app: celery
spec:
securityContext:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
image: "scoutapp/scoutapm:version-1.4.0"
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- "sh"
- "-c"
- "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
# -----------------------------------------------------
# Celery Container
# -----------------------------------------------------
- name: celery
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
ports:
- containerPort: 8000
name: http
protocol: TCP
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "celery"
envFrom:
- configMapRef:
name: django-config
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
volumes:
        # To be overridden with the actual shared volume
- name: dt-vol
- name: dt-tmp
emptyDir:
sizeLimit: "2Gi"
- name: dt-cfg
configMap:
name: files-cfgmap
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30

94
k8s/datatracker.yaml Normal file
View file

@ -0,0 +1,94 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: datatracker
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: datatracker
strategy:
type: Recreate
template:
metadata:
labels:
app: datatracker
spec:
securityContext:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
image: "scoutapp/scoutapm:version-1.4.0"
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
command:
- "sh"
- "-c"
- "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
# -----------------------------------------------------
# Datatracker Container
# -----------------------------------------------------
- name: datatracker
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
ports:
- containerPort: 8000
name: http
protocol: TCP
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "datatracker"
envFrom:
- configMapRef:
name: django-config
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
volumes:
        # To be overridden with the actual shared volume
- name: dt-vol
- name: dt-tmp
emptyDir:
sizeLimit: "2Gi"
- name: dt-cfg
configMap:
name: files-cfgmap
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
name: datatracker
spec:
type: ClusterIP
ports:
- port: 80
targetPort: http
protocol: TCP
name: http
selector:
app: datatracker

79
k8s/django-config.yaml Normal file
View file

@ -0,0 +1,79 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: django-config
data:
# n.b., these are debug values / non-secret secrets
DATATRACKER_SERVER_MODE: "development" # development for staging, production for production
DATATRACKER_ADMINS: |-
Robert Sparks <rjsparks@nostrum.com>
Ryan Cross <rcross@amsl.com>
Kesara Rathnayake <kesara@staff.ietf.org>
Jennifer Richards <jennifer@staff.ietf.org>
Nicolas Giard <nick@staff.ietf.org>
DATATRACKER_ALLOWED_HOSTS: ".ietf.org" # newline-separated list also allowed
# DATATRACKER_DATATRACKER_DEBUG: "false"
# DB access details - needs to be filled in
# DATATRACKER_DB_HOST: "db"
# DATATRACKER_DB_PORT: "5432"
# DATATRACKER_DB_NAME: "datatracker"
# DATATRACKER_DB_USER: "django" # secret
# DATATRACKER_DB_PASS: "RkTkDPFnKpko" # secret
DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret
# Set this to point testing / staging at the production statics server until we
# sort that out
# DATATRACKER_STATIC_URL: "https://static.ietf.org/dt/12.10.0/"
# DATATRACKER_EMAIL_DEBUG: "true"
# Outgoing email details
# DATATRACKER_EMAIL_HOST: "localhost" # defaults to localhost
# DATATRACKER_EMAIL_PORT: "2025" # defaults to 2025
# The value here is the default from settings.py (i.e., not actually secret)
DATATRACKER_NOMCOM_APP_SECRET_B64: "m9pzMezVoFNJfsvU9XSZxGnXnwup6P5ZgCQeEnROOoQ=" # secret
DATATRACKER_IANA_SYNC_PASSWORD: "this-is-the-iana-sync-password" # secret
DATATRACKER_RFC_EDITOR_SYNC_PASSWORD: "this-is-the-rfc-editor-sync-password" # secret
DATATRACKER_YOUTUBE_API_KEY: "this-is-the-youtube-api-key" # secret
DATATRACKER_GITHUB_BACKUP_API_KEY: "this-is-the-github-backup-api-key" # secret
# API key configuration
  DATATRACKER_API_KEY_TYPE: "ES265" # NOTE(review): "ES265" looks like a typo for "ES256" (JOSE ECDSA P-256) — confirm against the settings.py default before changing
# secret - value here is the default from settings.py (i.e., not actually secret)
DATATRACKER_API_PUBLIC_KEY_PEM_B64: |-
Ci0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tCk1Ga3dFd1lIS29aSXpqMENBUVlJS
29aSXpqMERBUWNEUWdBRXFWb2pzYW9mREpTY3VNSk4rdHNodW15Tk01TUUKZ2Fyel
ZQcWtWb3ZtRjZ5RTdJSi9kdjRGY1YrUUtDdEovck9TOGUzNlk4WkFFVll1dWtoZXM
weVoxdz09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=
# secret - value here is the default from settings.py (i.e., not actually secret)
DATATRACKER_API_PRIVATE_KEY_PEM_B64: |-
Ci0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLQpNSUdIQWdFQU1CTUdCeXFHU000O
UFnRUdDQ3FHU000OUF3RUhCRzB3YXdJQkFRUWdvSTZMSmtvcEtxOFhySGk5ClFxR1
F2RTRBODNURllqcUx6KzhnVUxZZWNzcWhSQU5DQUFTcFdpT3hxaDhNbEp5NHdrMzY
yeUc2Ykkwemt3U0IKcXZOVStxUldpK1lYcklUc2duOTIvZ1Z4WDVBb0swbitzNUx4
N2ZwanhrQVJWaTY2U0Y2elRKblgKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
# DATATRACKER_MEETECHO_API_BASE: "https://meetings.conf.meetecho.com/api/v1/"
DATATRACKER_MEETECHO_CLIENT_ID: "this-is-the-meetecho-client-id" # secret
DATATRACKER_MEETECHO_CLIENT_SECRET: "this-is-the-meetecho-client-secret" # secret
# DATATRACKER_MATOMO_SITE_ID: "7" # must be present to enable Matomo
# DATATRACKER_MATOMO_DOMAIN_PATH: "analytics.ietf.org"
CELERY_PASSWORD: "this-is-a-secret" # secret
# Only one of these may be set
# DATATRACKER_APP_API_TOKENS_JSON_B64: "e30K" # secret
# DATATRACKER_APP_API_TOKENS_JSON: "{}" # secret
# use this to override default - one entry per line
# DATATRACKER_CSRF_TRUSTED_ORIGINS: |-
# https://datatracker.staging.ietf.org
# Scout configuration
DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
DATATRACKER_SCOUT_NAME: "StagingDatatracker"

13
k8s/kustomization.yaml Normal file
View file

@ -0,0 +1,13 @@
namespace: datatracker
namePrefix: dt-
configMapGenerator:
- name: files-cfgmap
files:
- settings_local.py
resources:
- beat.yaml
- celery.yaml
- datatracker.yaml
- django-config.yaml
- memcached.yaml
- rabbitmq.yaml

74
k8s/memcached.yaml Normal file
View file

@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: memcached
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: memcached
template:
metadata:
labels:
app: memcached
spec:
securityContext:
runAsNonRoot: true
containers:
- image: "quay.io/prometheus/memcached-exporter:v0.14.3"
imagePullPolicy: IfNotPresent
name: memcached-exporter
ports:
- name: metrics
containerPort: 9150
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 65534 # nobody
runAsGroup: 65534 # nobody
- image: "memcached:1.6-alpine"
imagePullPolicy: IfNotPresent
args: ["-m", "1024"]
name: memcached
ports:
- name: memcached
containerPort: 11211
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
# memcached image sets up uid/gid 11211
runAsUser: 11211
runAsGroup: 11211
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
name: memcached
annotations:
k8s.grafana.com/scrape: "true" # this is not a bool
k8s.grafana.com/metrics.portName: "metrics"
spec:
type: ClusterIP
ports:
- port: 11211
targetPort: memcached
protocol: TCP
name: memcached
- port: 9150
targetPort: metrics
protocol: TCP
name: metrics
selector:
app: memcached

175
k8s/rabbitmq.yaml Normal file
View file

@ -0,0 +1,175 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: rabbitmq
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
app: rabbitmq
template:
metadata:
labels:
app: rabbitmq
spec:
securityContext:
runAsNonRoot: true
initContainers:
# -----------------------------------------------------
# Init RabbitMQ data
# -----------------------------------------------------
- name: init-rabbitmq
image: busybox:stable
command:
- "sh"
- "-c"
- "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
securityContext:
runAsNonRoot: false
runAsUser: 0
readOnlyRootFilesystem: true
volumeMounts:
- name: "rabbitmq-data"
mountPath: "/mnt"
containers:
# -----------------------------------------------------
# RabbitMQ Container
# -----------------------------------------------------
- image: "ghcr.io/ietf-tools/datatracker-mq:3.12-alpine"
imagePullPolicy: Always
name: rabbitmq
ports:
- name: amqp
containerPort: 5672
protocol: TCP
volumeMounts:
- name: rabbitmq-data
mountPath: /var/lib/rabbitmq
subPath: "rabbitmq"
- name: rabbitmq-tmp
mountPath: /tmp
- name: rabbitmq-config
mountPath: "/etc/rabbitmq"
env:
- name: "CELERY_PASSWORD"
value: "this-is-a-secret"
livenessProbe:
exec:
command: ["rabbitmq-diagnostics", "-q", "ping"]
periodSeconds: 30
timeoutSeconds: 5
startupProbe:
initialDelaySeconds: 15
periodSeconds: 5
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 60
exec:
command: ["rabbitmq-diagnostics", "-q", "ping"]
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
# rabbitmq image sets up uid/gid 100/101
runAsUser: 100
runAsGroup: 101
volumes:
- name: rabbitmq-tmp
emptyDir:
sizeLimit: "50Mi"
- name: rabbitmq-config
configMap:
name: "rabbitmq-configmap"
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumeClaimTemplates:
- metadata:
name: rabbitmq-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi
# storageClassName: ""
---
apiVersion: v1
kind: ConfigMap
metadata:
name: rabbitmq-configmap
data:
definitions.json: |-
{
"permissions": [
{
"configure": ".*",
"read": ".*",
"user": "datatracker",
"vhost": "dt",
"write": ".*"
}
],
"users": [
{
"hashing_algorithm": "rabbit_password_hashing_sha256",
"limits": {},
"name": "datatracker",
"password_hash": "HJxcItcpXtBN+R/CH7dUelfKBOvdUs3AWo82SBw2yLMSguzb",
"tags": []
}
],
"vhosts": [
{
"limits": [],
"metadata": {
"description": "",
"tags": []
},
"name": "dt"
}
]
}
rabbitmq.conf: |-
# prevent guest from logging in over tcp
loopback_users.guest = true
# load saved definitions
load_definitions = /etc/rabbitmq/definitions.json
# Ensure that enough disk is available to flush to disk. To do this, need to limit the
# memory available to the container to something reasonable. See
# https://www.rabbitmq.com/production-checklist.html#monitoring-and-resource-usage
# for recommendations.
# 1-1.5 times the memory available to the container is adequate for disk limit
disk_free_limit.absolute = 6000MB
# This should be ~40% of the memory available to the container. Use an
    # absolute number because relative will be proportional to the full machine
# memory.
vm_memory_high_watermark.absolute = 1600MB
# Logging
log.file = false
log.console = true
log.console.level = info
log.console.formatter = json
---
apiVersion: v1
kind: Service
metadata:
name: rabbitmq
spec:
type: ClusterIP
clusterIP: None # headless service
ports:
- port: 5672
targetPort: amqp
protocol: TCP
name: amqp
selector:
app: rabbitmq

264
k8s/settings_local.py Normal file
View file

@ -0,0 +1,264 @@
# Copyright The IETF Trust 2007-2024, All Rights Reserved
# -*- coding: utf-8 -*-
from base64 import b64decode
from email.utils import parseaddr
import json
from ietf import __release_hash__
from ietf.settings import * # pyflakes:ignore
def _multiline_to_list(s):
"""Helper to split at newlines and conver to list"""
return [item.strip() for item in s.split("\n")]
# Server mode defaults to "development"; production deployments _must_ set
# DATATRACKER_SERVER_MODE="production" in the environment.
SERVER_MODE = os.environ.get("DATATRACKER_SERVER_MODE", "development")

# Secrets: each value below is required. A missing variable aborts startup
# with a RuntimeError rather than running with an empty/insecure default.

# Django's cryptographic signing key.
_SECRET_KEY = os.environ.get("DATATRACKER_DJANGO_SECRET_KEY", None)
if _SECRET_KEY is not None:
    SECRET_KEY = _SECRET_KEY
else:
    raise RuntimeError("DATATRACKER_DJANGO_SECRET_KEY must be set")

# NomCom application secret, supplied base64-encoded; stored decoded (bytes).
_NOMCOM_APP_SECRET_B64 = os.environ.get("DATATRACKER_NOMCOM_APP_SECRET_B64", None)
if _NOMCOM_APP_SECRET_B64 is not None:
    NOMCOM_APP_SECRET = b64decode(_NOMCOM_APP_SECRET_B64)
else:
    raise RuntimeError("DATATRACKER_NOMCOM_APP_SECRET_B64 must be set")

# Password for the IANA synchronization interface.
_IANA_SYNC_PASSWORD = os.environ.get("DATATRACKER_IANA_SYNC_PASSWORD", None)
if _IANA_SYNC_PASSWORD is not None:
    IANA_SYNC_PASSWORD = _IANA_SYNC_PASSWORD
else:
    raise RuntimeError("DATATRACKER_IANA_SYNC_PASSWORD must be set")
# Password for the RFC Editor synchronization interface; required.
_RFC_EDITOR_SYNC_PASSWORD = os.environ.get("DATATRACKER_RFC_EDITOR_SYNC_PASSWORD", None)
if _RFC_EDITOR_SYNC_PASSWORD is not None:
    # Reuse the value read above instead of re-reading the environment,
    # matching the pattern used by every other secret stanza in this file.
    RFC_EDITOR_SYNC_PASSWORD = _RFC_EDITOR_SYNC_PASSWORD
else:
    raise RuntimeError("DATATRACKER_RFC_EDITOR_SYNC_PASSWORD must be set")
# YouTube Data API key; required.
_YOUTUBE_API_KEY = os.environ.get("DATATRACKER_YOUTUBE_API_KEY", None)
if _YOUTUBE_API_KEY is not None:
    YOUTUBE_API_KEY = _YOUTUBE_API_KEY
else:
    raise RuntimeError("DATATRACKER_YOUTUBE_API_KEY must be set")

# API key used by the GitHub backup job; required.
_GITHUB_BACKUP_API_KEY = os.environ.get("DATATRACKER_GITHUB_BACKUP_API_KEY", None)
if _GITHUB_BACKUP_API_KEY is not None:
    GITHUB_BACKUP_API_KEY = _GITHUB_BACKUP_API_KEY
else:
    raise RuntimeError("DATATRACKER_GITHUB_BACKUP_API_KEY must be set")

# Signature algorithm identifier for datatracker API keys; required.
_API_KEY_TYPE = os.environ.get("DATATRACKER_API_KEY_TYPE", None)
if _API_KEY_TYPE is not None:
    API_KEY_TYPE = _API_KEY_TYPE
else:
    raise RuntimeError("DATATRACKER_API_KEY_TYPE must be set")

# API key pair, supplied as base64-encoded PEM; both required, stored decoded.
_API_PUBLIC_KEY_PEM_B64 = os.environ.get("DATATRACKER_API_PUBLIC_KEY_PEM_B64", None)
if _API_PUBLIC_KEY_PEM_B64 is not None:
    API_PUBLIC_KEY_PEM = b64decode(_API_PUBLIC_KEY_PEM_B64)
else:
    raise RuntimeError("DATATRACKER_API_PUBLIC_KEY_PEM_B64 must be set")

_API_PRIVATE_KEY_PEM_B64 = os.environ.get("DATATRACKER_API_PRIVATE_KEY_PEM_B64", None)
if _API_PRIVATE_KEY_PEM_B64 is not None:
    API_PRIVATE_KEY_PEM = b64decode(_API_PRIVATE_KEY_PEM_B64)
else:
    raise RuntimeError("DATATRACKER_API_PRIVATE_KEY_PEM_B64 must be set")

# Enable Django DEBUG only when DATATRACKER_DEBUG is exactly the word "true"
# (case-insensitive); anything else, including unset, leaves DEBUG off.
DEBUG = os.environ.get("DATATRACKER_DEBUG", "false").lower() == "true"

# DATATRACKER_ALLOWED_HOSTS is a newline-separated list of allowed hosts
# (parsed by _multiline_to_list); when unset, the ALLOWED_HOSTS inherited
# from ietf.settings is left in place.
_allowed_hosts_str = os.environ.get("DATATRACKER_ALLOWED_HOSTS", None)
if _allowed_hosts_str is not None:
    ALLOWED_HOSTS = _multiline_to_list(_allowed_hosts_str)

# PostgreSQL connection. Host/port/name/user have development defaults and are
# overridden per environment; DATATRACKER_DB_OPTS_JSON carries extra backend
# options as a JSON object (defaults to none).
DATABASES = {
    "default": {
        "HOST": os.environ.get("DATATRACKER_DB_HOST", "db"),
        "PORT": os.environ.get("DATATRACKER_DB_PORT", "5432"),
        "NAME": os.environ.get("DATATRACKER_DB_NAME", "datatracker"),
        "ENGINE": "django.db.backends.postgresql",
        "USER": os.environ.get("DATATRACKER_DB_USER", "django"),
        "PASSWORD": os.environ.get("DATATRACKER_DB_PASS", ""),
        "OPTIONS": json.loads(os.environ.get("DATATRACKER_DB_OPTS_JSON", "{}")),
    },
}
# DATATRACKER_ADMINS is a newline-delimited list of addresses parseable by
# email.utils.parseaddr; required.
_admins_str = os.environ.get("DATATRACKER_ADMINS", None)
if _admins_str is not None:
    ADMINS = [parseaddr(admin) for admin in _multiline_to_list(_admins_str)]
else:
    raise RuntimeError("DATATRACKER_ADMINS must be set")

# Outgoing mail: debug-server flag plus SMTP relay host/port.
USING_DEBUG_EMAIL_SERVER = os.environ.get("DATATRACKER_EMAIL_DEBUG", "false").lower() == "true"
EMAIL_HOST = os.environ.get("DATATRACKER_EMAIL_HOST", "localhost")
EMAIL_PORT = int(os.environ.get("DATATRACKER_EMAIL_PORT", "2025"))

# Celery broker (RabbitMQ). The password is required; broker host and vhost
# ("queue") default to the in-cluster service names.
_celery_password = os.environ.get("CELERY_PASSWORD", None)
if _celery_password is None:
    raise RuntimeError("CELERY_PASSWORD must be set")
CELERY_BROKER_URL = "amqp://datatracker:{password}@{host}/{queue}".format(
    host=os.environ.get("RABBITMQ_HOSTNAME", "dt-rabbitmq"),
    password=_celery_password,
    queue=os.environ.get("RABBITMQ_QUEUE", "dt")
)

# IANA / RFC Editor synchronization endpoints and registration stats URL.
IANA_SYNC_USERNAME = "ietfsync"
IANA_SYNC_CHANGES_URL = "https://datatracker.iana.org:4443/data-tracker/changes"
IANA_SYNC_PROTOCOLS_URL = "http://www.iana.org/protocols/"
RFC_EDITOR_NOTIFICATION_URL = "http://www.rfc-editor.org/parser/parser.php"
STATS_REGISTRATION_ATTENDEES_JSON_URL = 'https://registration.ietf.org/{number}/attendees/?apikey=redacted'

# Submission cutoff overrides; the commented values document the defaults
# that are deliberately NOT overridden here.
#FIRST_CUTOFF_DAYS = 12
#SECOND_CUTOFF_DAYS = 12
#SUBMISSION_CUTOFF_DAYS = 26
#SUBMISSION_CORRECTION_DAYS = 57
MEETING_MATERIALS_SUBMISSION_CUTOFF_DAYS = 26
MEETING_MATERIALS_SUBMISSION_CORRECTION_DAYS = 54

# htpasswd binary path in this image.
HTPASSWD_COMMAND = "/usr/bin/htpasswd2"
# Meetecho API credentials: both client id and secret are required. The API
# base URL may be overridden; it defaults to the production Meetecho endpoint.
_MEETECHO_CLIENT_ID = os.environ.get("DATATRACKER_MEETECHO_CLIENT_ID", None)
_MEETECHO_CLIENT_SECRET = os.environ.get("DATATRACKER_MEETECHO_CLIENT_SECRET", None)
if _MEETECHO_CLIENT_ID is not None and _MEETECHO_CLIENT_SECRET is not None:
    MEETECHO_API_CONFIG = {
        "api_base": os.environ.get(
            "DATATRACKER_MEETECHO_API_BASE",
            "https://meetings.conf.meetecho.com/api/v1/",
        ),
        "client_id": _MEETECHO_CLIENT_ID,
        "client_secret": _MEETECHO_CLIENT_SECRET,
        "request_timeout": 3.01,  # python-requests doc recommend slightly > a multiple of 3 seconds
    }
else:
    raise RuntimeError(
        "DATATRACKER_MEETECHO_CLIENT_ID and DATATRACKER_MEETECHO_CLIENT_SECRET must be set"
    )

# For APP_API_TOKENS, accept either base64-encoded JSON or raw JSON, but not
# both; unset means no tokens (empty dict).
if "DATATRACKER_APP_API_TOKENS_JSON_B64" in os.environ:
    if "DATATRACKER_APP_API_TOKENS_JSON" in os.environ:
        raise RuntimeError(
            "Only one of DATATRACKER_APP_API_TOKENS_JSON and DATATRACKER_APP_API_TOKENS_JSON_B64 may be set"
        )
    _APP_API_TOKENS_JSON = b64decode(os.environ.get("DATATRACKER_APP_API_TOKENS_JSON_B64"))
else:
    _APP_API_TOKENS_JSON = os.environ.get("DATATRACKER_APP_API_TOKENS_JSON", None)
if _APP_API_TOKENS_JSON is not None:
    APP_API_TOKENS = json.loads(_APP_API_TOKENS_JSON)
else:
    APP_API_TOKENS = {}
# Disable the extra copy-to address for outgoing mail.
EMAIL_COPY_TO = ""

# Until we teach the datatracker to look beyond cloudflare for this check
IDSUBMIT_MAX_DAILY_SAME_SUBMITTER = 5000

# Leave DATATRACKER_MATOMO_SITE_ID unset to disable Matomo reporting.
if "DATATRACKER_MATOMO_SITE_ID" in os.environ:
    MATOMO_DOMAIN_PATH = os.environ.get("DATATRACKER_MATOMO_DOMAIN_PATH", "analytics.ietf.org")
    MATOMO_SITE_ID = os.environ.get("DATATRACKER_MATOMO_SITE_ID")
    MATOMO_DISABLE_COOKIES = True

# Leave DATATRACKER_SCOUT_KEY unset to disable the Scout APM agent. The agent
# app is registered via PROD_PRE_APPS or DEV_PRE_APPS depending on SERVER_MODE,
# and the core agent is reached over TCP rather than downloaded/launched
# locally (host/port default to localhost:6590).
_SCOUT_KEY = os.environ.get("DATATRACKER_SCOUT_KEY", None)
if _SCOUT_KEY is not None:
    if SERVER_MODE == "production":
        PROD_PRE_APPS = ["scout_apm.django", ]
    else:
        DEV_PRE_APPS = ["scout_apm.django", ]
    SCOUT_MONITOR = True
    SCOUT_KEY = _SCOUT_KEY
    SCOUT_NAME = os.environ.get("DATATRACKER_SCOUT_NAME", "Datatracker")
    SCOUT_ERRORS_ENABLED = True
    SCOUT_SHUTDOWN_MESSAGE_ENABLED = False
    SCOUT_CORE_AGENT_SOCKET_PATH = "tcp://{host}:{port}".format(
        host=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_HOST", "localhost"),
        port=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_PORT", "6590"),
    )
    SCOUT_CORE_AGENT_DOWNLOAD = False
    SCOUT_CORE_AGENT_LAUNCH = False
    # Report the deployed release hash (first 7 chars) to Scout.
    SCOUT_REVISION_SHA = __release_hash__[:7]
# Path to the email alias lists. Used by ietf.utils.aliases
DRAFT_ALIASES_PATH = "/a/postfix/draft-aliases"
DRAFT_VIRTUAL_PATH = "/a/postfix/draft-virtual"
GROUP_ALIASES_PATH = "/a/postfix/group-aliases"
GROUP_VIRTUAL_PATH = "/a/postfix/group-virtual"

# Static files URL; defaults to the release-versioned static.ietf.org path.
STATIC_URL = os.environ.get("DATATRACKER_STATIC_URL", None)
if STATIC_URL is None:
    from ietf import __version__
    STATIC_URL = f"https://static.ietf.org/dt/{__version__}/"

# Set these to the same as "production" in settings.py, whether production mode or not
MEDIA_ROOT = "/a/www/www6s/lib/dt/media/"
MEDIA_URL = "https://www.ietf.org/lib/dt/media/"
PHOTOS_DIRNAME = "photo"
PHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME

# Normally only set for debug, but needed until we have a real FS
DJANGO_VITE_MANIFEST_PATH = os.path.join(BASE_DIR, 'static/dist-neue/manifest.json')

# Binaries that are different in the docker image
DE_GFM_BINARY = "/usr/local/bin/de-gfm"
IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"

# Duplicating production cache from settings.py and using it whether we're in production mode or not
MEMCACHED_HOST = os.environ.get("DT_MEMCACHED_SERVICE_HOST", "127.0.0.1")
MEMCACHED_PORT = os.environ.get("DT_MEMCACHED_SERVICE_PORT", "11211")
from ietf import __version__
CACHES = {
    "default": {
        "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
        "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
        "VERSION": __version__,
        "KEY_PREFIX": "ietf:dt",
        # Hash keys so arbitrary-length cache keys fit memcached's limits.
        # NOTE(review): assumes sha384 is brought into scope by the
        # `from ietf.settings import *` at the top of this file — confirm.
        "KEY_FUNCTION": lambda key, key_prefix, version: (
            f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
        ),
    },
    "sessions": {
        "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
        "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
        # No release-specific VERSION setting.
        "KEY_PREFIX": "ietf:dt",
    },
    "htmlized": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/htmlized",
        "OPTIONS": {
            "MAX_ENTRIES": 100000,  # 100,000
        },
    },
    "pdfized": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/pdfized",
        "OPTIONS": {
            "MAX_ENTRIES": 100000,  # 100,000
        },
    },
    "slowpages": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/slowpages",
        "OPTIONS": {
            "MAX_ENTRIES": 5000,
        },
    },
}

# Optional override for CSRF trusted origins; one origin per line.
_csrf_trusted_origins_str = os.environ.get("DATATRACKER_CSRF_TRUSTED_ORIGINS")
if _csrf_trusted_origins_str is not None:
    CSRF_TRUSTED_ORIGINS = _multiline_to_list(_csrf_trusted_origins_str)