ci: merge main to release (#7693)

Robert Sparks 2024-07-15 16:54:26 -05:00 committed by GitHub
commit 865bfb5451
20 changed files with 373 additions and 167 deletions

View file

@@ -9,14 +9,10 @@ updates:
directory: "/"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
- package-ecosystem: "docker"
directory: "/docker"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
- package-ecosystem: "pip"
directory: "/"
schedule:
@@ -27,8 +23,6 @@ updates:
directory: "/"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
groups:
yarn:
patterns:
@@ -37,8 +31,6 @@ updates:
directory: "/playwright"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
groups:
npm:
patterns:
@@ -47,8 +39,6 @@ updates:
directory: "/dev/coverage-action"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
groups:
npm:
patterns:
@@ -57,8 +47,6 @@ updates:
directory: "/dev/deploy-to-container"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
groups:
npm:
patterns:
@@ -67,8 +55,6 @@ updates:
directory: "/dev/diff"
schedule:
interval: "weekly"
reviewers:
- "ngpixel"
groups:
npm:
patterns:

View file

@@ -156,6 +156,11 @@ On Windows:
docker compose down -v --rmi all
docker image prune
```
### Updating an older environment
If you already have a clone (for example, from a previous codesprint) and are updating it, do the following before starting the datatracker from the updated image (see the sketch after this list):
* `rm ietf/settings_local.py` (the startup script will put a new one, appropriate to the current release, in place)
* Execute the `Clean all` sequence above.
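A minimal sketch of that update sequence (assumptions: you run it from the top of your clone, and the `Clean all` commands are the ones shown earlier in this README):
```bash
# Sketch: refresh an existing clone before starting the datatracker from the updated image
rm ietf/settings_local.py          # the startup script will regenerate this for the current release
docker compose down -v --rmi all   # the "Clean all" sequence shown above
docker image prune
```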
### Accessing PostgreSQL Port
@@ -197,4 +202,4 @@ drwxrwxr-x 5 100999 100999 4096 May 25 07:56 client
(etc...)
```
Try uninstalling Docker Desktop and installing Docker Compose manually. The Docker Compose bundled with Docker Desktop is incompatible with our software. See also [Rootless Docker: file ownership changes #3343](https://github.com/lando/lando/issues/3343), [Docker context desktop-linux has container permission issues #75](https://github.com/docker/desktop-linux/issues/75).
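A sketch of that workaround (assuming a Debian/Ubuntu host already set up with Docker's official apt repository; package names differ on other distributions):
```bash
# Sketch: replace Docker Desktop with Docker Engine plus the Compose plugin
sudo apt-get remove docker-desktop
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
docker compose version   # confirm the standalone Compose plugin is now in use
```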

View file

@@ -1,5 +1,11 @@
#!/bin/bash
if test $(basename $PWD ) != "docker"
then
echo "Run this from the docker directory" 1>&2
exit 1
fi
read -p "Stop and remove all containers, volumes and images for this project? [y/N] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
@@ -7,6 +13,5 @@ then
cd ..
echo "Shutting down any instance still running and purge images..."
docker compose down -v --rmi all
cd docker
echo "Done!"
fi

View file

@@ -1,5 +1,11 @@
#!/bin/bash
if test $(basename $PWD ) != "docker"
then
echo "Run this from the docker directory" 1>&2
exit 1
fi
cd ..
echo "Shutting down any instance still running..."
docker compose down
@@ -9,5 +15,5 @@ docker volume rm -f "${PROJNAME}_postgresdb-data"
echo "Rebuilding the DB image..."
docker compose pull db
docker compose build --no-cache db
cd docker
echo "Done!"

View file

@@ -806,7 +806,7 @@ class ApproveBallotTests(TestCase):
ballot = create_ballot_if_not_open(None, draft, ad, 'approve')
old_ballot_id = ballot.id
draft.set_state(State.objects.get(used=True, type="draft-iesg", slug="iesg-eva"))
url = urlreverse('ietf.doc.views_ballot.clear_ballot', kwargs=dict(name=draft.name,ballot_type_slug=draft.ballot_open('approve').ballot_type.slug))
url = urlreverse('ietf.doc.views_ballot.clear_ballot', kwargs=dict(name=draft.name,ballot_type_slug="approve"))
login_testing_unauthorized(self, "secretary", url)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
@@ -816,6 +816,11 @@ class ApproveBallotTests(TestCase):
self.assertIsNotNone(ballot)
self.assertEqual(ballot.ballotpositiondocevent_set.count(),0)
self.assertNotEqual(old_ballot_id, ballot.id)
# It's not valid to clear a ballot of a type where there's no matching state
url = urlreverse('ietf.doc.views_ballot.clear_ballot', kwargs=dict(name=draft.name,ballot_type_slug="statchg"))
r = self.client.post(url,{})
self.assertEqual(r.status_code, 404)
def test_ballot_downref_approve(self):
ad = Person.objects.get(name="Areað Irector")

View file

@@ -484,7 +484,47 @@ class StatusChangeTests(TestCase):
verify_relations(doc,'rfc9998','tobcp' )
verify_relations(doc,'rfc14' ,'tohist')
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Affected RFC list changed.'))
def test_clear_ballot(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_ballot.clear_ballot',kwargs=dict(name=doc.name, ballot_type_slug="statchg"))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
create_ballot_if_not_open(None, doc, Person.objects.get(user__username="secretary"), "statchg")
doc.set_state(State.objects.get(slug='iesgeval',type='statchg'))
old_ballot = doc.ballot_open("statchg")
self.assertIsNotNone(old_ballot)
r = self.client.post(url, dict())
self.assertEqual(r.status_code,302)
new_ballot = doc.ballot_open("statchg")
self.assertIsNotNone(new_ballot)
self.assertNotEqual(new_ballot, old_ballot)
self.assertEqual(doc.get_state_slug("statchg"),"iesgeval")
def test_clear_deferred_ballot(self):
doc = Document.objects.get(name='status-change-imaginary-mid-review')
url = urlreverse('ietf.doc.views_ballot.clear_ballot',kwargs=dict(name=doc.name, ballot_type_slug="statchg"))
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
create_ballot_if_not_open(None, doc, Person.objects.get(user__username="secretary"), "statchg")
doc.set_state(State.objects.get(slug='defer',type='statchg'))
old_ballot = doc.ballot_open("statchg")
self.assertIsNotNone(old_ballot)
r = self.client.post(url, dict())
self.assertEqual(r.status_code,302)
new_ballot = doc.ballot_open("statchg")
self.assertIsNotNone(new_ballot)
self.assertNotEqual(new_ballot, old_ballot)
self.assertEqual(doc.get_state_slug("statchg"),"iesgeval")
def setUp(self):
super().setUp()
IndividualRfcFactory(rfc_number=14,std_level_id='unkn') # draft was never issued

View file

@@ -399,11 +399,22 @@ def send_ballot_comment(request, name, ballot_id):
def clear_ballot(request, name, ballot_type_slug):
"""Clear all positions and discusses on every open ballot for a document."""
doc = get_object_or_404(Document, name=name)
# If there's no appropriate ballot type state, clearing would be an invalid action.
# This will need to be updated if we ever allow deferring IRTF ballots
if ballot_type_slug == "approve":
state_machine = "draft-iesg"
elif ballot_type_slug in ["statchg","conflrev"]:
state_machine = ballot_type_slug
else:
state_machine = None
state_slug = state_machine and doc.get_state_slug(state_machine)
if state_machine is None or state_slug is None:
raise Http404
if request.method == 'POST':
by = request.user.person
if close_ballot(doc, by, ballot_type_slug):
create_ballot_if_not_open(request, doc, by, ballot_type_slug)
if doc.get_state('draft-iesg').slug == 'defer':
if state_slug == "defer":
do_undefer_ballot(request,doc)
return redirect("ietf.doc.views_doc.document_main", name=doc.name)

View file

@@ -5,6 +5,7 @@ from django.conf.urls.static import static as static_url
from django.contrib import admin
from django.contrib.sitemaps import views as sitemap_views
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.http import HttpResponse
from django.urls import include, path
from django.views import static as static_view
from django.views.generic import TemplateView
@@ -35,6 +36,7 @@ sitemaps = {
urlpatterns = [
url(r'^$', views_search.frontpage),
url(r'^health/', lambda _: HttpResponse()),
url(r'^accounts/', include('ietf.ietfauth.urls')),
url(r'^admin/', admin.site.urls),
url(r'^admin/docs/', include('django.contrib.admindocs.urls')),

View file

@@ -679,3 +679,12 @@ class SearchableFieldTests(TestCase):
self.assertTrue(changed_form.has_changed())
unchanged_form = TestForm(initial={'test_field': [1]}, data={'test_field': [1]})
self.assertFalse(unchanged_form.has_changed())
class HealthTests(TestCase):
def test_health(self):
self.assertEqual(
self.client.get("/health/").status_code,
200,
)

k8s/README.md Normal file (5 lines)
View file

@@ -0,0 +1,5 @@
# Kustomize deployment
## Run locally
The `secrets.yaml` file is provided for reference only; it is not included automatically, so you must reference it manually in the `kustomization.yaml` file (see the sketch below).
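A minimal sketch of that manual reference (the other entries are whatever your `kustomization.yaml` already lists; only the `secrets.yaml` line is the addition):
```yaml
# kustomization.yaml (sketch): reference the local-only secrets manifest by hand
resources:
  # ...existing resources (auth.yaml, datatracker.yaml, etc.)...
  - secrets.yaml
```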

View file

@@ -19,6 +19,71 @@ spec:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# Auth Container
# -----------------------------------------------------
- name: auth
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-home
mountPath: /home/datatracker
- name: dt-xml2rfc-cache
mountPath: /var/cache/xml2rfc
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "datatracker"
# ensures the pod gets recreated on every deploy:
- name: "DEPLOY_UID"
value: "$DEPLOY_UID"
envFrom:
- secretRef:
name: dt-secrets-env
startupProbe:
httpGet:
port: 8000
path: /health/
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 30
timeoutSeconds: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
# -----------------------------------------------------
# Nginx Container
# -----------------------------------------------------
- name: nginx
image: "ghcr.io/nginxinc/nginx-unprivileged:1.27"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
name: http
protocol: TCP
livenessProbe:
httpGet:
port: 8080
path: /health/nginx
securityContext:
readOnlyRootFilesystem: true
volumeMounts:
- name: nginx-tmp
mountPath: /tmp
- name: dt-cfg
mountPath: /etc/nginx/conf.d/auth.conf
subPath: nginx-auth.conf
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
@@ -43,45 +108,6 @@ spec:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
# -----------------------------------------------------
# Datatracker Container
# -----------------------------------------------------
- name: datatracker
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
ports:
- containerPort: 8000
name: http
protocol: TCP
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-home
mountPath: /home/datatracker
- name: dt-xml2rfc-cache
mountPath: /var/cache/xml2rfc
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "datatracker"
# ensures the pod gets recreated on every deploy:
- name: "DEPLOY_UID"
value: "$DEPLOY_UID"
envFrom:
- configMapRef:
name: django-config
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
volumes:
# To be overridden with the actual shared volume
- name: dt-vol
@@ -97,6 +123,9 @@ spec:
- name: dt-cfg
configMap:
name: files-cfgmap
- name: nginx-tmp
emptyDir:
sizeLimit: "500Mi"
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 60
@@ -108,9 +137,13 @@ metadata:
spec:
type: ClusterIP
ports:
- port: 8080
- port: 80
targetPort: http
protocol: TCP
name: http
- port: 8080
targetPort: http
protocol: TCP
name: http-old
selector:
app: auth

View file

@@ -20,6 +20,9 @@ spec:
securityContext:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# Beat Container
# -----------------------------------------------------
- name: beat
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
@@ -39,8 +42,8 @@ spec:
- name: "CONTAINER_ROLE"
value: "beat"
envFrom:
- configMapRef:
name: django-config
- secretRef:
name: dt-secrets-env
securityContext:
allowPrivilegeEscalation: false
capabilities:

View file

@@ -21,31 +21,6 @@ spec:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
image: "scoutapp/scoutapm:version-1.4.0"
imagePullPolicy: IfNotPresent
# Replace command with one that will shut down on a TERM signal
# The ./core-agent start command line is from the scoutapm docker image
command:
- "sh"
- "-c"
- >-
trap './core-agent shutdown --tcp 0.0.0.0:6590' TERM;
./core-agent start --daemonize false --log-level debug --tcp 0.0.0.0:6590 &
wait $!
livenessProbe:
exec:
command:
- "sh"
- "-c"
- "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
# -----------------------------------------------------
# Celery Container
# -----------------------------------------------------
- name: celery
@@ -71,8 +46,8 @@ spec:
- name: "CONTAINER_ROLE"
value: "celery"
envFrom:
- configMapRef:
name: django-config
- secretRef:
name: dt-secrets-env
securityContext:
allowPrivilegeEscalation: false
capabilities:
@@ -81,6 +56,31 @@ spec:
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
image: "scoutapp/scoutapm:version-1.4.0"
imagePullPolicy: IfNotPresent
# Replace command with one that will shut down on a TERM signal
# The ./core-agent start command line is from the scoutapm docker image
command:
- "sh"
- "-c"
- >-
trap './core-agent shutdown --tcp 0.0.0.0:6590' TERM;
./core-agent start --daemonize false --log-level debug --tcp 0.0.0.0:6590 &
wait $!
livenessProbe:
exec:
command:
- "sh"
- "-c"
- "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
volumes:
# To be overridden with the actual shared volume
- name: dt-vol

View file

@@ -19,6 +19,71 @@ spec:
runAsNonRoot: true
containers:
# -----------------------------------------------------
# Datatracker Container
# -----------------------------------------------------
- name: datatracker
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-home
mountPath: /home/datatracker
- name: dt-xml2rfc-cache
mountPath: /var/cache/xml2rfc
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "datatracker"
# ensures the pod gets recreated on every deploy:
- name: "DEPLOY_UID"
value: "$DEPLOY_UID"
envFrom:
- secretRef:
name: dt-secrets-env
startupProbe:
httpGet:
port: 8000
path: /health/
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 30
timeoutSeconds: 3
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
# -----------------------------------------------------
# Nginx Container
# -----------------------------------------------------
- name: nginx
image: "ghcr.io/nginxinc/nginx-unprivileged:1.27"
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
name: http
protocol: TCP
livenessProbe:
httpGet:
port: 8080
path: /health/nginx
securityContext:
readOnlyRootFilesystem: true
volumeMounts:
- name: nginx-tmp
mountPath: /tmp
- name: dt-cfg
mountPath: /etc/nginx/conf.d/datatracker.conf
subPath: nginx-datatracker.conf
# -----------------------------------------------------
# ScoutAPM Container
# -----------------------------------------------------
- name: scoutapm
@@ -43,45 +108,6 @@ spec:
readOnlyRootFilesystem: true
runAsUser: 65534 # "nobody" user by default
runAsGroup: 65534 # "nogroup" group by default
# -----------------------------------------------------
# Datatracker Container
# -----------------------------------------------------
- name: datatracker
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
imagePullPolicy: Always
ports:
- containerPort: 8000
name: http
protocol: TCP
volumeMounts:
- name: dt-vol
mountPath: /a
- name: dt-tmp
mountPath: /tmp
- name: dt-home
mountPath: /home/datatracker
- name: dt-xml2rfc-cache
mountPath: /var/cache/xml2rfc
- name: dt-cfg
mountPath: /workspace/ietf/settings_local.py
subPath: settings_local.py
env:
- name: "CONTAINER_ROLE"
value: "datatracker"
# ensures the pod gets recreated on every deploy:
- name: "DEPLOY_UID"
value: "$DEPLOY_UID"
envFrom:
- configMapRef:
name: django-config
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 1000
runAsGroup: 1000
initContainers:
- name: migration
image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
@@ -126,6 +152,9 @@ spec:
- name: dt-cfg
configMap:
name: files-cfgmap
- name: nginx-tmp
emptyDir:
sizeLimit: "500Mi"
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 60

View file

@@ -3,12 +3,13 @@ namePrefix: dt-
configMapGenerator:
- name: files-cfgmap
files:
- nginx-auth.conf
- nginx-datatracker.conf
- settings_local.py
resources:
- auth.yaml
- beat.yaml
- celery.yaml
- datatracker.yaml
- django-config.yaml
- memcached.yaml
- rabbitmq.yaml

View file

@@ -16,21 +16,9 @@ spec:
securityContext:
runAsNonRoot: true
containers:
- image: "quay.io/prometheus/memcached-exporter:v0.14.3"
imagePullPolicy: IfNotPresent
name: memcached-exporter
ports:
- name: metrics
containerPort: 9150
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 65534 # nobody
runAsGroup: 65534 # nobody
# -----------------------------------------------------
# Memcached
# -----------------------------------------------------
- image: "memcached:1.6-alpine"
imagePullPolicy: IfNotPresent
args: ["-m", "1024"]
@@ -48,6 +36,24 @@ spec:
# memcached image sets up uid/gid 11211
runAsUser: 11211
runAsGroup: 11211
# -----------------------------------------------------
# Memcached Exporter for Prometheus
# -----------------------------------------------------
- image: "quay.io/prometheus/memcached-exporter:v0.14.3"
imagePullPolicy: IfNotPresent
name: memcached-exporter
ports:
- name: metrics
containerPort: 9150
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 65534 # nobody
runAsGroup: 65534 # nobody
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 30

k8s/nginx-auth.conf Normal file (34 lines)
View file

@@ -0,0 +1,34 @@
server {
listen 8080 default_server;
server_name _;
# Note that regex location matches take priority over non-regex "prefix" matches. Use regexes so that
# our deny all rule does not squelch the other locations.
location ~ ^/health/nginx$ {
return 200;
}
location ~ ^/robots.txt$ {
add_header Content-Type text/plain;
return 200 "User-agent: *\nDisallow: /\n";
}
location ~ ^/accounts/create.* {
return 302 https://datatracker.ietf.org/accounts/create;
}
# n.b. (?!...) is a negative lookahead group
location ~ ^(/(?!(api/openid/|accounts/login/|accounts/logout/|accounts/reset/|person/.*/photo|group/groupmenu.json)).*) {
deny all;
}
location / {
add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' data: https://datatracker.ietf.org/ https://www.ietf.org/ http://ietf.org/ https://analytics.ietf.org https://static.ietf.org; frame-ancestors 'self' ietf.org *.ietf.org meetecho.com *.meetecho.com gather.town *.gather.town";
proxy_set_header Host $${keepempty}host;
proxy_set_header Connection close;
proxy_set_header X-Request-Start "t=${msec}";
proxy_set_header X-Forwarded-For $${keepempty}proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $${keepempty}remote_addr;
proxy_pass http://localhost:8000;
}
}

View file

@@ -0,0 +1,23 @@
server {
listen 8080 default_server;
server_name _;
location /health/nginx {
return 200;
}
location /robots.txt {
add_header Content-Type text/plain;
return 200 "User-agent: *\nDisallow: /doc/pdf/\n";
}
location / {
add_header Content-Security-Policy "default-src 'self' 'unsafe-inline' data: https://datatracker.ietf.org/ https://www.ietf.org/ http://ietf.org/ https://analytics.ietf.org https://static.ietf.org; frame-ancestors 'self' ietf.org *.ietf.org meetecho.com *.meetecho.com";
proxy_set_header Host $${keepempty}host;
proxy_set_header Connection close;
proxy_set_header X-Request-Start "t=${msec}";
proxy_set_header X-Forwarded-For $${keepempty}proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $${keepempty}remote_addr;
proxy_pass http://localhost:8000;
}
}

View file

@@ -15,23 +15,6 @@ spec:
spec:
securityContext:
runAsNonRoot: true
initContainers:
# -----------------------------------------------------
# Init RabbitMQ data
# -----------------------------------------------------
- name: init-rabbitmq
image: busybox:stable
command:
- "sh"
- "-c"
- "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
securityContext:
runAsNonRoot: false
runAsUser: 0
readOnlyRootFilesystem: true
volumeMounts:
- name: "rabbitmq-data"
mountPath: "/mnt"
containers:
# -----------------------------------------------------
# RabbitMQ Container
@@ -52,8 +35,11 @@ spec:
- name: rabbitmq-config
mountPath: "/etc/rabbitmq"
env:
- name: "CELERY_PASSWORD"
value: "this-is-a-secret"
- name: CELERY_PASSWORD
valueFrom:
secretKeyRef:
name: dt-secrets-env
key: CELERY_PASSWORD
livenessProbe:
exec:
command: ["rabbitmq-diagnostics", "-q", "ping"]
@@ -76,6 +62,23 @@ spec:
# rabbitmq image sets up uid/gid 100/101
runAsUser: 100
runAsGroup: 101
initContainers:
# -----------------------------------------------------
# Init RabbitMQ data
# -----------------------------------------------------
- name: init-rabbitmq
image: busybox:stable
command:
- "sh"
- "-c"
- "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
securityContext:
runAsNonRoot: false
runAsUser: 0
readOnlyRootFilesystem: true
volumeMounts:
- name: "rabbitmq-data"
mountPath: "/mnt"
volumes:
- name: rabbitmq-tmp
emptyDir:

View file

@@ -1,9 +1,9 @@
apiVersion: v1
kind: ConfigMap
kind: Secret
metadata:
name: django-config
data:
# n.b., these are debug values / non-secret secrets
name: secrets-env
type: Opaque
stringData:
DATATRACKER_SERVER_MODE: "development" # development for staging, production for production
DATATRACKER_ADMINS: |-
Robert Sparks <rjsparks@nostrum.com>
@@ -80,4 +80,4 @@ data:
# Scout configuration
DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
DATATRACKER_SCOUT_NAME: "StagingDatatracker"