commit ce00e1bb4b

.github/workflows/build.yml (vendored)

@@ -31,6 +31,11 @@ on:
         default: false
         required: true
         type: boolean
+      skiparm:
+        description: 'Skip ARM64 Build'
+        default: false
+        required: true
+        type: boolean
       ignoreLowerCoverage:
         description: 'Ignore Lower Coverage'
         default: false

@@ -160,7 +165,7 @@ jobs:

       - name: Download a Coverage Results
         if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v4.1.8
         with:
           name: coverage

@@ -241,11 +246,11 @@ jobs:
       - name: Build Release Docker Image
         uses: docker/build-push-action@v6
         env:
-          DOCKER_BUILD_NO_SUMMARY: true
+          DOCKER_BUILD_SUMMARY: false
         with:
           context: .
           file: dev/build/Dockerfile
-          platforms: linux/amd64,linux/arm64
+          platforms: ${{ github.event.inputs.skiparm == 'true' && 'linux/amd64' || 'linux/amd64,linux/arm64' }}
           push: true
           tags: ghcr.io/ietf-tools/datatracker:${{ env.PKG_VERSION }}
           cache-from: type=gha

@@ -388,7 +393,7 @@ jobs:
       - uses: actions/checkout@v4

       - name: Download a Release Artifact
-        uses: actions/download-artifact@v4.1.7
+        uses: actions/download-artifact@v4.1.8
         with:
           name: release-${{ env.PKG_VERSION }}

@@ -435,6 +440,7 @@ jobs:
           inputs: '{ "environment":"${{ secrets.GHA_K8S_CLUSTER }}", "app":"datatracker", "appVersion":"${{ env.PKG_VERSION }}", "remoteRef":"${{ github.sha }}" }'
           wait-for-completion: true
           wait-for-completion-timeout: 10m
           wait-for-completion-interval: 30s
+          display-workflow-run-url: false

  # -----------------------------------------------------------------

@@ -443,7 +449,7 @@ jobs:
   prod:
     name: Deploy to Production
     if: ${{ !failure() && !cancelled() && (github.event.inputs.deploy == 'Staging + Prod' || github.ref_name == 'release') }}
-    needs: [staging]
+    needs: [prepare, staging]
     runs-on: ubuntu-latest
     environment:
       name: production

@@ -461,4 +467,5 @@ jobs:
           inputs: '{ "environment":"${{ secrets.GHA_K8S_CLUSTER }}", "app":"datatracker", "appVersion":"${{ env.PKG_VERSION }}", "remoteRef":"${{ github.sha }}" }'
           wait-for-completion: true
           wait-for-completion-timeout: 10m
           wait-for-completion-interval: 30s
+          display-workflow-run-url: false
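
A note on two of the hunks above: the env change appears to track docker/build-push-action v6, which replaced the DOCKER_BUILD_NO_SUMMARY opt-out with a DOCKER_BUILD_SUMMARY flag, and the new `platforms:` value uses the `cond && a || b` idiom because GitHub Actions expressions have no ternary operator. A minimal Python sketch of how that expression resolves (the helper name is ours, purely illustrative; the idiom behaves like a ternary only while the "true" branch is a truthy, non-empty string):

    def gha_ternary(cond: bool, if_true: str, if_false: str) -> str:
        # `cond && if_true` yields if_true when cond is truthy;
        # `... || if_false` supplies the fallback otherwise.
        return if_true if cond else if_false

    assert gha_ternary(True, "linux/amd64", "linux/amd64,linux/arm64") == "linux/amd64"
    assert gha_ternary(False, "linux/amd64", "linux/amd64,linux/arm64") == "linux/amd64,linux/arm64"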

(unnamed file)

@@ -1,5 +1,5 @@
 import slugify from 'slugify'

 export default (str) => {
-  return slugify(str.replace('/', '-'), { lower: true })
+  return slugify(str.replaceAll('/', '-'), { lower: true })
 }
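
The one-word change above is a behavior fix, not style: with a string pattern, JavaScript's String.prototype.replace substitutes only the first occurrence, so only the first '/' in a multi-segment name was mapped to '-'; replaceAll substitutes every occurrence before slugify lowercases the result. A Python analogue of the intended mapping (illustrative only; Python's str.replace already replaces all occurrences):

    def slug(s: str) -> str:
        # Every '/' becomes '-', then lowercase (the JS helper delegates
        # lowercasing to slugify's `lower: true` option).
        return s.replace("/", "-").lower()

    assert slug("feat/UI/fix") == "feat-ui-fix"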

dev/build/Dockerfile

@@ -17,6 +17,7 @@ RUN echo "deb http://deb.debian.org/debian bullseye-backports main" > /etc/apt/s
 COPY . .
 COPY ./dev/build/start.sh ./start.sh
 COPY ./dev/build/datatracker-start.sh ./datatracker-start.sh
+COPY ./dev/build/migration-start.sh ./migration-start.sh
 COPY ./dev/build/celery-start.sh ./celery-start.sh
 COPY ./dev/build/gunicorn.conf.py ./gunicorn.conf.py

@@ -27,6 +28,7 @@ RUN pip3 --disable-pip-version-check --no-cache-dir install -r requirements.txt
 RUN chmod +x start.sh && \
     chmod +x datatracker-start.sh && \
+    chmod +x migration-start.sh && \
     chmod +x celery-start.sh && \
     chmod +x docker/scripts/app-create-dirs.sh && \
     sh ./docker/scripts/app-create-dirs.sh

dev/build/celery-start.sh

@@ -8,11 +8,14 @@ echo "Running Datatracker checks..."
 if ! ietf/manage.py migrate --skip-checks --check ; then
     echo "Unapplied migrations found, waiting to start..."
     sleep 5
-    while ! ietf/manage.py migrate --skip-checks --check ; do
+    while ! ietf/manage.py migrate --skip-checks --check ; do
+        echo "... still waiting for migrations..."
+        sleep 5
+    done
 fi

 echo "Starting Celery..."

 cleanup () {
     # Cleanly terminate the celery app by sending it a TERM, then waiting for it to exit.
     if [[ -n "${celery_pid}" ]]; then

dev/build/datatracker-start.sh

@@ -3,8 +3,14 @@
 echo "Running Datatracker checks..."
 ./ietf/manage.py check

-echo "Running Datatracker migrations..."
-./ietf/manage.py migrate --skip-checks --settings=settings_local
+if ! ietf/manage.py migrate --skip-checks --check ; then
+    echo "Unapplied migrations found, waiting to start..."
+    sleep 5
+    while ! ietf/manage.py migrate --skip-checks --check ; do
+        echo "... still waiting for migrations..."
+        sleep 5
+    done
+fi

 echo "Starting Datatracker..."
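
Both start scripts now wait for migrations instead of applying them: Django's `manage.py migrate --check` exits with a non-zero status while unapplied migrations exist, so each container polls until something else (the new `migrations` role below, run as a Kubernetes init container) has brought the schema up to date. A Python sketch of the same polling logic, assuming only that documented exit-status behavior:

    import subprocess
    import time

    def wait_for_migrations(poll_seconds: int = 5) -> None:
        # `migrate --check` exits non-zero while unapplied migrations
        # exist, and 0 once the database schema is current.
        check = ["./ietf/manage.py", "migrate", "--skip-checks", "--check"]
        while subprocess.run(check).returncode != 0:
            print("... still waiting for migrations...")
            time.sleep(poll_seconds)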

dev/build/migration-start.sh (new file)

@@ -0,0 +1,6 @@
+#!/bin/bash
+
+echo "Running Datatracker migrations..."
+./ietf/manage.py migrate --skip-checks --settings=settings_local
+
+echo "Done!"

dev/build/start.sh

@@ -5,14 +5,20 @@
 # CONTAINER_ROLE - datatracker, celery, or beat (defaults to datatracker)
 #
 case "${CONTAINER_ROLE:-datatracker}" in
-    datatracker)
+    auth)
         exec ./datatracker-start.sh
         ;;
+    beat)
+        exec ./celery-start.sh --app=ietf beat
+        ;;
     celery)
         exec ./celery-start.sh --app=ietf worker
         ;;
-    beat)
-        exec ./celery-start.sh --app=ietf beat
+    datatracker)
+        exec ./datatracker-start.sh
+        ;;
+    migrations)
+        exec ./migration-start.sh
         ;;
     *)
         echo "Unknown role '${CONTAINER_ROLE}'"

(unnamed file)

@@ -1389,15 +1389,17 @@ class DraftAliasGenerator:
         # states__type_id, states__slug directly in the `filter()`
         # works, but it does not work as expected in `exclude()`.
         active_state = State.objects.get(type_id="draft", slug="active")
+        active_pks = []  # build a static list of the drafts we actually returned as "active"
         active_drafts = drafts.filter(states=active_state)
         for this_draft in active_drafts:
+            active_pks.append(this_draft.pk)
             for alias, addresses in self._yield_aliases_for_draft(this_draft):
                 yield alias, addresses

         # Annotate with the draft state slug so we can check for drafts that
         # have become RFCs
         inactive_recent_drafts = (
-            drafts.exclude(states=active_state)
+            drafts.exclude(pk__in=active_pks)  # don't re-filter by state, states may have changed during the run!
             .filter(expires__gte=show_since)
             .annotate(
                 # Why _default_manager instead of objects? See:
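
The active_pks bookkeeping above guards against queryset laziness: `drafts.exclude(states=active_state)` would not hit the database until the second loop runs, so a draft whose state changed while the first loop was yielding could appear in both passes or in neither. Excluding by the primary keys actually seen keeps the passes disjoint. A runnable toy illustration of the hazard (plain lists standing in for querysets):

    # A lazily-evaluated "query" sees mutations made after it was defined,
    # much like a Django queryset evaluated later in the run.
    drafts = [{"pk": 1, "active": True}, {"pk": 2, "active": True}]

    lazy_inactive = (d for d in drafts if not d["active"])  # defined now, evaluated later
    active_pks = [d["pk"] for d in drafts if d["active"]]   # snapshot taken now

    drafts[1]["active"] = False  # a state change mid-run

    print([d["pk"] for d in lazy_inactive])                        # [2]: draft 2 shows up "inactive" too
    print([d["pk"] for d in drafts if d["pk"] not in active_pks])  # []: the snapshot stays disjoint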

ietf/meeting/tests_views.py

@@ -6650,7 +6650,9 @@ class MaterialsTests(TestCase):
         self.assertEqual(r.status_code, 200)
         self.assertRegex(r.content.decode(), r"These\s+slides\s+have\s+already\s+been\s+rejected")

-    def test_approve_proposed_slides(self):
+    @override_settings(MEETECHO_API_CONFIG="fake settings")  # enough to trigger API calls
+    @patch("ietf.meeting.views.SlidesManager")
+    def test_approve_proposed_slides(self, mock_slides_manager_cls):
         submission = SlideSubmissionFactory()
         session = submission.session
         session.meeting.importantdate_set.create(name_id='revsub',date=date_today() + datetime.timedelta(days=20))

@@ -6666,19 +6668,30 @@ class MaterialsTests(TestCase):
         self.assertEqual(r.status_code,302)
         self.assertEqual(SlideSubmission.objects.filter(status__slug = 'pending').count(), 0)
         self.assertEqual(SlideSubmission.objects.filter(status__slug = 'approved').count(), 1)
-        submission = SlideSubmission.objects.get(id = submission.id)
+        submission.refresh_from_db()
         self.assertEqual(submission.status_id, 'approved')
         self.assertIsNotNone(submission.doc)
         self.assertEqual(session.presentations.count(),1)
         self.assertEqual(session.presentations.first().document.title,'different title')
+        self.assertEqual(mock_slides_manager_cls.call_count, 1)
+        self.assertEqual(mock_slides_manager_cls.call_args, call(api_config="fake settings"))
+        self.assertEqual(mock_slides_manager_cls.return_value.add.call_count, 1)
+        self.assertEqual(
+            mock_slides_manager_cls.return_value.add.call_args,
+            call(session=session, slides=submission.doc, order=1),
+        )
+        mock_slides_manager_cls.reset_mock()
         r = self.client.get(url)
         self.assertEqual(r.status_code, 200)
         self.assertRegex(r.content.decode(), r"These\s+slides\s+have\s+already\s+been\s+approved")
+        self.assertFalse(mock_slides_manager_cls.called)
         self.assertEqual(len(outbox), 1)
         self.assertIn(submission.submitter.email_address(), outbox[0]['To'])
         self.assertIn('Slides approved', outbox[0]['Subject'])

-    def test_approve_proposed_slides_multisession_apply_one(self):
+    @override_settings(MEETECHO_API_CONFIG="fake settings")  # enough to trigger API calls
+    @patch("ietf.meeting.views.SlidesManager")
+    def test_approve_proposed_slides_multisession_apply_one(self, mock_slides_manager_cls):
         submission = SlideSubmissionFactory(session__meeting__type_id='ietf')
         session1 = submission.session
         session2 = SessionFactory(group=submission.session.group, meeting=submission.session.meeting)

@@ -6691,11 +6704,22 @@ class MaterialsTests(TestCase):
         q = PyQuery(r.content)
         self.assertTrue(q('#id_apply_to_all'))
         r = self.client.post(url,dict(title='yet another title',approve='approve'))
+        submission.refresh_from_db()
+        self.assertIsNotNone(submission.doc)
         self.assertEqual(r.status_code,302)
         self.assertEqual(session1.presentations.count(),1)
         self.assertEqual(session2.presentations.count(),0)
+        self.assertEqual(mock_slides_manager_cls.call_count, 1)
+        self.assertEqual(mock_slides_manager_cls.call_args, call(api_config="fake settings"))
+        self.assertEqual(mock_slides_manager_cls.return_value.add.call_count, 1)
+        self.assertEqual(
+            mock_slides_manager_cls.return_value.add.call_args,
+            call(session=session1, slides=submission.doc, order=1),
+        )

-    def test_approve_proposed_slides_multisession_apply_all(self):
+    @override_settings(MEETECHO_API_CONFIG="fake settings")  # enough to trigger API calls
+    @patch("ietf.meeting.views.SlidesManager")
+    def test_approve_proposed_slides_multisession_apply_all(self, mock_slides_manager_cls):
         submission = SlideSubmissionFactory(session__meeting__type_id='ietf')
         session1 = submission.session
         session2 = SessionFactory(group=submission.session.group, meeting=submission.session.meeting)

@@ -6706,11 +6730,24 @@ class MaterialsTests(TestCase):
         r = self.client.get(url)
         self.assertEqual(r.status_code,200)
         r = self.client.post(url,dict(title='yet another title',apply_to_all=1,approve='approve'))
+        submission.refresh_from_db()
         self.assertEqual(r.status_code,302)
         self.assertEqual(session1.presentations.count(),1)
         self.assertEqual(session2.presentations.count(),1)
+        self.assertEqual(mock_slides_manager_cls.call_count, 1)
+        self.assertEqual(mock_slides_manager_cls.call_args, call(api_config="fake settings"))
+        self.assertEqual(mock_slides_manager_cls.return_value.add.call_count, 2)
+        self.assertCountEqual(
+            mock_slides_manager_cls.return_value.add.call_args_list,
+            [
+                call(session=session1, slides=submission.doc, order=1),
+                call(session=session2, slides=submission.doc, order=1),
+            ]
+        )

-    def test_submit_and_approve_multiple_versions(self):
+    @override_settings(MEETECHO_API_CONFIG="fake settings")  # enough to trigger API calls
+    @patch("ietf.meeting.views.SlidesManager")
+    def test_submit_and_approve_multiple_versions(self, mock_slides_manager_cls):
         session = SessionFactory(meeting__type_id='ietf')
         chair = RoleFactory(group=session.group,name_id='chair').person
         session.meeting.importantdate_set.create(name_id='revsub',date=date_today()+datetime.timedelta(days=20))

@@ -6725,14 +6762,23 @@ class MaterialsTests(TestCase):
         self.assertEqual(r.status_code, 302)
         self.client.logout()

-        submission = SlideSubmission.objects.get(session = session)
+        submission = SlideSubmission.objects.get(session=session)

         approve_url = urlreverse('ietf.meeting.views.approve_proposed_slides', kwargs={'slidesubmission_id':submission.pk,'num':submission.session.meeting.number})
         login_testing_unauthorized(self, chair.user.username, approve_url)
         r = self.client.post(approve_url,dict(title=submission.title,approve='approve'))
+        submission.refresh_from_db()
         self.assertEqual(r.status_code,302)
         self.client.logout()
+        self.assertEqual(mock_slides_manager_cls.call_count, 1)
+        self.assertEqual(mock_slides_manager_cls.call_args, call(api_config="fake settings"))
+        self.assertEqual(mock_slides_manager_cls.return_value.add.call_count, 1)
+        self.assertEqual(
+            mock_slides_manager_cls.return_value.add.call_args,
+            call(session=session, slides=submission.doc, order=1),
+        )
+        mock_slides_manager_cls.reset_mock()

         self.assertEqual(session.presentations.first().document.rev,'00')

         login_testing_unauthorized(self,newperson.user.username,propose_url)

@@ -6752,12 +6798,24 @@ class MaterialsTests(TestCase):
         approve_url = urlreverse('ietf.meeting.views.approve_proposed_slides', kwargs={'slidesubmission_id':second_submission.pk,'num':second_submission.session.meeting.number})
         login_testing_unauthorized(self, chair.user.username, approve_url)
         r = self.client.post(approve_url,dict(title=submission.title,approve='approve'))
+        first_submission.refresh_from_db()
+        second_submission.refresh_from_db()
         self.assertEqual(r.status_code,302)
+        self.assertEqual(mock_slides_manager_cls.call_count, 1)
+        self.assertEqual(mock_slides_manager_cls.call_args, call(api_config="fake settings"))
+        self.assertEqual(mock_slides_manager_cls.return_value.add.call_count, 0)
+        self.assertEqual(mock_slides_manager_cls.return_value.revise.call_count, 1)
+        self.assertEqual(
+            mock_slides_manager_cls.return_value.revise.call_args,
+            call(session=session, slides=second_submission.doc),
+        )
+        mock_slides_manager_cls.reset_mock()

         disapprove_url = urlreverse('ietf.meeting.views.approve_proposed_slides', kwargs={'slidesubmission_id':first_submission.pk,'num':first_submission.session.meeting.number})
         r = self.client.post(disapprove_url,dict(title='some title',disapprove="disapprove"))
         self.assertEqual(r.status_code,302)
         self.client.logout()
+        self.assertFalse(mock_slides_manager_cls.called)

         self.assertEqual(SlideSubmission.objects.filter(status__slug = 'pending').count(),0)
         self.assertEqual(SlideSubmission.objects.filter(status__slug = 'rejected').count(),1)
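
A note on the test pattern above: @patch("ietf.meeting.views.SlidesManager") replaces the name where the view module looks it up, so the injected mock stands in for the class and its .return_value is the instance the view constructs; asserting on call_args and call_args_list then pins down exactly which sessions were pushed and in what shape. A stripped-down, self-contained sketch of the same technique (the approve() helper is hypothetical):

    from unittest.mock import MagicMock, call

    def approve(slides_manager_cls, session, doc):
        # Hypothetical stand-in for the view's use of SlidesManager.
        sm = slides_manager_cls(api_config="fake settings")
        sm.add(session=session, slides=doc, order=1)

    cls_mock = MagicMock()
    approve(cls_mock, session="session-1", doc="slides-00")

    assert cls_mock.call_args == call(api_config="fake settings")
    assert cls_mock.return_value.add.call_args == call(session="session-1", slides="slides-00", order=1)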

ietf/meeting/views.py

@@ -5009,18 +5009,25 @@ def approve_proposed_slides(request, slidesubmission_id, num):
         )
         doc.states.add(State.objects.get(type_id='slides',slug='active'))
         doc.states.add(State.objects.get(type_id='reuse_policy',slug='single'))
+        added_presentations = []
+        revised_presentations = []
         if submission.session.presentations.filter(document=doc).exists():
             sp = submission.session.presentations.get(document=doc)
             sp.rev = doc.rev
             sp.save()
+            revised_presentations.append(sp)
         else:
             max_order = submission.session.presentations.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
-            submission.session.presentations.create(document=doc,rev=doc.rev,order=max_order+1)
+            added_presentations.append(
+                submission.session.presentations.create(document=doc,rev=doc.rev,order=max_order+1)
+            )
         if apply_to_all:
             for other_session in sessions:
                 if other_session != submission.session and not other_session.presentations.filter(document=doc).exists():
                     max_order = other_session.presentations.filter(document__type='slides').aggregate(Max('order'))['order__max'] or 0
-                    other_session.presentations.create(document=doc,rev=doc.rev,order=max_order+1)
+                    added_presentations.append(
+                        other_session.presentations.create(document=doc,rev=doc.rev,order=max_order+1)
+                    )
         sub_name, sub_ext = os.path.splitext(submission.filename)
         target_filename = '%s-%s%s' % (sub_name[:sub_name.rfind('-ss')],doc.rev,sub_ext)
         doc.uploaded_filename = target_filename

@@ -5033,6 +5040,20 @@ def approve_proposed_slides(request, slidesubmission_id, num):
         post_process(doc)
         DocEvent.objects.create(type="approved_slides", doc=doc, rev=doc.rev, by=request.user.person, desc="Slides approved")

+        # update meetecho slide info if configured
+        if hasattr(settings, "MEETECHO_API_CONFIG"):
+            sm = SlidesManager(api_config=settings.MEETECHO_API_CONFIG)
+            for sp in added_presentations:
+                try:
+                    sm.add(session=sp.session, slides=doc, order=sp.order)
+                except MeetechoAPIError as err:
+                    log(f"Error in SlidesManager.add(): {err}")
+            for sp in revised_presentations:
+                try:
+                    sm.revise(session=sp.session, slides=doc)
+                except MeetechoAPIError as err:
+                    log(f"Error in SlidesManager.revise(): {err}")
+
         acronym = submission.session.group.acronym
         submission.status = SlideSubmissionStatusName.objects.get(slug='approved')
         submission.doc = doc
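
Two design details in the view change above: added_presentations and revised_presentations record exactly the presentation rows created or updated, so the Meetecho block that follows syncs only what actually changed; and each sm.add()/sm.revise() call sits in its own try/except, so a MeetechoAPIError against one session is logged without aborting the approval or the remaining calls.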

k8s/auth.yaml (new file)

@@ -0,0 +1,116 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: auth
+spec:
+  replicas: 1
+  revisionHistoryLimit: 2
+  selector:
+    matchLabels:
+      app: auth
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      labels:
+        app: auth
+    spec:
+      securityContext:
+        runAsNonRoot: true
+      containers:
+        # -----------------------------------------------------
+        # ScoutAPM Container
+        # -----------------------------------------------------
+        - name: scoutapm
+          image: "scoutapp/scoutapm:version-1.4.0"
+          imagePullPolicy: IfNotPresent
+          # Replace command with one that will shut down on a TERM signal
+          # The ./core-agent start command line is from the scoutapm docker image
+          command:
+            - "sh"
+            - "-c"
+            - >-
+              trap './core-agent shutdown --tcp 0.0.0.0:6590' TERM;
+              ./core-agent start --daemonize false --log-level debug --tcp 0.0.0.0:6590 &
+              wait $!
+          livenessProbe:
+            exec:
+              command:
+                - "sh"
+                - "-c"
+                - "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
+          securityContext:
+            readOnlyRootFilesystem: true
+            runAsUser: 65534 # "nobody" user by default
+            runAsGroup: 65534 # "nogroup" group by default
+        # -----------------------------------------------------
+        # Datatracker Container
+        # -----------------------------------------------------
+        - name: datatracker
+          image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8000
+              name: http
+              protocol: TCP
+          volumeMounts:
+            - name: dt-vol
+              mountPath: /a
+            - name: dt-tmp
+              mountPath: /tmp
+            - name: dt-home
+              mountPath: /home/datatracker
+            - name: dt-xml2rfc-cache
+              mountPath: /var/cache/xml2rfc
+            - name: dt-cfg
+              mountPath: /workspace/ietf/settings_local.py
+              subPath: settings_local.py
+          env:
+            - name: "CONTAINER_ROLE"
+              value: "datatracker"
+            # ensures the pod gets recreated on every deploy:
+            - name: "DEPLOY_UID"
+              value: "$DEPLOY_UID"
+          envFrom:
+            - configMapRef:
+                name: django-config
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            readOnlyRootFilesystem: true
+            runAsUser: 1000
+            runAsGroup: 1000
+      volumes:
+        # To be overriden with the actual shared volume
+        - name: dt-vol
+        - name: dt-tmp
+          emptyDir:
+            sizeLimit: "2Gi"
+        - name: dt-xml2rfc-cache
+          emptyDir:
+            sizeLimit: "2Gi"
+        - name: dt-home
+          emptyDir:
+            sizeLimit: "2Gi"
+        - name: dt-cfg
+          configMap:
+            name: files-cfgmap
+      dnsPolicy: ClusterFirst
+      restartPolicy: Always
+      terminationGracePeriodSeconds: 60
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: auth
+spec:
+  type: ClusterIP
+  ports:
+    - port: 8080
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    app: auth

(unnamed file)

@@ -82,6 +82,35 @@ spec:
             readOnlyRootFilesystem: true
             runAsUser: 1000
             runAsGroup: 1000
+      initContainers:
+        - name: migration
+          image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
+          env:
+            - name: "CONTAINER_ROLE"
+              value: "migrations"
+          envFrom:
+            - configMapRef:
+                name: django-config
+          securityContext:
+            allowPrivilegeEscalation: false
+            capabilities:
+              drop:
+                - ALL
+            readOnlyRootFilesystem: true
+            runAsUser: 1000
+            runAsGroup: 1000
+          volumeMounts:
+            - name: dt-vol
+              mountPath: /a
+            - name: dt-tmp
+              mountPath: /tmp
+            - name: dt-home
+              mountPath: /home/datatracker
+            - name: dt-xml2rfc-cache
+              mountPath: /var/cache/xml2rfc
+            - name: dt-cfg
+              mountPath: /workspace/ietf/settings_local.py
+              subPath: settings_local.py
       volumes:
         # To be overriden with the actual shared volume
         - name: dt-vol
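
This init container is the counterpart of the wait loops added to the start scripts: running with CONTAINER_ROLE=migrations, it executes migration-start.sh to completion before the pod's main containers come up, while the datatracker and celery containers (including the new auth deployment, which reuses datatracker-start.sh) poll `migrate --check` until the schema is current.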

(unnamed file)

@@ -20,6 +20,8 @@ data:
   # DATATRACKER_DB_NAME: "datatracker"
   # DATATRACKER_DB_USER: "django" # secret
   # DATATRACKER_DB_PASS: "RkTkDPFnKpko" # secret
+  # DATATRACKER_DB_CONN_MAX_AGE: "0" # connection per request if not set, no limit if set to "None"
+  # DATATRACKER_DB_CONN_HEALTH_CHECKS: "false"

   DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret

k8s/kustomization.yaml

@@ -5,6 +5,7 @@ configMapGenerator:
     files:
       - settings_local.py
 resources:
+  - auth.yaml
   - beat.yaml
   - celery.yaml
   - datatracker.yaml

k8s/settings_local.py

@@ -92,6 +92,16 @@ DATABASES = {
     },
 }

+# Configure persistent connections. A setting of 0 is Django's default.
+_conn_max_age = os.environ.get("DATATRACKER_DB_CONN_MAX_AGE", "0")
+# A string "none" means unlimited age.
+DATABASES["default"]["CONN_MAX_AGE"] = None if _conn_max_age.lower() == "none" else int(_conn_max_age)
+# Enable connection health checks if DATATRACKER_DB_CONN_HEALTH_CHECKS is the string "true"
+_conn_health_checks = bool(
+    os.environ.get("DATATRACKER_DB_CONN_HEALTH_CHECKS", "false").lower() == "true"
+)
+DATABASES["default"]["CONN_HEALTH_CHECKS"] = _conn_health_checks
+
 # DATATRACKER_ADMINS is a newline-delimited list of addresses parseable by email.utils.parseaddr
 _admins_str = os.environ.get("DATATRACKER_ADMINS", None)
 if _admins_str is not None:
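
The parsing above accepts three shapes of DATATRACKER_DB_CONN_MAX_AGE. A worked mapping with the same semantics (the helper name is illustrative):

    from typing import Optional

    def conn_max_age(raw: Optional[str]) -> Optional[int]:
        # Unset -> "0" -> 0, Django's default (a new connection per request);
        # "none" in any casing -> None (persistent connections, no age limit);
        # anything else must parse as an integer number of seconds.
        value = "0" if raw is None else raw
        return None if value.lower() == "none" else int(value)

    assert conn_max_age(None) == 0
    assert conn_max_age("300") == 300
    assert conn_max_age("None") is None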