ci: k8s fixup (#7401)
* ci: remove stray serviceName
* ci: volumeClaimTemplate name -> volumeMount, not volume
* ci: datatracker listens on containerPort 8000
* ci: services/containers have dt- prefix
* chore: adjust indent for k8s yaml
* ci: use a secret for CELERY_PASSWORD
* fix: touched wrong CELERY_PASSWORD setting
* ci: get rid of the celery pw secretGenerator
* ci: use DB_PASS instead of DBPASS (etc) for k8s
* ci: Fill in django-config.yaml from env vars
* ci: add vault-mappings.txt
* ci: use $CELERY_PASSWORD in rabbitmq.yaml
* ci: moving vault-mappings.txt out of this repo
* Revert "ci: Fill in django-config.yaml from env vars"
  This reverts commit 75cd181deb390d3ab21d6887b091d66c80e1d18e.
* Revert "ci: use $CELERY_PASSWORD in rabbitmq.yaml"
  This reverts commit f251f9920d07c65413f72fd165cc06acd562c2c7.
* ci: parameterize db OPTIONS setting
This commit is contained in:
parent d075404fdb
commit 867360e96f

@@ -55,4 +55,10 @@ insert_final_newline = false
 # ---------------------------------------------------------
 # Use 2-space indents
 [helm/**.yaml]
 indent_size = 2
+
+# Settings for Kubernetes yaml
+# ---------------------------------------------------------
+# Use 2-space indents
+[k8s/**.yaml]
+indent_size = 2

@@ -58,4 +58,4 @@ spec:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
-      terminationGracePeriodSeconds: 30
+      terminationGracePeriodSeconds: 30

@@ -77,4 +77,4 @@ spec:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
-      terminationGracePeriodSeconds: 30
+      terminationGracePeriodSeconds: 30

@@ -1,94 +1,94 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: datatracker
 spec:
   replicas: 1
   revisionHistoryLimit: 2
   selector:
     matchLabels:
       app: datatracker
   strategy:
     type: Recreate
   template:
     metadata:
       labels:
         app: datatracker
     spec:
       securityContext:
         runAsNonRoot: true
       containers:
         # -----------------------------------------------------
         # ScoutAPM Container
         # -----------------------------------------------------
         - name: scoutapm
           image: "scoutapp/scoutapm:version-1.4.0"
           imagePullPolicy: IfNotPresent
           livenessProbe:
             exec:
               command:
                 - "sh"
                 - "-c"
                 - "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
           securityContext:
             readOnlyRootFilesystem: true
             runAsUser: 65534 # "nobody" user by default
             runAsGroup: 65534 # "nogroup" group by default
         # -----------------------------------------------------
         # Datatracker Container
         # -----------------------------------------------------
         - name: datatracker
           image: "ghcr.io/ietf-tools/datatracker:$APP_IMAGE_TAG"
           imagePullPolicy: Always
           ports:
-            - containerPort: 80
+            - containerPort: 8000
               name: http
               protocol: TCP
           volumeMounts:
             - name: dt-vol
               mountPath: /a
             - name: dt-tmp
               mountPath: /tmp
             - name: dt-cfg
               mountPath: /workspace/ietf/settings_local.py
               subPath: settings_local.py
           env:
             - name: "CONTAINER_ROLE"
               value: "datatracker"
           envFrom:
             - configMapRef:
                 name: django-config
           securityContext:
             allowPrivilegeEscalation: false
             capabilities:
               drop:
                 - ALL
             readOnlyRootFilesystem: true
             runAsUser: 1000
             runAsGroup: 1000
       volumes:
         # To be overriden with the actual shared volume
         - name: dt-vol
         - name: dt-tmp
           emptyDir:
             sizeLimit: "2Gi"
         - name: dt-cfg
           configMap:
             name: files-cfgmap
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: datatracker
 spec:
   type: ClusterIP
   ports:
     - port: 80
       targetPort: http
       protocol: TCP
       name: http
   selector:
     app: datatracker
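
The functional change in this file is the container port: the pod now listens on 8000, while the Service above still exposes port 80 and reaches the pod through the named targetPort "http". A minimal sketch, purely hypothetical and not code from this repo, of how that name-based indirection resolves:

# Hypothetical illustration of Kubernetes named-port resolution; nothing here is repo code.
SERVICE_PORT = {"port": 80, "targetPort": "http"}  # from the Service manifest above
CONTAINER_PORTS = {"http": 8000}                   # from the Deployment manifest above


def resolve_target_port(svc_port: dict, container_ports: dict) -> int:
    """Return the container port a Service port ultimately forwards to."""
    target = svc_port["targetPort"]
    # A string targetPort is matched against container port *names*;
    # an integer targetPort would be used as-is.
    return container_ports[target] if isinstance(target, str) else target


assert resolve_target_port(SERVICE_PORT, CONTAINER_PORTS) == 8000
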
@@ -15,11 +15,11 @@ data:
   # DATATRACKER_DATATRACKER_DEBUG: "false"
 
   # DB access details - needs to be filled in
-  # DATATRACKER_DBHOST: "db"
-  # DATATRACKER_DBPORT: "5432"
-  # DATATRACKER_DBNAME: "datatracker"
-  # DATATRACKER_DBUSER: "django" # secret
-  # DATATRACKER_DBPASS: "RkTkDPFnKpko" # secret
+  # DATATRACKER_DB_HOST: "db"
+  # DATATRACKER_DB_PORT: "5432"
+  # DATATRACKER_DB_NAME: "datatracker"
+  # DATATRACKER_DB_USER: "django" # secret
+  # DATATRACKER_DB_PASS: "RkTkDPFnKpko" # secret
 
   DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret

@@ -74,4 +74,4 @@ data:
 
   # Scout configuration
   DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
-  DATATRACKER_SCOUT_NAME: "StagingDatatracker"
+  DATATRACKER_SCOUT_NAME: "StagingDatatracker"

@@ -10,4 +10,4 @@ resources:
   - datatracker.yaml
   - django-config.yaml
   - memcached.yaml
-  - rabbitmq.yaml
+  - rabbitmq.yaml

@@ -5,7 +5,6 @@ metadata:
 spec:
   replicas: 1
   revisionHistoryLimit: 2
-  serviceName: memcached
   selector:
     matchLabels:
       app: memcached

@@ -50,4 +49,4 @@ spec:
       protocol: TCP
       name: memcached
   selector:
-    app: memcached
+    app: memcached

@@ -1,176 +1,175 @@
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: rabbitmq
 spec:
   replicas: 1
   revisionHistoryLimit: 2
-  serviceName: rabbitmq
   selector:
     matchLabels:
       app: rabbitmq
   template:
     metadata:
       labels:
         app: rabbitmq
     spec:
       securityContext:
         runAsNonRoot: true
       initContainers:
         # -----------------------------------------------------
         # Init RabbitMQ data
         # -----------------------------------------------------
         - name: init-rabbitmq
           image: busybox:stable
           command:
             - "sh"
             - "-c"
             - "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
           securityContext:
             runAsNonRoot: false
             runAsUser: 0
             readOnlyRootFilesystem: true
           volumeMounts:
             - name: "rabbitmq-data"
               mountPath: "/mnt"
       containers:
         # -----------------------------------------------------
         # RabbitMQ Container
         # -----------------------------------------------------
         - image: "ghcr.io/ietf-tools/datatracker-mq:3.12-alpine"
           imagePullPolicy: Always
           name: rabbitmq
           ports:
             - name: amqp
               containerPort: 5672
               protocol: TCP
           volumeMounts:
             - name: rabbitmq-data
               mountPath: /var/lib/rabbitmq
               subPath: "rabbitmq"
             - name: rabbitmq-tmp
               mountPath: /tmp
             - name: rabbitmq-config
               mountPath: "/etc/rabbitmq"
+          env:
+            - name: "CELERY_PASSWORD"
+              value: "this-is-a-secret"
           livenessProbe:
             exec:
               command: ["rabbitmq-diagnostics", "-q", "ping"]
             periodSeconds: 30
             timeoutSeconds: 5
           startupProbe:
             initialDelaySeconds: 15
             periodSeconds: 5
             timeoutSeconds: 5
             successThreshold: 1
             failureThreshold: 60
             exec:
               command: ["rabbitmq-diagnostics", "-q", "ping"]
           securityContext:
             allowPrivilegeEscalation: false
             capabilities:
               drop:
                 - ALL
             readOnlyRootFilesystem: true
             # rabbitmq image sets up uid/gid 100/101
             runAsUser: 100
             runAsGroup: 101
       volumes:
-        - name: rabbitmq-data
-          persistentVolumeClaim:
-            claimName: "rabbitmq-data-vol"
         - name: rabbitmq-tmp
           emptyDir:
             sizeLimit: "50Mi"
         - name: rabbitmq-config
           configMap:
             name: "rabbitmq-configmap"
       dnsPolicy: ClusterFirst
       restartPolicy: Always
       terminationGracePeriodSeconds: 30
   volumeClaimTemplates:
     - metadata:
-        name: rabbitmq-data-vol
+        name: rabbitmq-data
       spec:
         accessModes:
           - ReadWriteOnce
         resources:
           requests:
             storage: 8Gi
         # storageClassName: ""
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: rabbitmq-configmap
 data:
   definitions.json: |-
     {
       "permissions": [
         {
           "configure": ".*",
           "read": ".*",
           "user": "datatracker",
           "vhost": "dt",
           "write": ".*"
         }
       ],
       "users": [
         {
           "hashing_algorithm": "rabbit_password_hashing_sha256",
           "limits": {},
           "name": "datatracker",
           "password_hash": "HJxcItcpXtBN+R/CH7dUelfKBOvdUs3AWo82SBw2yLMSguzb",
           "tags": []
         }
       ],
       "vhosts": [
         {
           "limits": [],
           "metadata": {
             "description": "",
             "tags": []
           },
           "name": "dt"
         }
       ]
     }
   rabbitmq.conf: |-
     # prevent guest from logging in over tcp
     loopback_users.guest = true
 
     # load saved definitions
     load_definitions = /etc/rabbitmq/definitions.json
 
     # Ensure that enough disk is available to flush to disk. To do this, need to limit the
     # memory available to the container to something reasonable. See
     # https://www.rabbitmq.com/production-checklist.html#monitoring-and-resource-usage
     # for recommendations.
 
     # 1-1.5 times the memory available to the container is adequate for disk limit
     disk_free_limit.absolute = 6000MB
 
     # This should be ~40% of the memory available to the container. Use an
     # absolute number because relative will be proprtional to the full machine
     # memory.
     vm_memory_high_watermark.absolute = 1600MB
 
     # Logging
     log.file = false
     log.console = true
     log.console.level = info
     log.console.formatter = json
 ---
 apiVersion: v1
 kind: Service
 metadata:
   name: rabbitmq
 spec:
   type: ClusterIP
   clusterIP: None # headless service
   ports:
     - port: 5672
       targetPort: amqp
       protocol: TCP
       name: amqp
   selector:
     app: rabbitmq
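
The definitions.json above stores the datatracker user credential as a rabbit_password_hashing_sha256 hash rather than a plaintext password. As a rough sketch, not code from this repo, a compatible hash is a random 4-byte salt followed by SHA-256(salt + password), base64-encoded:

import base64
import hashlib
import os


def rabbitmq_password_hash(password: str) -> str:
    """Build a password_hash value in the rabbit_password_hashing_sha256 format."""
    salt = os.urandom(4)  # RabbitMQ uses a random 32-bit salt
    digest = hashlib.sha256(salt + password.encode("utf-8")).digest()
    return base64.b64encode(salt + digest).decode("ascii")


# Placeholder password only; the real CELERY_PASSWORD is meant to come from a secret.
print(rabbitmq_password_hash("this-is-a-secret"))
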
@@ -87,12 +87,13 @@ if _allowed_hosts_str is not None:
 
 DATABASES = {
     "default": {
-        "HOST": os.environ.get("DATATRACKER_DBHOST", "db"),
-        "PORT": os.environ.get("DATATRACKER_DBPORT", "5432"),
-        "NAME": os.environ.get("DATATRACKER_DBNAME", "datatracker"),
+        "HOST": os.environ.get("DATATRACKER_DB_HOST", "db"),
+        "PORT": os.environ.get("DATATRACKER_DB_PORT", "5432"),
+        "NAME": os.environ.get("DATATRACKER_DB_NAME", "datatracker"),
         "ENGINE": "django.db.backends.postgresql",
-        "USER": os.environ.get("DATATRACKER_DBUSER", "django"),
-        "PASSWORD": os.environ.get("DATATRACKER_DBPASS", ""),
+        "USER": os.environ.get("DATATRACKER_DB_USER", "django"),
+        "PASSWORD": os.environ.get("DATATRACKER_DB_PASS", ""),
+        "OPTIONS": json.loads(os.environ.get("DATATRACKER_DB_OPTS_JSON", "{}")),
     },
 }
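
The new DATATRACKER_DB_OPTS_JSON variable carries the Django database OPTIONS dict as a JSON string, so extra connection options can be injected by the deployment without touching settings. A hedged example of how the value is parsed; the sslmode entry is purely illustrative, not something this commit configures:

import json
import os

# Illustrative only: a deployment might export
#   DATATRACKER_DB_OPTS_JSON='{"sslmode": "require"}'
os.environ["DATATRACKER_DB_OPTS_JSON"] = '{"sslmode": "require"}'

options = json.loads(os.environ.get("DATATRACKER_DB_OPTS_JSON", "{}"))
print(options)  # {'sslmode': 'require'} -> becomes DATABASES["default"]["OPTIONS"]
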
@@ -111,7 +112,7 @@ _celery_password = os.environ.get("CELERY_PASSWORD", None)
 if _celery_password is None:
     raise RuntimeError("CELERY_PASSWORD must be set")
 CELERY_BROKER_URL = "amqp://datatracker:{password}@{host}/{queue}".format(
-    host=os.environ.get("RABBITMQ_HOSTNAME", "rabbitmq"),
+    host=os.environ.get("RABBITMQ_HOSTNAME", "dt-rabbitmq"),
     password=_celery_password,
     queue=os.environ.get("RABBITMQ_QUEUE", "dt")
 )
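
With the default hostname switched to the dt- prefixed service, a pod with no RABBITMQ_* overrides now builds its broker URL against dt-rabbitmq. A quick sketch with a placeholder password; in the cluster the real value comes from the CELERY_PASSWORD secret:

import os

# Placeholder for illustration; in the cluster CELERY_PASSWORD is provided as a secret.
os.environ.setdefault("CELERY_PASSWORD", "example-password")

CELERY_BROKER_URL = "amqp://datatracker:{password}@{host}/{queue}".format(
    host=os.environ.get("RABBITMQ_HOSTNAME", "dt-rabbitmq"),
    password=os.environ["CELERY_PASSWORD"],
    queue=os.environ.get("RABBITMQ_QUEUE", "dt"),
)
print(CELERY_BROKER_URL)  # amqp://datatracker:example-password@dt-rabbitmq/dt
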
@@ -212,8 +213,8 @@ DE_GFM_BINARY = "/usr/local/bin/de-gfm"
 IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"
 
 # Duplicating production cache from settings.py and using it whether we're in production mode or not
-MEMCACHED_HOST = os.environ.get("MEMCACHED_SERVICE_HOST", "127.0.0.1")
-MEMCACHED_PORT = os.environ.get("MEMCACHED_SERVICE_PORT", "11211")
+MEMCACHED_HOST = os.environ.get("DT_MEMCACHED_SERVICE_HOST", "127.0.0.1")
+MEMCACHED_PORT = os.environ.get("DT_MEMCACHED_SERVICE_PORT", "11211")
 from ietf import __version__
 CACHES = {
     "default": {