chore: remove helm stuff
parent 38b0b2c035
commit 635ca28ec1

2	.github/workflows/build.yml (vendored)
@@ -220,7 +220,7 @@ jobs:
.devcontainer
.github
.vscode
helm
k8s
playwright
svn-history
docker-compose.yml
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,23 +0,0 @@
apiVersion: v2
name: datatracker
description: The day-to-day front-end to the IETF database for people who work on IETF standards.
home: https://datatracker.ietf.org
sources:
  - https://github.com/ietf-tools/datatracker
maintainers:
  - name: IETF Tools Team
    email: tools-discuss@ietf.org
    url: https://github.com/ietf-tools

type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0"
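The comments in the deleted Chart.yaml above distinguish the chart version from the application version. As a hedged illustration only (the version numbers are hypothetical, not from this repository), a template-only change would bump `version` while leaving `appVersion` alone, and a new datatracker image would bump both:

# Hypothetical bump after editing templates only
version: 1.1.0
appVersion: "1.0.0"

# Hypothetical bump after deploying a new application image
version: 1.2.0
appVersion: "2.0.0"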
@@ -1,259 +0,0 @@
# Copyright The IETF Trust 2007-2024, All Rights Reserved
# -*- coding: utf-8 -*-

from base64 import b64decode
from email.utils import parseaddr
import json

from ietf import __release_hash__
from ietf.settings import * # pyflakes:ignore


def _remove_whitespace_and_b64decode(s):
    """Helper to strip out whitespace and base64 decode"""
    return b64decode("".join(s.split()))


def _multiline_to_list(s):
    """Helper to split at newlines and conver to list"""
    return [item.strip() for item in s.split("\n")]


# Default to "development". Production _must_ set DATATRACKER_SERVER_MODE="production" in the env!
SERVER_MODE = os.environ.get("DATATRACKER_SERVER_MODE", "development")

# Secrets
_SECRET_KEY = os.environ.get("DATATRACKER_DJANGO_SECRET_KEY", None)
if _SECRET_KEY is not None:
    SECRET_KEY = _SECRET_KEY
else:
    raise RuntimeError("DATATRACKER_DJANGO_SECRET_KEY must be set")

_NOMCOM_APP_SECRET_B64 = os.environ.get("DATATRACKER_NOMCOM_APP_SECRET_B64", None)
if _NOMCOM_APP_SECRET_B64 is not None:
    NOMCOM_APP_SECRET = _remove_whitespace_and_b64decode(_NOMCOM_APP_SECRET_B64)
else:
    raise RuntimeError("DATATRACKER_NOMCOM_APP_SECRET_B64 must be set")

_IANA_SYNC_PASSWORD = os.environ.get("DATATRACKER_IANA_SYNC_PASSWORD", None)
if _IANA_SYNC_PASSWORD is not None:
    IANA_SYNC_PASSWORD = _IANA_SYNC_PASSWORD
else:
    raise RuntimeError("DATATRACKER_IANA_SYNC_PASSWORD must be set")

_RFC_EDITOR_SYNC_PASSWORD = os.environ.get("DATATRACKER_RFC_EDITOR_SYNC_PASSWORD", None)
if _RFC_EDITOR_SYNC_PASSWORD is not None:
    RFC_EDITOR_SYNC_PASSWORD = os.environ.get("DATATRACKER_RFC_EDITOR_SYNC_PASSWORD")
else:
    raise RuntimeError("DATATRACKER_RFC_EDITOR_SYNC_PASSWORD must be set")

_YOUTUBE_API_KEY = os.environ.get("DATATRACKER_YOUTUBE_API_KEY", None)
if _YOUTUBE_API_KEY is not None:
    YOUTUBE_API_KEY = _YOUTUBE_API_KEY
else:
    raise RuntimeError("DATATRACKER_YOUTUBE_API_KEY must be set")

_GITHUB_BACKUP_API_KEY = os.environ.get("DATATRACKER_GITHUB_BACKUP_API_KEY", None)
if _GITHUB_BACKUP_API_KEY is not None:
    GITHUB_BACKUP_API_KEY = _GITHUB_BACKUP_API_KEY
else:
    raise RuntimeError("DATATRACKER_GITHUB_BACKUP_API_KEY must be set")

_API_KEY_TYPE = os.environ.get("DATATRACKER_API_KEY_TYPE", None)
if _API_KEY_TYPE is not None:
    API_KEY_TYPE = _API_KEY_TYPE
else:
    raise RuntimeError("DATATRACKER_API_KEY_TYPE must be set")

_API_PUBLIC_KEY_PEM_B64 = os.environ.get("DATATRACKER_API_PUBLIC_KEY_PEM_B64", None)
if _API_PUBLIC_KEY_PEM_B64 is not None:
    API_PUBLIC_KEY_PEM = _remove_whitespace_and_b64decode(_API_PUBLIC_KEY_PEM_B64)
else:
    raise RuntimeError("DATATRACKER_API_PUBLIC_KEY_PEM_B64 must be set")

_API_PRIVATE_KEY_PEM_B64 = os.environ.get("DATATRACKER_API_PRIVATE_KEY_PEM_B64", None)
if _API_PRIVATE_KEY_PEM_B64 is not None:
    API_PRIVATE_KEY_PEM = _remove_whitespace_and_b64decode(_API_PRIVATE_KEY_PEM_B64)
else:
    raise RuntimeError("DATATRACKER_API_PRIVATE_KEY_PEM_B64 must be set")

# Set DEBUG if DATATRACKER_DEBUG env var is the word "true"
DEBUG = os.environ.get("DATATRACKER_DEBUG", "false").lower() == "true"

# DATATRACKER_ALLOWED_HOSTS env var is a comma-separated list of allowed hosts
_allowed_hosts_str = os.environ.get("DATATRACKER_ALLOWED_HOSTS", None)
if _allowed_hosts_str is not None:
    ALLOWED_HOSTS = _multiline_to_list(_allowed_hosts_str)

DATABASES = {
    "default": {
        "HOST": os.environ.get("DATATRACKER_DBHOST", "db"),
        "PORT": os.environ.get("DATATRACKER_DBPORT", "5432"),
        "NAME": os.environ.get("DATATRACKER_DBNAME", "datatracker"),
        "ENGINE": "django.db.backends.postgresql",
        "USER": os.environ.get("DATATRACKER_DBUSER", "django"),
        "PASSWORD": os.environ.get("DATATRACKER_DBPASS", ""),
    },
}

# DATATRACKER_ADMINS is a newline-delimited list of addresses parseable by email.utils.parseaddr
_admins_str = os.environ.get("DATATRACKER_ADMINS", None)
if _admins_str is not None:
    ADMINS = [parseaddr(admin) for admin in _multiline_to_list(_admins_str)]
else:
    raise RuntimeError("DATATRACKER_ADMINS must be set")

USING_DEBUG_EMAIL_SERVER = os.environ.get("DATATRACKER_EMAIL_DEBUG", "false").lower() == "true"
EMAIL_HOST = os.environ.get("DATATRACKER_EMAIL_HOST", "localhost")
EMAIL_PORT = int(os.environ.get("DATATRACKER_EMAIL_PORT", "2025"))

_celery_password = os.environ.get("CELERY_PASSWORD", None)
if _celery_password is None:
    raise RuntimeError("CELERY_PASSWORD must be set")
CELERY_BROKER_URL = "amqp://datatracker:{password}@{host}/{queue}".format(
    host=os.environ.get("RABBITMQ_HOSTNAME", "rabbitmq"),
    password=_celery_password,
    queue=os.environ.get("RABBITMQ_QUEUE", "dt")
)

IANA_SYNC_USERNAME = "ietfsync"
IANA_SYNC_CHANGES_URL = "https://datatracker.iana.org:4443/data-tracker/changes"
IANA_SYNC_PROTOCOLS_URL = "http://www.iana.org/protocols/"

RFC_EDITOR_NOTIFICATION_URL = "http://www.rfc-editor.org/parser/parser.php"

STATS_REGISTRATION_ATTENDEES_JSON_URL = 'https://registration.ietf.org/{number}/attendees/?apikey=redacted'

#FIRST_CUTOFF_DAYS = 12
#SECOND_CUTOFF_DAYS = 12
#SUBMISSION_CUTOFF_DAYS = 26
#SUBMISSION_CORRECTION_DAYS = 57
MEETING_MATERIALS_SUBMISSION_CUTOFF_DAYS = 26
MEETING_MATERIALS_SUBMISSION_CORRECTION_DAYS = 54

HTPASSWD_COMMAND = "/usr/bin/htpasswd2"

_MEETECHO_CLIENT_ID = os.environ.get("DATATRACKER_MEETECHO_CLIENT_ID", None)
_MEETECHO_CLIENT_SECRET = os.environ.get("DATATRACKER_MEETECHO_CLIENT_SECRET", None)
if _MEETECHO_CLIENT_ID is not None and _MEETECHO_CLIENT_SECRET is not None:
    MEETECHO_API_CONFIG = {
        "api_base": os.environ.get(
            "DATATRACKER_MEETECHO_API_BASE",
            "https://meetings.conf.meetecho.com/api/v1/",
        ),
        "client_id": _MEETECHO_CLIENT_ID,
        "client_secret": _MEETECHO_CLIENT_SECRET,
        "request_timeout": 3.01, # python-requests doc recommend slightly > a multiple of 3 seconds
    }
else:
    raise RuntimeError(
        "DATATRACKER_MEETECHO_CLIENT_ID and DATATRACKER_MEETECHO_CLIENT_SECRET must be set"
    )

_APP_API_TOKENS_JSON = os.environ.get("DATATRACKER_APP_API_TOKENS_JSON", None)
if _APP_API_TOKENS_JSON is not None:
    APP_API_TOKENS = json.loads(_APP_API_TOKENS_JSON)
else:
    APP_API_TOKENS = {}

EMAIL_COPY_TO = ""

# Until we teach the datatracker to look beyond cloudflare for this check
IDSUBMIT_MAX_DAILY_SAME_SUBMITTER = 5000

# Leave DATATRACKER_MATOMO_SITE_ID unset to disable Matomo reporting
if "DATATRACKER_MATOMO_SITE_ID" in os.environ:
    MATOMO_DOMAIN_PATH = os.environ.get("DATATRACKER_MATOMO_DOMAIN_PATH", "analytics.ietf.org")
    MATOMO_SITE_ID = os.environ.get("DATATRACKER_MATOMO_SITE_ID")
    MATOMO_DISABLE_COOKIES = True

# Leave DATATRACKER_SCOUT_KEY unset to disable Scout APM agent
_SCOUT_KEY = os.environ.get("DATATRACKER_SCOUT_KEY", None)
if _SCOUT_KEY is not None:
    if SERVER_MODE == "production":
        PROD_PRE_APPS = ["scout_apm.django", ]
    else:
        DEV_PRE_APPS = ["scout_apm.django", ]
    SCOUT_MONITOR = True
    SCOUT_KEY = _SCOUT_KEY
    SCOUT_NAME = os.environ.get("DATATRACKER_SCOUT_NAME", "Datatracker")
    SCOUT_ERRORS_ENABLED = True
    SCOUT_SHUTDOWN_MESSAGE_ENABLED = False
    SCOUT_CORE_AGENT_SOCKET_PATH = "tcp://{host}:{port}".format(
        host=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_HOST", "localhost"),
        port=os.environ.get("DATATRACKER_SCOUT_CORE_AGENT_PORT", "6590"),
    )
    SCOUT_CORE_AGENT_DOWNLOAD = False
    SCOUT_CORE_AGENT_LAUNCH = False
    SCOUT_REVISION_SHA = __release_hash__[:7]

# Path to the email alias lists. Used by ietf.utils.aliases
DRAFT_ALIASES_PATH = "/a/postfix/draft-aliases"
DRAFT_VIRTUAL_PATH = "/a/postfix/draft-virtual"
GROUP_ALIASES_PATH = "/a/postfix/group-aliases"
GROUP_VIRTUAL_PATH = "/a/postfix/group-virtual"

STATIC_URL = os.environ.get("DATATRACKER_STATIC_URL", None)
if STATIC_URL is None:
    from ietf import __version__
    STATIC_URL = f"https://static.ietf.org/dt/{__version__}/"

# Set these to the same as "production" in settings.py, whether production mode or not
MEDIA_ROOT = "/a/www/www6s/lib/dt/media/"
MEDIA_URL = "https://www.ietf.org/lib/dt/media/"
PHOTOS_DIRNAME = "photo"
PHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME

# Normally only set for debug, but needed until we have a real FS
DJANGO_VITE_MANIFEST_PATH = os.path.join(BASE_DIR, 'static/dist-neue/manifest.json')

# Binaries that are different in the docker image
DE_GFM_BINARY = "/usr/local/bin/de-gfm"
IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"

# Duplicating production cache from settings.py and using it whether we're in production mode or not
MEMCACHED_HOST = os.environ.get("MEMCACHED_SERVICE_HOST", "127.0.0.1")
MEMCACHED_PORT = os.environ.get("MEMCACHED_SERVICE_PORT", "11211")
from ietf import __version__
CACHES = {
    "default": {
        "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
        "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
        "VERSION": __version__,
        "KEY_PREFIX": "ietf:dt",
        "KEY_FUNCTION": lambda key, key_prefix, version: (
            f"{key_prefix}:{version}:{sha384(str(key).encode('utf8')).hexdigest()}"
        ),
    },
    "sessions": {
        "BACKEND": "ietf.utils.cache.LenientMemcacheCache",
        "LOCATION": f"{MEMCACHED_HOST}:{MEMCACHED_PORT}",
        # No release-specific VERSION setting.
        "KEY_PREFIX": "ietf:dt",
    },
    "htmlized": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/htmlized",
        "OPTIONS": {
            "MAX_ENTRIES": 100000, # 100,000
        },
    },
    "pdfized": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/pdfized",
        "OPTIONS": {
            "MAX_ENTRIES": 100000, # 100,000
        },
    },
    "slowpages": {
        "BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
        "LOCATION": "/a/cache/datatracker/slowpages",
        "OPTIONS": {
            "MAX_ENTRIES": 5000,
        },
    },
}

_csrf_trusted_origins_str = os.environ.get("DATATRACKER_CSRF_TRUSTED_ORIGINS")
if _csrf_trusted_origins_str is not None:
    CSRF_TRUSTED_ORIGINS = _multiline_to_list(_csrf_trusted_origins_str)
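The deleted settings module above refuses to start unless a number of DATATRACKER_* variables are present in the container environment. A minimal sketch of a corresponding pod environment follows; the variable names come from the settings file itself, while the Secret named "datatracker-secrets" and its keys are hypothetical and only for illustration:

# Illustrative only: env var names are from the settings module above,
# the Secret "datatracker-secrets" and its keys are hypothetical.
env:
  - name: DATATRACKER_SERVER_MODE
    value: "production"
  - name: DATATRACKER_ALLOWED_HOSTS
    value: "datatracker.example.org"
  - name: DATATRACKER_DJANGO_SECRET_KEY
    valueFrom:
      secretKeyRef:
        name: datatracker-secrets
        key: django-secret-key
  - name: DATATRACKER_NOMCOM_APP_SECRET_B64
    valueFrom:
      secretKeyRef:
        name: datatracker-secrets
        key: nomcom-app-secret-b64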
@@ -1,179 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "datatracker.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "datatracker.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create a fully qualified datatracker name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "datatracker.datatracker.fullname" -}}
{{- if .Values.datatracker.fullnameOverride -}}
{{- .Values.datatracker.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.datatracker.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.datatracker.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a fully qualified celery name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "datatracker.celery.fullname" -}}
{{- if .Values.celery.fullnameOverride -}}
{{- .Values.celery.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.celery.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.celery.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a fully qualified celery name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "datatracker.beat.fullname" -}}
{{- if .Values.beat.fullnameOverride -}}
{{- .Values.beat.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.beat.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.beat.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a fully qualified rabbitmq name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "datatracker.rabbitmq.fullname" -}}
{{- if .Values.rabbitmq.fullnameOverride -}}
{{- .Values.rabbitmq.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.rabbitmq.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.rabbitmq.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create a fully qualified memcached name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "datatracker.memcached.fullname" -}}
{{- if .Values.memcached.fullnameOverride -}}
{{- .Values.memcached.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf "%s-%s" .Release.Name .Values.memcached.name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s-%s" .Release.Name $name .Values.memcached.name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "datatracker.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "datatracker.commonLabels" -}}
helm.sh/chart: {{ include "datatracker.chart" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/part-of: {{ include "datatracker.name" . | default "datatracker" }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "datatracker.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "datatracker.serviceAccountName.datatracker" -}}
{{- if .Values.serviceAccounts.datatracker.create -}}
{{ default (include "datatracker.datatracker.fullname" .) .Values.serviceAccounts.datatracker.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.datatracker.name }}
{{- end -}}
{{- end }}

{{- define "datatracker.serviceAccountName.celery" -}}
{{- if .Values.serviceAccounts.celery.create -}}
{{ default (include "datatracker.celery.fullname" .) .Values.serviceAccounts.celery.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.celery.name }}
{{- end -}}
{{- end }}

{{- define "datatracker.serviceAccountName.beat" -}}
{{- if .Values.serviceAccounts.beat.create -}}
{{ default (include "datatracker.beat.fullname" .) .Values.serviceAccounts.beat.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.beat.name }}
{{- end -}}
{{- end }}

{{- define "datatracker.serviceAccountName.rabbitmq" -}}
{{- if .Values.serviceAccounts.rabbitmq.create -}}
{{ default (include "datatracker.rabbitmq.fullname" .) .Values.serviceAccounts.rabbitmq.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.rabbitmq.name }}
{{- end -}}
{{- end }}

{{- define "datatracker.serviceAccountName.memcached" -}}
{{- if .Values.serviceAccounts.memcached.create -}}
{{ default (include "datatracker.memcached.fullname" .) .Values.serviceAccounts.memcached.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.memcached.name }}
{{- end -}}
{{- end }}
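As a rough illustration of how the per-component naming helpers above resolve, assume a hypothetical install of this chart under the release name "dt" with the default component names (the release name is not part of the repository, only an example):

# Hypothetical inputs: helm install dt <chart>, chart name "datatracker"
datatracker:
  name: datatracker
celery:
  name: celery
# Expected results of datatracker.<component>.fullname, since "dt" does not
# contain the chart name and no fullnameOverride is set:
#   dt-datatracker-datatracker
#   dt-datatracker-celery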
@@ -1,69 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: django-configmap
data:
  settings_local.py: |-
    {{- .Files.Get "settings_local.py" | nindent 4 }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: rabbitmq-configmap
data:
  definitions.json: |-
    {
      "permissions": [
        {
          "configure": ".*",
          "read": ".*",
          "user": "datatracker",
          "vhost": "dt",
          "write": ".*"
        }
      ],
      "users": [
        {
          "hashing_algorithm": "rabbit_password_hashing_sha256",
          "limits": {},
          "name": "datatracker",
          "password_hash": "HJxcItcpXtBN+R/CH7dUelfKBOvdUs3AWo82SBw2yLMSguzb",
          "tags": []
        }
      ],
      "vhosts": [
        {
          "limits": [],
          "metadata": {
            "description": "",
            "tags": []
          },
          "name": "dt"
        }
      ]
    }
  rabbitmq.conf: |-
    # prevent guest from logging in over tcp
    loopback_users.guest = true

    # load saved definitions
    load_definitions = /etc/rabbitmq/definitions.json

    # Ensure that enough disk is available to flush to disk. To do this, need to limit the
    # memory available to the container to something reasonable. See
    # https://www.rabbitmq.com/production-checklist.html#monitoring-and-resource-usage
    # for recommendations.

    # 1-1.5 times the memory available to the container is adequate for disk limit
    disk_free_limit.absolute = 6000MB

    # This should be ~40% of the memory available to the container. Use an
    # absolute number because relative will be proprtional to the full machine
    # memory.
    vm_memory_high_watermark.absolute = 1600MB

    # Logging
    log.file = false
    log.console = true
    log.console.level = info
    log.console.formatter = json
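The sizing comments in the rabbitmq.conf above tie the broker thresholds to the container's memory: a 1600MB watermark is about 40% of 4Gi, and a 6000MB disk limit is roughly 1.5 times that. A hedged sketch of resource limits consistent with those numbers; the figures are illustrative and are not values taken from this chart's values.yaml:

# Illustrative sizing consistent with the rabbitmq.conf comments above
# (vm_memory_high_watermark ~40% of 4Gi, disk_free_limit ~1.5x memory).
rabbitmq:
  resources:
    requests:
      memory: 4Gi
    limits:
      memory: 4Gi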
@@ -1,75 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "datatracker.beat.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.beat.name }}
spec:
  {{- $podValues := .Values.beat }}
  replicas: {{ $podValues.replicaCount }}
  revisionHistoryLimit: {{ $podValues.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "datatracker.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/name: {{ $podValues.name }}
  template:
    metadata:
      labels:
        {{- include "datatracker.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/name: {{ $podValues.name }}
    spec:
      {{- with $podValues.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datatracker.serviceAccountName.beat" . }}
      securityContext:
        {{- toYaml $podValues.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml $podValues.securityContext | nindent 12 }}
          image: "{{ default $.Values.datatracker.image.repository $podValues.image.repository }}:{{ default .Chart.AppVersion (default $.Values.datatracker.image.tag $podValues.image.tag) }}"
          imagePullPolicy: {{ default "IfNotPresent" (default $.Values.datatracker.image.imagePullPolicy $podValues.image.imagePullPolicy) }}
          env:
            - name: "CONTAINER_ROLE"
              value: "beat"
            {{- if .Values.env }}
            {{- range $key, $val := .Values.env }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          {{- with $podValues.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            {{- toYaml $podValues.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $podValues.readinessProbe | nindent 12 }}
          startupProbe:
            {{- toYaml $podValues.startupProbe | nindent 12 }}
          resources:
            {{- toYaml $podValues.resources | nindent 12 }}
      {{- with $podValues.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,90 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "datatracker.celery.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.celery.name }}
spec:
  {{- $podValues := .Values.celery }}
  replicas: {{ $podValues.replicaCount }}
  revisionHistoryLimit: {{ $podValues.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "datatracker.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/name: {{ $podValues.name }}
  template:
    metadata:
      labels:
        {{- include "datatracker.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/name: {{ $podValues.name }}
    spec:
      {{- with $podValues.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datatracker.serviceAccountName.celery" . }}
      securityContext:
        {{- toYaml $podValues.podSecurityContext | nindent 8 }}
      containers:
        {{- if .Values.scoutapm }}
        - name: "scoutapm"
          image: "{{ .Values.scoutapm.image.repository }}:{{ default "latest" .Values.scoutapm.image.tag }}"
          imagePullPolicy: {{ default "IfNotPresent" .Values.scoutapm.image.imagePullPolicy }}
          livenessProbe:
            exec:
              command:
                - "sh"
                - "-c"
                - "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
          securityContext:
            readOnlyRootFilesystem: {{ default true .Values.scoutapm.readOnlyRootFilesystem }}
            runAsUser: {{ default 65534 .Values.scoutapm.runAsUser }} # "nobody" user by default
            runAsGroup: {{ default 65534 .Values.scoutapm.runAsGroup }} # "nogroup" group by default
        {{- end }}
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml $podValues.securityContext | nindent 12 }}
          image: "{{ default $.Values.datatracker.image.repository $podValues.image.repository }}:{{ default .Chart.AppVersion (default $.Values.datatracker.image.tag $podValues.image.tag) }}"
          imagePullPolicy: {{ default "IfNotPresent" (default $.Values.datatracker.image.imagePullPolicy $podValues.image.imagePullPolicy) }}
          env:
            - name: "CONTAINER_ROLE"
              value: "celery"
            {{- if .Values.env }}
            {{- range $key, $val := .Values.env }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          {{- with $podValues.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            {{- toYaml $podValues.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $podValues.readinessProbe | nindent 12 }}
          startupProbe:
            {{- toYaml $podValues.startupProbe | nindent 12 }}
          resources:
            {{- toYaml $podValues.resources | nindent 12 }}
      {{- with $podValues.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
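The image expression in the celery and beat deployments above falls back through three levels: the component's own image settings, then the datatracker image settings, then the chart appVersion. A hedged illustration of an override where celery pins its own tag (the tag values are hypothetical):

# Hypothetical override: celery reuses the datatracker repository but pins a tag.
datatracker:
  image:
    repository: "ghcr.io/ietf-tools/datatracker"
    tag: "v1.2.3"
celery:
  image:
    tag: "v1.2.4"  # takes precedence over the datatracker tag for the celery pod only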
@@ -1,90 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "datatracker.datatracker.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.datatracker.name }}
spec:
  {{- $podValues := .Values.datatracker }}
  replicas: {{ $podValues.replicaCount }}
  revisionHistoryLimit: {{ $podValues.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "datatracker.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/name: {{ $podValues.name }}
  template:
    metadata:
      labels:
        {{- include "datatracker.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/name: {{ $podValues.name }}
    spec:
      {{- with $podValues.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datatracker.serviceAccountName.datatracker" . }}
      securityContext:
        {{- toYaml $podValues.podSecurityContext | nindent 8 }}
      containers:
        {{- if .Values.scoutapm }}
        - name: "scoutapm"
          image: "{{ .Values.scoutapm.image.repository }}:{{ default "latest" .Values.scoutapm.image.tag }}"
          imagePullPolicy: {{ default "IfNotPresent" .Values.scoutapm.image.imagePullPolicy }}
          livenessProbe:
            exec:
              command:
                - "sh"
                - "-c"
                - "./core-agent probe --tcp 0.0.0.0:6590 | grep -q 'Agent found'"
          securityContext:
            readOnlyRootFilesystem: {{ default true .Values.scoutapm.readOnlyRootFilesystem }}
            runAsUser: {{ default 65534 .Values.scoutapm.runAsUser }} # "nobody" user by default
            runAsGroup: {{ default 65534 .Values.scoutapm.runAsGroup }} # "nogroup" group by default
        {{- end }}
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml $podValues.securityContext | nindent 12 }}
          image: "{{ $podValues.image.repository }}:{{ default .Chart.AppVersion $podValues.image.tag }}"
          imagePullPolicy: {{ default "IfNotPresent" $podValues.image.imagePullPolicy }}
          env:
            - name: "CONTAINER_ROLE"
              value: "datatracker"
            {{- if $.Values.env }}
            {{- range $key, $val := $.Values.env }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          {{- with $podValues.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          livenessProbe:
            {{- toYaml $podValues.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $podValues.readinessProbe | nindent 12 }}
          startupProbe:
            {{- toYaml $podValues.startupProbe | nindent 12 }}
          resources:
            {{- toYaml $podValues.resources | nindent 12 }}
      {{- with $podValues.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,74 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "datatracker.memcached.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.memcached.name }}
spec:
  {{- $podValues := .Values.memcached }}
  replicas: {{ $podValues.replicaCount }}
  revisionHistoryLimit: {{ $podValues.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "datatracker.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/name: {{ $podValues.name }}
  template:
    metadata:
      labels:
        {{- include "datatracker.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/name: {{ $podValues.name }}
    spec:
      {{- with $podValues.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datatracker.serviceAccountName.memcached" . }}
      securityContext:
        {{- toYaml $podValues.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml $podValues.securityContext | nindent 12 }}
          image: "{{ $podValues.image.repository }}:{{ default "latest" $podValues.image.tag }}"
          args: ["-m", "$(MEMCACHED_MEM_LIMIT)"]
          imagePullPolicy: {{ default "IfNotPresent" $podValues.image.imagePullPolicy }}
          env:
            {{- if .Values.env }}
            {{- range $key, $val := .Values.env }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          {{- with $podValues.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: memcached
              containerPort: 11211
              protocol: TCP
          livenessProbe:
            {{- toYaml $podValues.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $podValues.readinessProbe | nindent 12 }}
          startupProbe:
            {{- toYaml $podValues.startupProbe | nindent 12 }}
          resources:
            {{- toYaml $podValues.resources | nindent 12 }}
      {{- with $podValues.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: {{ include "datatracker.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ include "datatracker.fullname" . }}
  minReplicas: {{ .Values.autoscaling.minReplicas }}
  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
  metrics:
    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
    {{- end }}
    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
    {{- end }}
{{- end }}
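The HPA above only renders when autoscaling is enabled in values. The values.yaml included later in this diff is truncated before any autoscaling block, so the following override is a hypothetical sketch built only from the keys the template reads:

# Hypothetical override; key names are those referenced by the HPA template above.
autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 6
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80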
@@ -1,61 +0,0 @@
{{- if .Values.datatracker.ingress.enabled -}}
{{- $fullName := include "datatracker.fullname" . -}}
{{- $svcPort := .Values.datatracker.service.port -}}
{{- if and .Values.datatracker.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.datatracker.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.datatracker.ingress.annotations "kubernetes.io/ingress.class" .Values.datatracker.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.datatracker.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.datatracker.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.datatracker.ingress.className }}
  {{- end }}
  {{- if .Values.datatracker.ingress.tls }}
  tls:
    {{- range .Values.datatracker.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.datatracker.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}
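The values.yaml later in this diff ships the ingress disabled, with a placeholder `datatracker.local` host. A hedged sketch of an override that would turn it on; the host, class, and secret names are illustrative only:

# Illustrative override; the ingress template above reads exactly these keys.
datatracker:
  ingress:
    enabled: true
    className: nginx
    hosts:
      - host: datatracker.example.org
        paths:
          - path: /
            pathType: Prefix
    tls:
      - secretName: datatracker-example-tls
        hosts:
          - datatracker.example.org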
@@ -1,43 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: "datatracker-shared-volume-claim"
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.datatracker.name }}
spec:
  {{- with .Values.persistentVolumes.datatrackerSharedVolume }}
  storageClassName: {{ .storageClassName | quote }}
  {{- if .volumeName }}
  volumeName: {{ .volumeName | quote }}
  {{- end }}
  accessModes:
    {{- range .accessModes }}
    - {{ . | quote }}
    {{- end }}
  resources:
    requests:
      storage: {{ .storage | quote }}
  {{- end }}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: "rabbitmq-data-claim"
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.rabbitmq.name }}
spec:
  {{- with .Values.persistentVolumes.rabbitmqDataVolume }}
  storageClassName: {{ .storageClassName | quote }}
  {{- if .volumeName }}
  volumeName: {{ .volumeName | quote }}
  {{- end }}
  accessModes:
    {{- range .accessModes }}
    - {{ . | quote }}
    {{- end }}
  resources:
    requests:
      storage: {{ .storage | quote }}
  {{- end }}
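Both claims above read their parameters from a `persistentVolumes` values block that falls outside the truncated portion of values.yaml shown below, so the following shape is an assumption based solely on the keys the templates reference; the class names and sizes are invented for illustration:

# Assumed shape; only the key names come from the PVC templates above.
persistentVolumes:
  datatrackerSharedVolume:
    storageClassName: standard
    accessModes:
      - ReadWriteMany
    storage: 20Gi
    # volumeName: pre-provisioned-pv   # optional, only emitted when set
  rabbitmqDataVolume:
    storageClassName: standard
    accessModes:
      - ReadWriteOnce
    storage: 8Gi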
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.beat.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "datatracker.serviceAccountName.beat" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.beat.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.celery.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "datatracker.serviceAccountName.celery" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.celery.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.datatracker.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "datatracker.serviceAccountName.datatracker" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.datatracker.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.memcached.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "datatracker.serviceAccountName.memcached" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.memcached.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.rabbitmq.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "datatracker.serviceAccountName.rabbitmq" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.rabbitmq.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end -}}
@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{include "datatracker.fullname" .}}
  labels: {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.datatracker.service.annotations }}
  annotations:
    {{- range $key, $value := . }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
spec:
  type: {{.Values.datatracker.service.type}}
  ports:
    - port: {{ default "80" .Values.datatracker.service.port}}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "datatracker.selectorLabels" . | nindent 4}}
    app.kubernetes.io/name: {{ .Values.datatracker.name }}
@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: memcached
  labels: {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.memcached.service.annotations }}
  annotations:
    {{- range $key, $value := . }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
spec:
  type: {{.Values.memcached.service.type}}
  ports:
    - port: {{ default "11211" .Values.memcached.service.port}}
      targetPort: memcached
      protocol: TCP
      name: memcached
  selector:
    {{- include "datatracker.selectorLabels" . | nindent 4}}
    app.kubernetes.io/name: {{ .Values.memcached.name }}
@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq
  labels: {{- include "datatracker.commonLabels" . | nindent 4 }}
  {{- with .Values.rabbitmq.service.annotations }}
  annotations:
    {{- range $key, $value := . }}
    {{ $key }}: {{ $value | quote }}
    {{- end }}
  {{- end }}
spec:
  type: {{.Values.rabbitmq.service.type}}
  clusterIP: None # headless service
  ports:
    - port: {{ default "5672" .Values.rabbitmq.service.port}}
      targetPort: amqp
      protocol: TCP
      name: amqp
  selector:
    {{- include "datatracker.selectorLabels" . | nindent 4}}
    app.kubernetes.io/name: {{ .Values.rabbitmq.name }}
@@ -1,87 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "datatracker.rabbitmq.fullname" . }}
  labels:
    {{- include "datatracker.commonLabels" . | nindent 4 }}
    app.kubernetes.io/name: {{ .Values.rabbitmq.name }}
spec:
  {{- $podValues := .Values.rabbitmq }}
  replicas: {{ $podValues.replicaCount }}
  revisionHistoryLimit: {{ $podValues.revisionHistoryLimit }}
  selector:
    matchLabels:
      {{- include "datatracker.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/name: {{ $podValues.name }}
  template:
    metadata:
      labels:
        {{- include "datatracker.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/name: {{ $podValues.name }}
    spec:
      {{- with $podValues.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datatracker.serviceAccountName.rabbitmq" . }}
      securityContext:
        {{- toYaml $podValues.podSecurityContext | nindent 8 }}
      initContainers:
        - name: init-rabbitmq
          image: busybox:stable
          command:
            - "sh"
            - "-c"
            - "mkdir -p -m700 /mnt/rabbitmq && chown 100:101 /mnt/rabbitmq"
          securityContext:
            runAsNonRoot: false
            runAsUser: 0
            readOnlyRootFilesystem: true
          volumeMounts:
            - name: "rabbitmq-data"
              mountPath: "/mnt"
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml $podValues.securityContext | nindent 12 }}
          image: "{{ $podValues.image.repository }}:{{ default "latest" $podValues.image.tag }}"
          imagePullPolicy: {{ default "IfNotPresent" $podValues.image.pullPolicy }}
          env:
            {{- if .Values.env }}
            {{- range $key, $val := .Values.env }}
            - name: {{ $key | quote }}
              value: {{ $val | quote }}
            {{- end }}
            {{- end }}
          {{- with $podValues.volumeMounts }}
          volumeMounts:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          ports:
            - name: amqp
              containerPort: 5672
              protocol: TCP
          livenessProbe:
            {{- toYaml $podValues.livenessProbe | nindent 12 }}
          readinessProbe:
            {{- toYaml $podValues.readinessProbe | nindent 12 }}
          startupProbe:
            {{- toYaml $podValues.startupProbe | nindent 12 }}
          resources:
            {{- toYaml $podValues.resources | nindent 12 }}
      {{- with $podValues.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with $podValues.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
645	helm/values.yaml
@ -1,645 +0,0 @@
|
|||
# Default values for datatracker.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
## Define serviceAccount names for components. Defaults to component's fully qualified name.
|
||||
##
|
||||
serviceAccounts:
|
||||
datatracker:
|
||||
create: true
|
||||
name: datatracker
|
||||
annotations: {}
|
||||
celery:
|
||||
create: true
|
||||
name: celery
|
||||
annotations: {}
|
||||
beat:
|
||||
create: true
|
||||
name: beat
|
||||
annotations: {}
|
||||
rabbitmq:
|
||||
create: true
|
||||
name: rabbitmq
|
||||
annotations: {}
|
||||
memcached:
|
||||
create: true
|
||||
name: memcached
|
||||
annotations: {}
|
||||
|
||||
# -------------------------------------------------------------
|
||||
# DATATRACKER
|
||||
# -------------------------------------------------------------
|
||||
|
||||
datatracker:
|
||||
name: datatracker
|
||||
image:
|
||||
repository: "ghcr.io/ietf-tools/datatracker"
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
# tag: "v1.1.0"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: datatracker.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
# livenessProbe:
|
||||
# httpGet:
|
||||
# # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
|
||||
# path: /submit/tool-instructions/
|
||||
# port: http
|
||||
|
||||
podAnnotations: {}
|
||||
podLabels: {}
|
||||
|
||||
#readinessProbe:
|
||||
# httpGet:
|
||||
# # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
|
||||
# path: /submit/tool-instructions/
|
||||
# port: http
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
# startupProbe:
|
||||
# initialDelaySeconds: 15
|
||||
# periodSeconds: 5
|
||||
# timeoutSeconds: 5
|
||||
# successThreshold: 1
|
||||
# failureThreshold: 60
|
||||
# httpGet:
|
||||
# # /submit/tool-instructions/ just happens to be cheap until we get a real health endpoint
|
||||
# path: /submit/tool-instructions/
|
||||
# port: http
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes:
|
||||
- name: settings-local-volume
|
||||
configMap:
|
||||
name: django-configmap
|
||||
- name: datatracker-shared-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: "datatracker-shared-volume-claim"
|
||||
- name: datatracker-tmp
|
||||
emptyDir:
|
||||
sizeLimit: "2Gi"
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts:
|
||||
- name: settings-local-volume
|
||||
mountPath: /workspace/ietf/settings_local.py
|
||||
subPath: settings_local.py
|
||||
readOnly: true
|
||||
- name: datatracker-shared-volume
|
||||
mountPath: /a
|
||||
- name: datatracker-tmp
|
||||
mountPath: /tmp
|
||||
|
||||
tolerations: []
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
affinity: {}
|
||||
|
||||
# -------------------------------------------------------------
|
||||
# CELERY
|
||||
# -------------------------------------------------------------
|
||||
|
||||
celery:
|
||||
name: celery
|
||||
image: {}
|
||||
# defaults to datatracker settings if not specified separately
|
||||
#repository: "ghcr.io/ietf-tools/datatracker"
|
||||
#pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
# tag: "v1.1.0"
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
livenessProbe:
|
||||
exec:
|
||||
command: ["celery", "-A", "ietf", "inspect", "ping"]
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
|
||||
podAnnotations: {}
|
||||
podLabels: {}
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
readOnlyRootFilesystem: true
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Automatically mount a ServiceAccount's API credentials?
|
||||
automount: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
startupProbe:
|
||||
initialDelaySeconds: 15
|
||||
periodSeconds: 5
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 60
|
||||
exec:
|
||||
command: ["celery", "-A", "ietf", "inspect", "ping"]
|
||||
|
||||
# Additional volumes on the output Deployment definition.
|
||||
volumes:
|
||||
- name: settings-local-volume
|
||||
configMap:
|
||||
name: django-configmap
|
||||
- name: datatracker-shared-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: "datatracker-shared-volume-claim"
|
||||
- name: celery-tmp
|
||||
emptyDir:
|
||||
sizeLimit: "2Gi"
|
||||
|
||||
# Additional volumeMounts on the output Deployment definition.
|
||||
volumeMounts:
|
||||
- name: settings-local-volume
|
||||
mountPath: /workspace/ietf/settings_local.py
|
||||
subPath: settings_local.py
|
||||
readOnly: true
|
||||
- name: datatracker-shared-volume
|
||||
mountPath: /a
|
||||
- name: celery-tmp
|
||||
mountPath: /tmp
|
||||
|
||||
tolerations: []
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
affinity: {}
|
||||
|
||||
# -------------------------------------------------------------
# BEAT
# -------------------------------------------------------------

beat:
  name: beat
  image: {}
    # defaults to datatracker settings if not specified separately
    # repository: "ghcr.io/ietf-tools/datatracker"
    # pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    # tag: "v1.1.0"

  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""

  livenessProbe:
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]
    periodSeconds: 30
    timeoutSeconds: 5

  podAnnotations: {}
  podLabels: {}

  replicaCount: 1

  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  podSecurityContext:
    runAsNonRoot: true

  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    runAsUser: 1000
    runAsGroup: 1000

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  startupProbe:
    initialDelaySeconds: 15
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60
    exec:
      command: ["celery", "-A", "ietf", "inspect", "ping"]

  # Additional volumes on the output Deployment definition.
  volumes:
    - name: settings-local-volume
      configMap:
        name: django-configmap
    - name: datatracker-shared-volume
      persistentVolumeClaim:
        claimName: "datatracker-shared-volume-claim"
    - name: beat-tmp
      emptyDir:
        sizeLimit: "2Gi"

  # Additional volumeMounts on the output Deployment definition.
  volumeMounts:
    - name: settings-local-volume
      mountPath: /workspace/ietf/settings_local.py
      subPath: settings_local.py
      readOnly: true
    - name: datatracker-shared-volume
      mountPath: /a
    - name: beat-tmp
      mountPath: /tmp

  tolerations: []

  nodeSelector: {}

  affinity: {}
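  # Illustrative only (not part of the original chart): celery beat is a
  # scheduler, so running more than one replica against the same broker would
  # dispatch every periodic task twice; with readOnlyRootFilesystem enabled, any
  # on-disk schedule state also has to live on a writable mount such as the
  # beat-tmp emptyDir above. A minimal override sketch (label value is
  # hypothetical):
  #
  # beat:
  #   replicaCount: 1   # keep a single scheduler per broker
  #   podLabels:
  #     app.kubernetes.io/component: scheduler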
# -------------------------------------------------------------
# RABBITMQ
# -------------------------------------------------------------

rabbitmq:
  name: "rabbitmq"
  image:
    repository: "ghcr.io/ietf-tools/datatracker-mq"
    pullPolicy: IfNotPresent
    tag: "3.12-alpine"
  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""

  livenessProbe:
    exec:
      command: ["rabbitmq-diagnostics", "-q", "ping"]
    periodSeconds: 30
    timeoutSeconds: 5

  podAnnotations: {}
  podLabels: {}

  replicaCount: 1

  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  podSecurityContext:
    runAsNonRoot: true

  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    # rabbitmq image sets up uid/gid 100/101
    runAsUser: 100
    runAsGroup: 101

  service:
    type: ClusterIP
    port: 5672

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  startupProbe:
    initialDelaySeconds: 15
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60
    exec:
      command: ["rabbitmq-diagnostics", "-q", "ping"]

  # Additional volumes on the output Deployment definition.
  volumes:
    - name: "rabbitmq-data"
      persistentVolumeClaim:
        claimName: "rabbitmq-data-claim"
    - name: "rabbitmq-config"
      configMap:
        name: "rabbitmq-configmap"
    - name: "rabbitmq-tmp"
      emptyDir:
        sizeLimit: 50Mi
    # - name: foo
    #   secret:
    #     secretName: mysecret
    #     optional: false

  # Additional volumeMounts on the output Deployment definition.
  volumeMounts:
    - name: "rabbitmq-data"
      mountPath: "/var/lib/rabbitmq"
      subPath: "rabbitmq"
    - name: "rabbitmq-config"
      mountPath: "/etc/rabbitmq"
    - name: "rabbitmq-tmp"
      mountPath: "/tmp"

  tolerations: []

  nodeSelector: {}

  affinity: {}
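  # Illustrative only (not part of the original chart): the probes above use the
  # stock `rabbitmq-diagnostics -q ping` health check, and the broker keeps its
  # state on the rabbitmq-data PVC. A sketch of a values override (the image tag
  # shown is hypothetical and would need to exist in the datatracker-mq
  # registry):
  #
  # rabbitmq:
  #   image:
  #     tag: "3.13-alpine"
  #   resources:
  #     requests:
  #       cpu: 250m
  #       memory: 512Mi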
# -------------------------------------------------------------
# MEMCACHED
# -------------------------------------------------------------

memcached:
  name: memcached
  image:
    repository: "memcached"
    pullPolicy: IfNotPresent
    tag: "1.6-alpine"

  imagePullSecrets: []
  nameOverride: ""
  fullnameOverride: ""

  podAnnotations: {}
  podLabels: {}

  replicaCount: 1

  resources: {}
    # We usually recommend not to specify default resources and to leave this as a conscious
    # choice for the user. This also increases chances charts run on environments with little
    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    # limits:
    #   cpu: 100m
    #   memory: 128Mi
    # requests:
    #   cpu: 100m
    #   memory: 128Mi

  podSecurityContext:
    runAsNonRoot: true

  securityContext:
    allowPrivilegeEscalation: false
    capabilities:
      drop:
        - ALL
    readOnlyRootFilesystem: true
    # memcached image sets up uid/gid 11211
    runAsUser: 11211
    runAsGroup: 11211

  service:
    type: ClusterIP
    port: 11211

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Automatically mount a ServiceAccount's API credentials?
    automount: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  tolerations: []

  nodeSelector: {}

  affinity: {}
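  # Illustrative only (not part of the original chart): the cache size itself is
  # controlled by MEMCACHED_MEM_LIMIT in the `env` block further down (megabytes,
  # assuming the chart passes it to memcached's -m flag), so a pod memory limit
  # should leave headroom above it. A sketch of a matching override:
  #
  # memcached:
  #   resources:
  #     limits:
  #       memory: 1536Mi
  # env:
  #   MEMCACHED_MEM_LIMIT: "1024"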
# -------------------------------------------------------------
# SCOUT APM SETTINGS
# -------------------------------------------------------------
# Set this to enable a Scout APM Core Agent sidecar
scoutapm:
  image:
    repository: "scoutapp/scoutapm"
    tag: "version-1.4.0"
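  # Illustrative only (not part of the original chart): the sidecar image above
  # is paired with the DATATRACKER_SCOUT_KEY / DATATRACKER_SCOUT_NAME entries in
  # the `env` block below; a staging override might pin both in one place (the
  # key is a placeholder, not a real credential):
  #
  # scoutapm:
  #   image:
  #     tag: "version-1.4.0"
  # env:
  #   DATATRACKER_SCOUT_KEY: "placeholder-scout-key"
  #   DATATRACKER_SCOUT_NAME: "StagingDatatracker"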
# -------------------------------------------------------------
# PERSISTENT VOLUMES
# -------------------------------------------------------------

persistentVolumes:
  datatrackerSharedVolume:
    # Note: This is the /a shared volume
    volumeName: "datatracker-shared-volume"
    accessModes:
      - "ReadWriteMany" # or ReadWriteOnce and force datatracker/celery/beat to a single node
    storageClassName: "" # Empty string means do not use default storage class
    storage: "600Gi" # actual PersistentVolume must be at least this big or the PVC will not bind

  rabbitmqDataVolume:
    volumeName: "rabbitmq-data-volume"
    accessModes:
      - "ReadWriteOnce"
    storageClassName: "" # Empty string means do not use default storage class
    storage: "8Gi" # actual PersistentVolume must be at least this big or the PVC will not bind
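  # Illustrative only (not part of the original chart): the claims above bind to
  # pre-provisioned PersistentVolumes by volumeName, so the cluster side has to
  # supply matching PVs. A minimal sketch for the shared /a volume (the NFS
  # server and export path are placeholders; any ReadWriteMany-capable backend
  # works):
  #
  # apiVersion: v1
  # kind: PersistentVolume
  # metadata:
  #   name: datatracker-shared-volume
  # spec:
  #   capacity:
  #     storage: 600Gi
  #   accessModes:
  #     - ReadWriteMany
  #   persistentVolumeReclaimPolicy: Retain
  #   nfs:
  #     server: nfs.example.org
  #     path: /exports/datatracker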
# -------------------------------------------------------------
# COMMON
# -------------------------------------------------------------

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80
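  # Illustrative only (not part of the original chart): the block above is shaped
  # like the standard Helm-scaffold HorizontalPodAutoscaler toggle and is
  # disabled by default. A sketch of an override that turns it on with more
  # conservative bounds:
  #
  # autoscaling:
  #   enabled: true
  #   minReplicas: 2
  #   maxReplicas: 6
  #   targetCPUUtilizationPercentage: 80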
env:
  # n.b., these are debug values / non-secret secrets
  DATATRACKER_SERVER_MODE: "development" # development for staging, production for production
  DATATRACKER_ADMINS: |-
    Robert Sparks <rjsparks@nostrum.com>
    Ryan Cross <rcross@amsl.com>
    Kesara Rathnayake <kesara@staff.ietf.org>
    Jennifer Richards <jennifer@staff.ietf.org>
    Nicolas Giard <nick@staff.ietf.org>
  DATATRACKER_ALLOWED_HOSTS: ".ietf.org" # newline-separated list also allowed
  # DATATRACKER_DATATRACKER_DEBUG: "false"

  # DB access details - needs to be filled in
  # DATATRACKER_DBHOST: "db"
  # DATATRACKER_DBPORT: "5432"
  # DATATRACKER_DBNAME: "datatracker"
  # DATATRACKER_DBUSER: "django" # secret
  # DATATRACKER_DBPASS: "RkTkDPFnKpko" # secret

  DATATRACKER_DJANGO_SECRET_KEY: "PDwXboUq!=hPjnrtG2=ge#N$Dwy+wn@uivrugwpic8mxyPfHk" # secret

  # Set this to point testing / staging at the production statics server until we
  # sort that out
  # DATATRACKER_STATIC_URL: "https://static.ietf.org/dt/12.10.0/"

  # DATATRACKER_EMAIL_DEBUG: "true"

  # Outgoing email details
  # DATATRACKER_EMAIL_HOST: "localhost" # defaults to localhost
  # DATATRACKER_EMAIL_PORT: "2025" # defaults to 2025

  # The value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_NOMCOM_APP_SECRET_B64: "m9pzMezVoFNJfsvU9XSZxGnXnwup6P5ZgCQeEnROOoQ=" # secret

  DATATRACKER_IANA_SYNC_PASSWORD: "this-is-the-iana-sync-password" # secret
  DATATRACKER_RFC_EDITOR_SYNC_PASSWORD: "this-is-the-rfc-editor-sync-password" # secret
  DATATRACKER_YOUTUBE_API_KEY: "this-is-the-youtube-api-key" # secret
  DATATRACKER_GITHUB_BACKUP_API_KEY: "this-is-the-github-backup-api-key" # secret

  # API key configuration
  DATATRACKER_API_KEY_TYPE: "ES265"
  # secret - value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_API_PUBLIC_KEY_PEM_B64: |-
    Ci0tLS0tQkVHSU4gUFVCTElDIEtFWS0tLS0tCk1Ga3dFd1lIS29aSXpqMENBUVlJS
    29aSXpqMERBUWNEUWdBRXFWb2pzYW9mREpTY3VNSk4rdHNodW15Tk01TUUKZ2Fyel
    ZQcWtWb3ZtRjZ5RTdJSi9kdjRGY1YrUUtDdEovck9TOGUzNlk4WkFFVll1dWtoZXM
    weVoxdz09Ci0tLS0tRU5EIFBVQkxJQyBLRVktLS0tLQo=
  # secret - value here is the default from settings.py (i.e., not actually secret)
  DATATRACKER_API_PRIVATE_KEY_PEM_B64: |-
    Ci0tLS0tQkVHSU4gUFJJVkFURSBLRVktLS0tLQpNSUdIQWdFQU1CTUdCeXFHU000O
    UFnRUdDQ3FHU000OUF3RUhCRzB3YXdJQkFRUWdvSTZMSmtvcEtxOFhySGk5ClFxR1
    F2RTRBODNURllqcUx6KzhnVUxZZWNzcWhSQU5DQUFTcFdpT3hxaDhNbEp5NHdrMzY
    yeUc2Ykkwemt3U0IKcXZOVStxUldpK1lYcklUc2duOTIvZ1Z4WDVBb0swbitzNUx4
    N2ZwanhrQVJWaTY2U0Y2elRKblgKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=

  # DATATRACKER_MEETECHO_API_BASE: "https://meetings.conf.meetecho.com/api/v1/"
  DATATRACKER_MEETECHO_CLIENT_ID: "this-is-the-meetecho-client-id" # secret
  DATATRACKER_MEETECHO_CLIENT_SECRET: "this-is-the-meetecho-client-secret" # secret

  # DATATRACKER_MATOMO_SITE_ID: "7" # must be present to enable Matomo
  # DATATRACKER_MATOMO_DOMAIN_PATH: "analytics.ietf.org"

  CELERY_PASSWORD: "this-is-a-secret" # secret

  DATATRACKER_APP_API_TOKENS_JSON: "{}" # secret

  # use this to override default - one entry per line
  # DATATRACKER_CSRF_TRUSTED_ORIGINS: |-
  #   https://datatracker.staging.ietf.org

  # Scout configuration
  DATATRACKER_SCOUT_KEY: "this-is-the-scout-key"
  DATATRACKER_SCOUT_NAME: "StagingDatatracker"

  MEMCACHED_MEM_LIMIT: "1024"
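  # Illustrative only (not part of the original chart): the values above are
  # placeholder / non-secret defaults, and several keys accept newline-separated
  # lists as noted in the comments. A sketch of how a staging override might
  # replace a few of them (hostnames follow the commented staging origin above):
  #
  # env:
  #   DATATRACKER_SERVER_MODE: "development"
  #   DATATRACKER_ALLOWED_HOSTS: |-
  #     datatracker.staging.ietf.org
  #     .ietf.org
  #   DATATRACKER_CSRF_TRUSTED_ORIGINS: |-
  #     https://datatracker.staging.ietf.org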