chore: merge main into feat/tzaware branch
commit 1debd1ff23
52 .github/workflows/build.yml vendored
@@ -16,6 +16,11 @@ on:
         description: 'Create Production Release'
         required: true
         type: boolean
+      sandbox:
+        description: 'Deploy to Sandbox'
+        default: true
+        required: true
+        type: boolean
       skiptests:
         description: 'Skip Tests'
         required: true
@@ -86,18 +91,18 @@ jobs:
         run: |
           if [[ $NEXT_VERSION ]]; then
             echo "Using AUTO SEMVER mode: $NEXT_VERSION"
-            echo "::set-output name=should_deploy::true"
-            echo "::set-output name=pkg_version::$NEXT_VERSION"
+            echo "should_deploy=true" >> $GITHUB_OUTPUT
+            echo "pkg_version=$NEXT_VERSION" >> $GITHUB_OUTPUT
             echo "::notice::Release $NEXT_VERSION created using branch $GITHUB_REF_NAME"
           elif [[ "$GITHUB_REF" =~ ^refs/tags/* ]]; then
             echo "Using TAG mode: $GITHUB_REF_NAME"
-            echo "::set-output name=should_deploy::true"
-            echo "::set-output name=pkg_version::$GITHUB_REF_NAME"
+            echo "should_deploy=true" >> $GITHUB_OUTPUT
+            echo "pkg_version=$GITHUB_REF_NAME" >> $GITHUB_OUTPUT
             echo "::notice::Release $GITHUB_REF_NAME created using tag $GITHUB_REF_NAME"
           else
             echo "Using TEST mode: 8.0.0-dev.$GITHUB_RUN_NUMBER"
-            echo "::set-output name=should_deploy::false"
-            echo "::set-output name=pkg_version::8.0.0-dev.$GITHUB_RUN_NUMBER"
+            echo "should_deploy=false" >> $GITHUB_OUTPUT
+            echo "pkg_version=8.0.0-dev.$GITHUB_RUN_NUMBER" >> $GITHUB_OUTPUT
             echo "::notice::Non-production build 8.0.0-dev.$GITHUB_RUN_NUMBER created using branch $GITHUB_REF_NAME"
           fi
@@ -390,7 +395,6 @@ jobs:

      - name: Upload Build Artifacts
        uses: actions/upload-artifact@v3
        if: ${{ env.SHOULD_DEPLOY == 'false' || github.event.inputs.dryrun == 'true' }}
        with:
          name: release-${{ env.PKG_VERSION }}
          path: /home/runner/work/release/release.tar.gz
@@ -405,3 +409,37 @@ jobs:
           }
         env:
           SLACK_BOT_TOKEN: ${{ secrets.SLACK_GH_BOT }}
+
+  # -----------------------------------------------------------------
+  # SANDBOX
+  # -----------------------------------------------------------------
+  sandbox:
+    name: Deploy to Sandbox
+    if: ${{ always() && github.event.inputs.sandbox == 'true' }}
+    needs: [prepare, release]
+    runs-on: dev-server
+    env:
+      PKG_VERSION: ${{needs.prepare.outputs.pkg_version}}
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Download a Release Artifact
+        uses: actions/download-artifact@v3.0.0
+        with:
+          name: release-${{ env.PKG_VERSION }}
+
+      - name: Deploy to containers
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          cd dev/deploy-to-container
+          npm ci
+          cd ../..
+          node ./dev/deploy-to-container/cli.js --branch ${{ github.ref_name }} --domain neverusethis.com
+
+      - name: Cleanup old docker resources
+        env:
+          DEBIAN_FRONTEND: noninteractive
+        run: |
+          docker image prune -a -f
7 dev/deploy-to-container/.editorconfig Normal file
@@ -0,0 +1,7 @@
[*]
indent_size = 2
indent_style = space
charset = utf-8
trim_trailing_whitespace = false
end_of_line = lf
insert_final_newline = true
1 dev/deploy-to-container/.gitignore vendored Normal file
@@ -0,0 +1 @@
/node_modules
3 dev/deploy-to-container/.npmrc Normal file
@@ -0,0 +1,3 @@
audit = false
fund = false
save-exact = true
22 dev/deploy-to-container/README.md Normal file
@@ -0,0 +1,22 @@
# Datatracker Deploy to Container Tool

This tool takes a release.tar.gz build file and deploys it as a container, along with its own database container.

## Requirements

- Node `16.x` or later
- Docker

## Usage

1. From the `dev/deploy-to-container` directory, run:
    ```sh
    npm install
    ```
2. Make sure you have a `release.tar.gz` tarball in the project root directory.
3. From the project root directory (two levels up), run the following command, replacing the `branch` and `domain` arguments:
    ```sh
    node ./dev/deploy-to-container/cli.js --branch main --domain something.com
    ```

Containers named `dt-app-BRANCH` and `dt-db-BRANCH` (where BRANCH is the branch argument provided above) will be created.
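After a deploy, the resulting containers can be checked with the same dockerode API the CLI uses. A minimal sketch, not part of this commit (the `check-containers.js` filename and the positional branch argument are illustrative):

```js
// check-containers.js: hypothetical helper; run with Node 16+ as an ES module.
// Reports the state of the dt-* containers created for a branch,
// using the same dockerode calls as cli.js.
import Docker from 'dockerode'

const branch = process.argv[2] || 'main' // illustrative: branch name as first argument
const expected = [`dt-db-${branch}`, `dt-mq-${branch}`, `dt-celery-${branch}`, `dt-beat-${branch}`, `dt-app-${branch}`]

const dock = new Docker()
const containers = await dock.listContainers({ all: true })
for (const name of expected) {
  // dockerode reports names with a leading slash, e.g. "/dt-app-main"
  const match = containers.find(c => c.Names.includes(`/${name}`))
  console.info(`${name}: ${match ? match.State : 'not found'}`)
}
```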
282 dev/deploy-to-container/cli.js Normal file
@@ -0,0 +1,282 @@
#!/usr/bin/env node

import Docker from 'dockerode'
import path from 'path'
import fs from 'fs-extra'
import tar from 'tar'
import yargs from 'yargs/yargs'
import { hideBin } from 'yargs/helpers'
import slugify from 'slugify'
import { nanoid, customAlphabet } from 'nanoid'
import { alphanumeric } from 'nanoid-dictionary'

const nanoidAlphaNum = customAlphabet(alphanumeric, 16)

async function main () {
  const basePath = process.cwd()
  const releasePath = path.join(basePath, 'release')
  const argv = yargs(hideBin(process.argv)).argv

  // Parse branch argument
  let branch = argv.branch
  if (!branch) {
    throw new Error('Missing --branch argument!')
  }
  if (branch.indexOf('/') >= 0) {
    branch = branch.split('/')[1]
  }
  branch = slugify(branch, { lower: true, strict: true })
  if (branch.length < 1) {
    throw new Error('Branch name is empty!')
  }
  console.info(`Will use branch name "${branch}"`)

  // Parse domain argument
  const domain = argv.domain
  if (!domain) {
    throw new Error('Missing --domain argument!')
  }
  const hostname = `dt-${branch}.${domain}`
  console.info(`Will use hostname "${hostname}"`)

  // Connect to Docker Engine API
  console.info('Connecting to Docker Engine API...')
  const dock = new Docker()
  await dock.ping()
  console.info('Connected to Docker Engine API.')

  // Extract release artifact
  console.info('Extracting release artifact...')
  if (!(await fs.pathExists(path.join(basePath, 'release.tar.gz')))) {
    throw new Error('Missing release.tar.gz file!')
  }
  await fs.emptyDir(releasePath)
  await tar.x({
    cwd: releasePath,
    file: 'release.tar.gz'
  })
  console.info('Extracted release artifact successfully.')

  // Update the settings_local.py file
  console.info('Setting configuration files...')
  const mqKey = nanoidAlphaNum()
  const settingsPath = path.join(releasePath, 'ietf/settings_local.py')
  const cfgRaw = await fs.readFile(path.join(basePath, 'dev/deploy-to-container/settings_local.py'), 'utf8')
  await fs.outputFile(settingsPath,
    cfgRaw
      .replace('__DBHOST__', `dt-db-${branch}`)
      .replace('__SECRETKEY__', nanoid(36))
      .replace('__MQCONNSTR__', `amqp://datatracker:${mqKey}@dt-mq-${branch}/dt`)
  )
  await fs.copy(path.join(basePath, 'docker/scripts/app-create-dirs.sh'), path.join(releasePath, 'app-create-dirs.sh'))
  await fs.copy(path.join(basePath, 'dev/deploy-to-container/start.sh'), path.join(releasePath, 'start.sh'))
  await fs.copy(path.join(basePath, 'test/data'), path.join(releasePath, 'test/data'))
  console.info('Updated configuration files.')

  // Pull latest DB image
  console.info('Pulling latest DB docker image...')
  const dbImagePullStream = await dock.pull('ghcr.io/ietf-tools/datatracker-db:latest')
  await new Promise((resolve, reject) => {
    dock.modem.followProgress(dbImagePullStream, (err, res) => err ? reject(err) : resolve(res))
  })
  console.info('Pulled latest DB docker image successfully.')

  // Pull latest Datatracker Base image
  console.info('Pulling latest Datatracker base docker image...')
  const appImagePullStream = await dock.pull('ghcr.io/ietf-tools/datatracker-app-base:latest')
  await new Promise((resolve, reject) => {
    dock.modem.followProgress(appImagePullStream, (err, res) => err ? reject(err) : resolve(res))
  })
  console.info('Pulled latest Datatracker base docker image.')

  // Pull latest MQ image
  console.info('Pulling latest MQ docker image...')
  const mqImagePullStream = await dock.pull('ghcr.io/ietf-tools/datatracker-mq:latest')
  await new Promise((resolve, reject) => {
    dock.modem.followProgress(mqImagePullStream, (err, res) => err ? reject(err) : resolve(res))
  })
  console.info('Pulled latest MQ docker image.')

  // Pull latest Celery image
  console.info('Pulling latest Celery docker image...')
  const celeryImagePullStream = await dock.pull('ghcr.io/ietf-tools/datatracker-celery:latest')
  await new Promise((resolve, reject) => {
    dock.modem.followProgress(celeryImagePullStream, (err, res) => err ? reject(err) : resolve(res))
  })
  console.info('Pulled latest Celery docker image.')

  // Terminate existing containers
  console.info('Ensuring existing containers with same name are terminated...')
  const containers = await dock.listContainers({ all: true })
  for (const container of containers) {
    if (
      container.Names.includes(`/dt-db-${branch}`) ||
      container.Names.includes(`/dt-app-${branch}`) ||
      container.Names.includes(`/dt-mq-${branch}`) ||
      container.Names.includes(`/dt-celery-${branch}`) ||
      container.Names.includes(`/dt-beat-${branch}`)
    ) {
      console.info(`Terminating old container ${container.Id}...`)
      const oldContainer = dock.getContainer(container.Id)
      if (container.State === 'running') {
        await oldContainer.stop({ t: 5 })
      }
      await oldContainer.remove({
        force: true,
        v: true
      })
    }
  }
  console.info('Existing containers with same name have been terminated.')

  // Get shared docker network
  console.info('Querying shared docker network...')
  const networks = await dock.listNetworks()
  if (!networks.some(n => n.Name === 'shared')) {
    console.info('No shared docker network found, creating a new one...')
    await dock.createNetwork({
      Name: 'shared',
      CheckDuplicate: true
    })
    console.info('Created shared docker network successfully.')
  } else {
    console.info('Existing shared docker network found.')
  }

  // Get assets docker volume
  console.info('Querying assets docker volume...')
  const assetsVolume = await dock.getVolume('dt-assets')
  if (!assetsVolume) {
    console.info('No assets docker volume found, creating a new one...')
    await dock.createVolume({
      Name: 'dt-assets'
    })
    console.info('Created assets docker volume successfully.')
  } else {
    console.info('Existing assets docker volume found.')
  }

  // Create DB container
  console.info(`Creating DB docker container... [dt-db-${branch}]`)
  const dbContainer = await dock.createContainer({
    Image: 'ghcr.io/ietf-tools/datatracker-db:latest',
    name: `dt-db-${branch}`,
    Hostname: `dt-db-${branch}`,
    HostConfig: {
      NetworkMode: 'shared',
      RestartPolicy: {
        Name: 'unless-stopped'
      }
    }
  })
  await dbContainer.start()
  console.info('Created and started DB docker container successfully.')

  // Create MQ container
  console.info(`Creating MQ docker container... [dt-mq-${branch}]`)
  const mqContainer = await dock.createContainer({
    Image: 'ghcr.io/ietf-tools/datatracker-mq:latest',
    name: `dt-mq-${branch}`,
    Hostname: `dt-mq-${branch}`,
    Env: [
      `CELERY_PASSWORD=${mqKey}`
    ],
    HostConfig: {
      Memory: 4 * (1024 ** 3), // in bytes
      NetworkMode: 'shared',
      RestartPolicy: {
        Name: 'unless-stopped'
      }
    }
  })
  await mqContainer.start()
  console.info('Created and started MQ docker container successfully.')

  // Create Celery containers
  console.info(`Creating Celery docker containers... [dt-celery-${branch}, dt-beat-${branch}]`)
  const conConfs = [
    { name: 'celery', role: 'worker' },
    { name: 'beat', role: 'beat' }
  ]
  const celeryContainers = {}
  for (const conConf of conConfs) {
    celeryContainers[conConf.name] = await dock.createContainer({
      Image: 'ghcr.io/ietf-tools/datatracker-celery:latest',
      name: `dt-${conConf.name}-${branch}`,
      Hostname: `dt-${conConf.name}-${branch}`,
      Env: [
        'CELERY_APP=ietf',
        `CELERY_ROLE=${conConf.role}`,
        'UPDATE_REQUIREMENTS_FROM=requirements.txt'
      ],
      HostConfig: {
        Binds: [
          'dt-assets:/assets'
        ],
        Init: true,
        NetworkMode: 'shared',
        RestartPolicy: {
          Name: 'unless-stopped'
        }
      },
      Cmd: ['--loglevel=INFO']
    })
  }
  console.info('Created Celery docker containers successfully.')

  // Create Datatracker container
  console.info(`Creating Datatracker docker container... [dt-app-${branch}]`)
  const appContainer = await dock.createContainer({
    Image: 'ghcr.io/ietf-tools/datatracker-app-base:latest',
    name: `dt-app-${branch}`,
    Hostname: `dt-app-${branch}`,
    Env: [
      `LETSENCRYPT_HOST=${hostname}`,
      `VIRTUAL_HOST=${hostname}`,
      `VIRTUAL_PORT=8000`
    ],
    HostConfig: {
      Binds: [
        'dt-assets:/assets'
      ],
      NetworkMode: 'shared',
      RestartPolicy: {
        Name: 'unless-stopped'
      }
    },
    Entrypoint: ['bash', '-c', 'chmod +x ./start.sh && ./start.sh']
  })
  console.info(`Created Datatracker docker container successfully.`)

  // Inject updated release into container
  console.info('Building updated release tarball to inject into containers...')
  const tgzPath = path.join(basePath, 'import.tgz')
  await tar.c({
    gzip: true,
    file: tgzPath,
    cwd: releasePath,
    filter (path) {
      if (path.includes('.git') || path.includes('node_modules')) { return false }
      return true
    }
  }, ['.'])
  console.info('Injecting archive into Datatracker + Celery docker containers...')
  await celeryContainers.celery.putArchive(tgzPath, { path: '/workspace' })
  await celeryContainers.beat.putArchive(tgzPath, { path: '/workspace' })
  await appContainer.putArchive(tgzPath, { path: '/workspace' })
  await fs.remove(tgzPath)
  console.info(`Imported working files into Datatracker + Celery docker containers successfully.`)

  console.info('Starting Celery containers...')
  await celeryContainers.celery.start()
  await celeryContainers.beat.start()
  console.info('Celery containers started successfully.')

  console.info('Starting Datatracker container...')
  await appContainer.start()
  console.info('Datatracker container started successfully.')

  process.exit(0)
}

main()
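The four image pulls above repeat the same pattern: dock.pull() resolves with a stream before the pull has finished, so each one waits on dockerode's followProgress. A helper along these lines (a sketch, not part of this commit) would express the pattern once:

```js
// Hypothetical refactor sketch: wraps the pull-then-wait pattern that
// cli.js repeats for the DB, app-base, MQ, and Celery images.
async function pullImage (dock, image) {
  console.info(`Pulling ${image}...`)
  const stream = await dock.pull(image)
  // followProgress fires its callback only once the pull has fully completed
  await new Promise((resolve, reject) => {
    dock.modem.followProgress(stream, (err, res) => err ? reject(err) : resolve(res))
  })
  console.info(`Pulled ${image} successfully.`)
}

// Usage: await pullImage(dock, 'ghcr.io/ietf-tools/datatracker-db:latest')
```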
1143 dev/deploy-to-container/package-lock.json generated Normal file
File diff suppressed because it is too large
16 dev/deploy-to-container/package.json Normal file
@@ -0,0 +1,16 @@
{
  "name": "deploy-to-container",
  "type": "module",
  "dependencies": {
    "dockerode": "^3.3.3",
    "fs-extra": "^10.1.0",
    "nanoid": "4.0.0",
    "nanoid-dictionary": "5.0.0-beta.1",
    "slugify": "1.6.5",
    "tar": "^6.1.11",
    "yargs": "^17.5.1"
  },
  "engines": {
    "node": ">=16"
  }
}
80 dev/deploy-to-container/settings_local.py Normal file
@@ -0,0 +1,80 @@
# Copyright The IETF Trust 2007-2019, All Rights Reserved
# -*- coding: utf-8 -*-

from ietf.settings import *  # pyflakes:ignore

ALLOWED_HOSTS = ['*']

DATABASES = {
    'default': {
        'HOST': '__DBHOST__',
        'PORT': 3306,
        'NAME': 'ietf_utf8',
        'ENGINE': 'django.db.backends.mysql',
        'USER': 'django',
        'PASSWORD': 'RkTkDPFnKpko',
        'OPTIONS': {
            'sql_mode': 'STRICT_TRANS_TABLES',
            'init_command': 'SET storage_engine=InnoDB; SET names "utf8"',
        },
    },
}

DATABASE_TEST_OPTIONS = {
    'init_command': 'SET storage_engine=InnoDB',
}

SECRET_KEY = "__SECRETKEY__"

CELERY_BROKER_URL = '__MQCONNSTR__'

IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"
IDSUBMIT_REPOSITORY_PATH = "test/id/"
IDSUBMIT_STAGING_PATH = "test/staging/"
INTERNET_DRAFT_ARCHIVE_DIR = "test/archive/"
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = "test/archive/"
RFC_PATH = "test/rfc/"

AGENDA_PATH = '/assets/www6s/proceedings/'
MEETINGHOST_LOGO_PATH = AGENDA_PATH

USING_DEBUG_EMAIL_SERVER = True
EMAIL_HOST = 'localhost'
EMAIL_PORT = 2025

MEDIA_BASE_DIR = '/assets'
MEDIA_ROOT = MEDIA_BASE_DIR + '/media/'
MEDIA_URL = '/media/'

PHOTOS_DIRNAME = 'photo'
PHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME

SUBMIT_YANG_CATALOG_MODEL_DIR = '/assets/ietf-ftp/yang/catalogmod/'
SUBMIT_YANG_DRAFT_MODEL_DIR = '/assets/ietf-ftp/yang/draftmod/'
SUBMIT_YANG_INVAL_MODEL_DIR = '/assets/ietf-ftp/yang/invalmod/'
SUBMIT_YANG_IANA_MODEL_DIR = '/assets/ietf-ftp/yang/ianamod/'
SUBMIT_YANG_RFC_MODEL_DIR = '/assets/ietf-ftp/yang/rfcmod/'

# Set INTERNAL_IPS for use within Docker. See https://knasmueller.net/fix-djangos-debug-toolbar-not-showing-inside-docker
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]

# DEV_TEMPLATE_CONTEXT_PROCESSORS = [
#     'ietf.context_processors.sql_debug',
# ]

DOCUMENT_PATH_PATTERN = '/assets/ietf-ftp/{doc.type_id}/'
INTERNET_DRAFT_PATH = '/assets/ietf-ftp/internet-drafts/'
RFC_PATH = '/assets/ietf-ftp/rfc/'
CHARTER_PATH = '/assets/ietf-ftp/charter/'
BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'

NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
SLIDE_STAGING_PATH = 'test/staging/'

DE_GFM_BINARY = '/usr/local/bin/de-gfm'
15 dev/deploy-to-container/start.sh Normal file
@ -0,0 +1,15 @@
|
|||
#!/bin/bash
|
||||
|
||||
echo "Fixing permissions..."
|
||||
chmod -R 777 ./
|
||||
echo "Ensure all requirements.txt packages are installed..."
|
||||
pip --disable-pip-version-check --no-cache-dir install -r requirements.txt
|
||||
echo "Creating data directories..."
|
||||
chmod +x ./app-create-dirs.sh
|
||||
./app-create-dirs.sh
|
||||
echo "Running Datatracker checks..."
|
||||
./ietf/manage.py check
|
||||
echo "Running Datatracker migrations..."
|
||||
./ietf/manage.py migrate
|
||||
echo "Starting Datatracker..."
|
||||
./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local
|
@@ -34,7 +34,7 @@ from ietf.meeting.utils import add_event_info_to_session_qs
 from ietf.utils.test_utils import assert_ical_response_is_valid
 from ietf.utils.jstest import ( IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled,
                                 presence_of_element_child_by_css_selector )
-from ietf.utils.timezone import datetime_today, datetime_from_date, date_today
+from ietf.utils.timezone import datetime_today, datetime_from_date, date_today, timezone_not_near_midnight

 if selenium_enabled():
     from selenium.webdriver.common.action_chains import ActionChains
@@ -411,7 +411,12 @@ class EditMeetingScheduleTests(IetfSeleniumTestCase):
     def test_past_swap_days_buttons(self):
         """Swap days buttons should be hidden for past items"""
         wait = WebDriverWait(self.driver, 2)
-        meeting = MeetingFactory(type_id='ietf', date=timezone.now() - datetime.timedelta(days=3), days=7)
+        meeting = MeetingFactory(
+            type_id='ietf',
+            date=timezone.now() - datetime.timedelta(days=3),
+            days=7,
+            time_zone=timezone_not_near_midnight(),
+        )
         room = RoomFactory(meeting=meeting)

         # get current time in meeting time zone
@@ -2,12 +2,15 @@
 # -*- coding: utf-8 -*-


 import datetime
 import io
 import json
 import os.path
 import pytz
 import shutil
 import types

 from mock import patch
 from pyquery import PyQuery
 from typing import Dict, List  # pyflakes:ignore
@ -39,6 +42,7 @@ from ietf.utils.mail import send_mail_preformatted, send_mail_text, send_mail_mi
|
|||
from ietf.utils.test_runner import get_template_paths, set_coverage_checking
|
||||
from ietf.utils.test_utils import TestCase, unicontent
|
||||
from ietf.utils.text import parse_unicode
|
||||
from ietf.utils.timezone import timezone_not_near_midnight
|
||||
from ietf.utils.xmldraft import XMLDraft
|
||||
|
||||
class SendingMail(TestCase):
|
||||
|
@ -476,3 +480,40 @@ class TestAndroidSiteManifest(TestCase):
|
|||
manifest = json.loads(unicontent(r))
|
||||
self.assertTrue('name' in manifest)
|
||||
self.assertTrue('theme_color' in manifest)
|
||||
|
||||
|
||||
class TimezoneTests(TestCase):
|
||||
"""Tests of the timezone utilities"""
|
||||
@patch(
|
||||
'ietf.utils.timezone.timezone.now',
|
||||
return_value=pytz.timezone('America/Chicago').localize(datetime.datetime(2022, 7, 1, 23, 15, 0)), # 23:15:00
|
||||
)
|
||||
def test_timezone_not_near_midnight(self, mock):
|
||||
# give it several choices that should be rejected and one that should be accepted
|
||||
with patch(
|
||||
'ietf.utils.timezone.pytz.common_timezones',
|
||||
[
|
||||
'America/Chicago', # time is 23:15, should be rejected
|
||||
'America/Lima', # time is 23:15, should be rejected
|
||||
'America/New_York', # time is 00:15, should be rejected
|
||||
'Europe/Riga', # time is 07:15, acceptable
|
||||
],
|
||||
):
|
||||
# check a few times (will pass by chance < 0.1% of the time)
|
||||
self.assertEqual(timezone_not_near_midnight(), 'Europe/Riga')
|
||||
self.assertEqual(timezone_not_near_midnight(), 'Europe/Riga')
|
||||
self.assertEqual(timezone_not_near_midnight(), 'Europe/Riga')
|
||||
self.assertEqual(timezone_not_near_midnight(), 'Europe/Riga')
|
||||
self.assertEqual(timezone_not_near_midnight(), 'Europe/Riga')
|
||||
|
||||
# now give it no valid choice
|
||||
with patch(
|
||||
'ietf.utils.timezone.pytz.common_timezones',
|
||||
[
|
||||
'America/Chicago', # time is 23:15, should be rejected
|
||||
'America/Lima', # time is 23:15, should be rejected
|
||||
'America/New_York', # time is 00:15, should be rejected
|
||||
],
|
||||
):
|
||||
with self.assertRaises(RuntimeError):
|
||||
timezone_not_near_midnight()
|
||||
|
|
@@ -1,4 +1,5 @@
 import datetime
+import random

 from typing import Union
 from zoneinfo import ZoneInfo
@@ -80,3 +81,24 @@ def time_now(tz=None):
     and may not behave correctly when daylight savings time shifts are relevant.)
     """
     return timezone.now().astimezone(_tzinfo(tz)).time()
+
+
+def timezone_not_near_midnight():
+    """Get the name of a random timezone where it's not close to midnight
+
+    Avoids midnight +/- 1 hour. Raises RuntimeError if it is unable to find
+    a time zone satisfying this constraint.
+    """
+    timezone_options = pytz.common_timezones
+    tzname = random.choice(timezone_options)
+    right_now = timezone.now().astimezone(pytz.timezone(tzname))
+    # Avoid the remote possibility of an infinite loop (might come up
+    # if there is a problem with the time zone library)
+    tries_left = 20
+    while right_now.hour < 1 or right_now.hour >= 23:
+        tzname = random.choice(timezone_options)
+        right_now = right_now.astimezone(pytz.timezone(tzname))
+        tries_left -= 1
+        if tries_left <= 0:
+            raise RuntimeError('Unable to find a time zone not near midnight')
+    return tzname