ci: merge main to release (pull request #7360)

Robert Sparks 2024-04-24 12:03:50 -05:00 committed by GitHub
commit be763a65aa
47 changed files with 1094 additions and 1296 deletions

View file

@@ -59,7 +59,7 @@ jobs:
path: geckodriver.log
- name: Upload Coverage Results to Codecov
uses: codecov/codecov-action@v4.1.1
uses: codecov/codecov-action@v4.2.0
with:
files: coverage.xml

View file

@@ -1,261 +0,0 @@
#!/usr/bin/env python3.7
# -*- mode: python; coding: utf-8 -*-
# Copyright The IETF Trust 2019, All Rights Reserved
"""
NAME
$program - Check for current copyright notice in given files
SYNOPSIS
$program [OPTIONS] ARGS
DESCRIPTION
Given a list of files or filename wildcard patterns, check all for
an IETF Trust copyright notice with the current year. Optionally
generate a diff on standard out which can be used by 'patch'.
An invocation similar to the following can be particularly useful with
a set of changed version-controlled files, as it will fix up the
Copyright statements of any python files with pending changes:
$ check-copyright -p $(svn st | cut -c 9- | grep '\.py$' ) | patch -p0
%(options)s
AUTHOR
Written by Henrik Levkowetz, <henrik@tools.ietf.org>
COPYRIGHT
Copyright 2019 the IETF Trust
This program is free software; you can redistribute it and/or modify
it under the terms of the Simplified BSD license as published by the
Open Source Initiative at http://opensource.org/licenses/BSD-2-Clause.
"""
import datetime
import os
import sys
import time
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if not path in sys.path:
sys.path.insert(0, path)
import getopt
import re
import pytz
import tzparse
import debug
version = "1.0.0"
program = os.path.basename(sys.argv[0])
progdir = os.path.dirname(sys.argv[0])
debug.debug = True
# ----------------------------------------------------------------------
# Parse options
options = ""
for line in re.findall("\n +(if|elif) +opt in \[(.+)\]:\s+#(.+)\n", open(sys.argv[0]).read()):
if not options:
options += "OPTIONS\n"
options += " %-16s %s\n" % (line[1].replace('"', ''), line[2])
options = options.strip()
# with ' < 1:' on the next line, this is a no-op:
if len(sys.argv) < 1:
print(__doc__ % locals())
sys.exit(1)
try:
opts, files = getopt.gnu_getopt(sys.argv[1:], "hC:pvV", ["help", "copyright=", "patch", "version", "verbose",])
except Exception as e:
print( "%s: %s" % (program, e))
sys.exit(1)
# ----------------------------------------------------------------------
# Handle options
# set default values, if any
opt_verbose = 0
opt_patch = False
opt_copyright = "Copyright The IETF Trust {years}, All Rights Reserved"
# handle individual options
for opt, value in opts:
if opt in ["-h", "--help"]: # Output this help, then exit
print( __doc__ % locals() )
sys.exit(1)
elif opt in ["-p", "--patch"]: # Generate patch output rather than error messages
opt_patch = True
elif opt in ["-C", "--copyright"]: # Copyright line pattern using {years} for years
opt_copyright = value
elif opt in ["-V", "--version"]: # Output version information, then exit
print( program, version )
sys.exit(0)
elif opt in ["-v", "--verbose"]: # Be more verbose
opt_verbose += 1
# ----------------------------------------------------------------------
def say(s):
sys.stderr.write("%s\n" % (s))
# ----------------------------------------------------------------------
def note(s):
if opt_verbose:
sys.stderr.write("%s\n" % (s))
# ----------------------------------------------------------------------
def die(s, error=1):
sys.stderr.write("\n%s: Error: %s\n\n" % (program, s))
sys.exit(error)
# ----------------------------------------------------------------------
def pipe(cmd, inp=None):
import shlex
from subprocess import Popen, PIPE
args = shlex.split(cmd)
bufsize = 4096
stdin = PIPE if inp else None
pipe = Popen(args, stdin=stdin, stdout=PIPE, stderr=PIPE, bufsize=bufsize, encoding='utf-8', universal_newlines=True)
out, err = pipe.communicate(inp)
code = pipe.returncode
if code != 0:
raise OSError(err)
return out
# ----------------------------------------------------------------------
def split_loginfo(line):
try:
parts = line.split()
rev = parts[0][1:]
who = parts[2]
date = parts[4]
time = parts[5]
tz = parts[6]
when = tzparse.tzparse(" ".join(parts[4:7]), "%Y-%m-%d %H:%M:%S %Z")
when = when.astimezone(pytz.utc)
except ValueError as e:
sys.stderr.write("Bad log line format: %s\n %s\n" % (line, e))
return rev, who, when
# ----------------------------------------------------------------------
def get_first_commit(path):
note("Getting first commit for '%s'" % path)
cmd = 'svn log %s' % path
if opt_verbose > 1:
note("Running '%s' ..." % cmd)
try:
commit_log = pipe(cmd)
commit_log = commit_log.splitlines()
commit_log.reverse()
for line in commit_log:
if re.search(loginfo_format, line):
rev, who, when = split_loginfo(line)
break
else:
pass
except OSError:
rev, who, when = None, None, datetime.datetime.now(datetime.timezone.utc)
return { path: { 'rev': rev, 'who': who, 'date': when.strftime('%Y-%m-%d %H:%M:%S'), }, }
# ----------------------------------------------------------------------
# The program itself
import os
import json
cwd = os.getcwd()
# Get current initinfo from cache and svn
cachefn = os.path.join(os.environ.get('HOME', '.'), '.initinfo')
if os.path.exists(cachefn):
note("Reading initinfo cache file %s" % cachefn)
with open(cachefn, "r") as file:
cache = json.load(file)
else:
sys.stderr.write("No initinfo cache file found -- will have to extract all information from SVN.\n"+
"This may take some time.\n\n")
cache = {}
initinfo = cache
merged_revs = {}
write_cache = False
loginfo_format = r'^r[0-9]+ \| [^@]+@[^@]+ \| \d\d\d\d-\d\d-\d\d '
year = time.strftime('%Y')
copyright_re = "(?i)"+opt_copyright.format(years=r"(\d+-)?\d+")
for path in files:
try:
if not os.path.exists(path):
note("File does not exist: %s" % path)
continue
note("Checking path %s" % path)
if not path in initinfo:
initinfo.update(get_first_commit(path))
write_cache = True
date = initinfo[path]['date']
init = date[:4]
copyright_year_re = "(?i)"+opt_copyright.format(years=r"({init}-)?{year}".format(init=init, year=year))
with open(path) as file:
try:
chunk = file.read(4000)
except UnicodeDecodeError as e:
sys.stderr.write(f'Error when reading {file.name}: {e}\n')
raise
if os.path.basename(path) == '__init__.py' and len(chunk)==0:
continue
if not re.search(copyright_year_re, chunk):
if year == init:
copyright = opt_copyright.format(years=year)
else:
copyright = opt_copyright.format(years=f"{init}-{year}")
if opt_patch:
print(f"--- {file.name}\t(original)")
print(f"+++ {file.name}\t(modified)")
if not re.search(copyright_re, chunk):
# Simple case, just insert copyright at the top
print( "@@ -1,3 +1,4 @@")
print(f"+# {copyright}")
for i, line in list(enumerate(chunk.splitlines()))[:3]:
print(f" {line}")
else:
# Find old copyright, then emit preceding lines,
# change, and following lines.
pos = None
for i, line in enumerate(chunk.splitlines(), start=1):
if re.search(copyright_re, line):
pos = i
break
if not pos:
raise RuntimeError("Unexpected state: Expected a copyright line, but found none")
print(f"@@ -1,{pos+3} +1,{pos+3} @@")
for i, line in list(enumerate(chunk.splitlines(), start=1))[:pos+3]:
if i == pos:
print(f"-{line}")
print(f"+# {copyright}")
else:
print(f" {line}")
else:
sys.stderr.write(f"{path}(1): Error: Missing or bad copyright. Expected: {copyright}")
except Exception:
if write_cache:
cache = initinfo
with open(cachefn, "w") as file:
json.dump(cache, file, indent=2, sort_keys=True)
raise
if write_cache:
cache = initinfo
with open(cachefn, "w") as file:
json.dump(cache, file, indent=2, sort_keys=True)

View file

@@ -1,23 +0,0 @@
#!/bin/bash
# Hourly datatracker jobs, ***run as mailman***
#
# This script is expected to be triggered by cron from
# $DTDIR/etc/cron.d/datatracker which should be symlinked from
# /etc/cron.d/
export LANG=en_US.UTF-8
export PYTHONIOENCODING=utf-8
# Make sure we stop if something goes wrong:
program=${0##*/}
trap 'echo "$program($LINENO): Command failed with error code $? ([$$] $0 $*)"; exit 1' ERR
DTDIR=/a/www/ietf-datatracker/web
cd $DTDIR/
# Set up the virtual environment
source $DTDIR/env/bin/activate
logger -p user.info -t cron "Running $DTDIR/bin/mm_hourly"

View file

@@ -8,7 +8,7 @@
"dependencies": {
"dockerode": "^4.0.2",
"fs-extra": "^11.2.0",
"nanoid": "5.0.6",
"nanoid": "5.0.7",
"nanoid-dictionary": "5.0.0-beta.1",
"slugify": "1.6.6",
"tar": "^6.2.1",
@@ -337,9 +337,9 @@
"optional": true
},
"node_modules/nanoid": {
"version": "5.0.6",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.0.6.tgz",
"integrity": "sha512-rRq0eMHoGZxlvaFOUdK1Ev83Bd1IgzzR+WJ3IbDJ7QOSdAxYjlurSPqFs9s4lJg29RT6nPwizFtJhQS6V5xgiA==",
"version": "5.0.7",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.0.7.tgz",
"integrity": "sha512-oLxFY2gd2IqnjcYyOXD8XGCftpGtZP2AbHbOkthDkvRywH5ayNtPVy9YlOPcHckXzbLTCHpkb7FB+yuxKV13pQ==",
"funding": [
{
"type": "github",
@@ -878,9 +878,9 @@
"optional": true
},
"nanoid": {
"version": "5.0.6",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.0.6.tgz",
"integrity": "sha512-rRq0eMHoGZxlvaFOUdK1Ev83Bd1IgzzR+WJ3IbDJ7QOSdAxYjlurSPqFs9s4lJg29RT6nPwizFtJhQS6V5xgiA=="
"version": "5.0.7",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-5.0.7.tgz",
"integrity": "sha512-oLxFY2gd2IqnjcYyOXD8XGCftpGtZP2AbHbOkthDkvRywH5ayNtPVy9YlOPcHckXzbLTCHpkb7FB+yuxKV13pQ=="
},
"nanoid-dictionary": {
"version": "5.0.0-beta.1",

View file

@@ -4,7 +4,7 @@
"dependencies": {
"dockerode": "^4.0.2",
"fs-extra": "^11.2.0",
"nanoid": "5.0.6",
"nanoid": "5.0.7",
"nanoid-dictionary": "5.0.0-beta.1",
"slugify": "1.6.6",
"tar": "^6.2.1",

View file

@@ -60,10 +60,11 @@ CHARTER_PATH = '/assets/ietf-ftp/charter/'
BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/archive/id'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/collection/draft-archive'
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/archive/id'
BIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'
IDSUBMIT_REPOSITORY_PATH = INTERNET_DRAFT_PATH
FTP_DIR = '/assets/ftp'
NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
SLIDE_STAGING_PATH = '/test/staging/'

View file

@@ -57,9 +57,10 @@ CHARTER_PATH = '/assets/ietf-ftp/charter/'
BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/collection/draft-archive'
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'
BIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'
FTP_DIR = '/assets/ftp'
NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
SLIDE_STAGING_PATH = 'test/staging/'

View file

@@ -56,9 +56,10 @@ CHARTER_PATH = '/assets/ietf-ftp/charter/'
BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/collection/draft-archive'
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/ietf-ftp/internet-drafts/'
BIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'
FTP_DIR = '/assets/ftp'
NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
SLIDE_STAGING_PATH = 'test/staging/'

View file

@@ -46,10 +46,11 @@ CHARTER_PATH = '/assets/ietf-ftp/charter/'
BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/archive/id'
INTERNET_DRAFT_ARCHIVE_DIR = '/assets/collection/draft-archive'
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/archive/id'
BIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'
IDSUBMIT_REPOSITORY_PATH = INTERNET_DRAFT_PATH
FTP_DIR = '/assets/ftp'
NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
SLIDE_STAGING_PATH = 'test/staging/'

View file

@@ -9,6 +9,8 @@ for sub in \
test/wiki/ietf \
data/nomcom_keys/public_keys \
/assets/archive/id \
/assets/collection \
/assets/collection/draft-archive \
/assets/ietf-ftp \
/assets/ietf-ftp/bofreq \
/assets/ietf-ftp/charter \
@@ -33,6 +35,10 @@ /assets/www6/iesg \
/assets/www6/iesg \
/assets/www6/iesg/evaluation \
/assets/media/photo \
/assets/ftp \
/assets/ftp/charter \
/assets/ftp/internet-drafts \
/assets/ftp/review \
; do
if [ ! -d "$sub" ]; then
echo "Creating dir $sub"

View file

@@ -1,6 +1,6 @@
# Copyright The IETF Trust 2015-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import base64
import datetime
import json
import html
@@ -36,11 +36,12 @@ from ietf.person.factories import PersonFactory, random_faker, EmailFactory
from ietf.person.models import Email, User
from ietf.person.models import PersonalApiKey
from ietf.stats.models import MeetingRegistration
from ietf.utils.mail import outbox, get_payload_text
from ietf.utils.mail import empty_outbox, outbox, get_payload_text
from ietf.utils.models import DumpInfo
from ietf.utils.test_utils import TestCase, login_testing_unauthorized, reload_db_objects
from .ietf_utils import is_valid_token, requires_api_token
from .views import EmailIngestionError
OMITTED_APPS = (
'ietf.secr.meetings',
@@ -1013,6 +1014,194 @@ class CustomApiTests(TestCase):
sorted(e.address for e in emails),
)
@override_settings(APP_API_TOKENS={"ietf.api.views.ingest_email": "valid-token"})
@mock.patch("ietf.api.views.iana_ingest_review_email")
@mock.patch("ietf.api.views.ipr_ingest_response_email")
@mock.patch("ietf.api.views.nomcom_ingest_feedback_email")
def test_ingest_email(
self, mock_nomcom_ingest, mock_ipr_ingest, mock_iana_ingest
):
mocks = {mock_nomcom_ingest, mock_ipr_ingest, mock_iana_ingest}
empty_outbox()
url = urlreverse("ietf.api.views.ingest_email")
# test various bad calls
r = self.client.get(url)
self.assertEqual(r.status_code, 403)
self.assertFalse(any(m.called for m in mocks))
r = self.client.post(url)
self.assertEqual(r.status_code, 403)
self.assertFalse(any(m.called for m in mocks))
r = self.client.get(url, headers={"X-Api-Key": "valid-token"})
self.assertEqual(r.status_code, 405)
self.assertFalse(any(m.called for m in mocks))
r = self.client.post(url, headers={"X-Api-Key": "valid-token"})
self.assertEqual(r.status_code, 415)
self.assertFalse(any(m.called for m in mocks))
r = self.client.post(
url, content_type="application/json", headers={"X-Api-Key": "valid-token"}
)
self.assertEqual(r.status_code, 400)
self.assertFalse(any(m.called for m in mocks))
r = self.client.post(
url,
"this is not JSON!",
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertFalse(any(m.called for m in mocks))
r = self.client.post(
url,
{"json": "yes", "valid_schema": False},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertFalse(any(m.called for m in mocks))
# test that valid requests call handlers appropriately
message_b64 = base64.b64encode(b"This is a message").decode()
r = self.client.post(
url,
{"dest": "iana-review", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 200)
self.assertTrue(mock_iana_ingest.called)
self.assertEqual(mock_iana_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_iana_ingest})))
mock_iana_ingest.reset_mock()
r = self.client.post(
url,
{"dest": "ipr-response", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 200)
self.assertTrue(mock_ipr_ingest.called)
self.assertEqual(mock_ipr_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_ipr_ingest})))
mock_ipr_ingest.reset_mock()
r = self.client.post(
url,
{"dest": "nomcom-feedback", "message": message_b64, "year": 2024}, # arbitrary year
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 200)
self.assertTrue(mock_nomcom_ingest.called)
self.assertEqual(mock_nomcom_ingest.call_args, mock.call(b"This is a message", 2024))
self.assertFalse(any(m.called for m in (mocks - {mock_nomcom_ingest})))
mock_nomcom_ingest.reset_mock()
# test that exceptions lead to email being sent - assumes that iana-review handling is representative
mock_iana_ingest.side_effect = EmailIngestionError("Error: don't send email")
r = self.client.post(
url,
{"dest": "iana-review", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertTrue(mock_iana_ingest.called)
self.assertEqual(mock_iana_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_iana_ingest})))
self.assertEqual(len(outbox), 0) # implicitly tests that _none_ of the earlier tests sent email
mock_iana_ingest.reset_mock()
# test default recipients and attached original message
mock_iana_ingest.side_effect = EmailIngestionError(
"Error: do send email",
email_body="This is my email\n",
email_original_message=b"This is the original message"
)
with override_settings(ADMINS=[("Some Admin", "admin@example.com")]):
r = self.client.post(
url,
{"dest": "iana-review", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertTrue(mock_iana_ingest.called)
self.assertEqual(mock_iana_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_iana_ingest})))
self.assertEqual(len(outbox), 1)
self.assertIn("admin@example.com", outbox[0]["To"])
self.assertEqual("Error: do send email", outbox[0]["Subject"])
self.assertEqual("This is my email\n", get_payload_text(outbox[0].get_body()))
attachments = list(a for a in outbox[0].iter_attachments())
self.assertEqual(len(attachments), 1)
self.assertEqual(attachments[0].get_filename(), "original-message")
self.assertEqual(attachments[0].get_content_type(), "application/octet-stream")
self.assertEqual(attachments[0].get_content(), b"This is the original message")
mock_iana_ingest.reset_mock()
empty_outbox()
# test overridden recipients and no attached original message
mock_iana_ingest.side_effect = EmailIngestionError(
"Error: do send email",
email_body="This is my email\n",
email_recipients=("thatguy@example.com")
)
with override_settings(ADMINS=[("Some Admin", "admin@example.com")]):
r = self.client.post(
url,
{"dest": "iana-review", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertTrue(mock_iana_ingest.called)
self.assertEqual(mock_iana_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_iana_ingest})))
self.assertEqual(len(outbox), 1)
self.assertNotIn("admin@example.com", outbox[0]["To"])
self.assertIn("thatguy@example.com", outbox[0]["To"])
self.assertEqual("Error: do send email", outbox[0]["Subject"])
self.assertEqual("This is my email\n", get_payload_text(outbox[0]))
mock_iana_ingest.reset_mock()
empty_outbox()
# test attached traceback
mock_iana_ingest.side_effect = EmailIngestionError(
"Error: do send email",
email_body="This is my email\n",
email_attach_traceback=True,
)
with override_settings(ADMINS=[("Some Admin", "admin@example.com")]):
r = self.client.post(
url,
{"dest": "iana-review", "message": message_b64},
content_type="application/json",
headers={"X-Api-Key": "valid-token"},
)
self.assertEqual(r.status_code, 400)
self.assertTrue(mock_iana_ingest.called)
self.assertEqual(mock_iana_ingest.call_args, mock.call(b"This is a message"))
self.assertFalse(any(m.called for m in (mocks - {mock_iana_ingest})))
self.assertEqual(len(outbox), 1)
self.assertIn("admin@example.com", outbox[0]["To"])
self.assertEqual("Error: do send email", outbox[0]["Subject"])
self.assertEqual("This is my email\n", get_payload_text(outbox[0].get_body()))
attachments = list(a for a in outbox[0].iter_attachments())
self.assertEqual(len(attachments), 1)
self.assertEqual(attachments[0].get_filename(), "traceback.txt")
self.assertEqual(attachments[0].get_content_type(), "text/plain")
self.assertIn("ietf.api.views.EmailIngestionError: Error: do send email", attachments[0].get_content())
mock_iana_ingest.reset_mock()
empty_outbox()
class DirectAuthApiTests(TestCase):

View file

@@ -24,7 +24,9 @@ urlpatterns = [
# --- Custom API endpoints, sorted alphabetically ---
# Email alias information for drafts
url(r'^doc/draft-aliases/$', api_views.draft_aliases),
# GPRD: export of personal information for the logged-in person
# email ingestor
url(r'email/$', api_views.ingest_email),
# GDPR: export of personal information for the logged-in person
url(r'^export/personal-information/$', api_views.PersonalInformationExportView.as_view()),
# Email alias information for groups
url(r'^group/group-aliases/$', api_views.group_aliases),

View file

@@ -1,10 +1,13 @@
# Copyright The IETF Trust 2017-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import base64
import binascii
import json
import jsonschema
import pytz
import re
import pytz
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
@@ -18,11 +21,15 @@ from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.gzip import gzip_page
from django.views.generic.detail import DetailView
from email.message import EmailMessage
from jwcrypto.jwk import JWK
from tastypie.exceptions import BadRequest
from tastypie.serializers import Serializer
from tastypie.utils import is_valid_jsonp_callback_value
from tastypie.utils.mime import determine_format, build_content_type
from textwrap import dedent
from traceback import format_exception, extract_tb
from typing import Iterable, Optional
import ietf
from ietf.api import _api_list
@@ -32,12 +39,16 @@ from ietf.doc.utils import DraftAliasGenerator, fuzzy_find_documents
from ietf.group.utils import GroupAliasGenerator, role_holder_emails
from ietf.ietfauth.utils import role_required
from ietf.ietfauth.views import send_account_creation_email
from ietf.ipr.utils import ingest_response_email as ipr_ingest_response_email
from ietf.meeting.models import Meeting
from ietf.nomcom.models import Volunteer, NomCom
from ietf.nomcom.utils import ingest_feedback_email as nomcom_ingest_feedback_email
from ietf.person.models import Person, Email
from ietf.stats.models import MeetingRegistration
from ietf.sync.iana import ingest_review_email as iana_ingest_review_email
from ietf.utils import log
from ietf.utils.decorators import require_api_key
from ietf.utils.mail import send_smtp
from ietf.utils.models import DumpInfo
@@ -515,3 +526,153 @@ def role_holder_addresses(request):
}
)
return HttpResponse(status=405)
_response_email_json_validator = jsonschema.Draft202012Validator(
schema={
"type": "object",
"properties": {
"dest": {
"enum": [
"iana-review",
"ipr-response",
"nomcom-feedback",
]
},
"message": {
"type": "string", # base64-encoded mail message
},
},
"required": ["dest", "message"],
"if": {
# If dest == "nomcom-feedback"...
"properties": {
"dest": {"const": "nomcom-feedback"},
}
},
"then": {
# ... then also require year, an integer, be present
"properties": {
"year": {
"type": "integer",
},
},
"required": ["year"],
},
}
)
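
The if/then clause above makes "year" conditionally required. A minimal sketch of that behavior, reusing the validator defined above (the payload values here are illustrative, not from the commit):

# "year" is only required when dest == "nomcom-feedback"
_response_email_json_validator.validate(
    {"dest": "iana-review", "message": "aGVsbG8="}  # valid: no year needed
)
try:
    _response_email_json_validator.validate(
        {"dest": "nomcom-feedback", "message": "aGVsbG8="}  # invalid: year missing
    )
except jsonschema.exceptions.ValidationError as err:
    print(err.json_path, err.message)  # points at the missing "year"
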
class EmailIngestionError(Exception):
"""Exception indicating ingestion failed"""
def __init__(
self,
msg="Message rejected",
*,
email_body: Optional[str] = None,
email_recipients: Optional[Iterable[str]] = None,
email_attach_traceback=False,
email_original_message: Optional[bytes]=None,
):
self.msg = msg
self.email_body = email_body
self.email_subject = msg
self.email_recipients = email_recipients
self.email_attach_traceback = email_attach_traceback
self.email_original_message = email_original_message
self.email_from = settings.SERVER_EMAIL
@staticmethod
def _summarize_error(error):
frame = extract_tb(error.__traceback__)[-1]
return dedent(f"""\
Error details:
Exception type: {type(error).__module__}.{type(error).__name__}
File: {frame.filename}
Line: {frame.lineno}""")
def as_emailmessage(self) -> Optional[EmailMessage]:
"""Generate an EmailMessage to report an error"""
if self.email_body is None:
return None
error = self if self.__cause__ is None else self.__cause__
format_values = dict(
error=error,
error_summary=self._summarize_error(error),
)
msg = EmailMessage()
if self.email_recipients is None:
msg["To"] = tuple(adm[1] for adm in settings.ADMINS)
else:
msg["To"] = self.email_recipients
msg["From"] = self.email_from
msg["Subject"] = self.msg
msg.set_content(
self.email_body.format(**format_values)
)
if self.email_attach_traceback:
msg.add_attachment(
"".join(format_exception(None, error, error.__traceback__)),
filename="traceback.txt",
)
if self.email_original_message is not None:
# Attach incoming message if it was provided. Send as a generic media
# type because we don't know for sure that it was actually a valid
# message.
msg.add_attachment(
self.email_original_message,
'application', 'octet-stream', # media type
filename='original-message',
)
return msg
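
For context, a hypothetical ingestion handler would signal failure by raising this exception; the keyword arguments control whether and how ingest_email reports the problem by mail. Note that email_body is a format string: {error} and {error_summary} are substituted by as_emailmessage(). This is a sketch, not code from the commit:

import email

def example_ingest_handler(message: bytes):
    """Hypothetical handler illustrating the EmailIngestionError contract."""
    try:
        parsed = email.message_from_bytes(message)
        if parsed["Subject"] is None:
            raise ValueError("message has no Subject header")
    except Exception as err:
        raise EmailIngestionError(
            "Error ingesting example email",
            email_body="An error occurred while ingesting email.\n\n{error_summary}",
            email_original_message=message,  # attached as application/octet-stream
            email_attach_traceback=True,     # adds a traceback.txt attachment
        ) from err
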
@requires_api_token
@csrf_exempt
def ingest_email(request):
def _err(code, text):
return HttpResponse(text, status=code, content_type="text/plain")
if request.method != "POST":
return _err(405, "Method not allowed")
if request.content_type != "application/json":
return _err(415, "Content-Type must be application/json")
# Validate
try:
payload = json.loads(request.body)
_response_email_json_validator.validate(payload)
except json.decoder.JSONDecodeError as err:
return _err(400, f"JSON parse error at line {err.lineno} col {err.colno}: {err.msg}")
except jsonschema.exceptions.ValidationError as err:
return _err(400, f"JSON schema error at {err.json_path}: {err.message}")
except Exception:
return _err(400, "Invalid request format")
try:
message = base64.b64decode(payload["message"], validate=True)
except binascii.Error:
return _err(400, "Invalid message: bad base64 encoding")
dest = payload["dest"]
try:
if dest == "iana-review":
iana_ingest_review_email(message)
elif dest == "ipr-response":
ipr_ingest_response_email(message)
elif dest == "nomcom-feedback":
year = payload["year"]
nomcom_ingest_feedback_email(message, year)
else:
# Should never get here - json schema validation should enforce the enum
log.unreachable(date="2024-04-04")
return _err(400, "Invalid dest") # return something reasonable if we got here unexpectedly
except EmailIngestionError as err:
error_email = err.as_emailmessage()
if error_email is not None:
send_smtp(error_email)
return _err(400, err.msg)
return HttpResponse(status=200)
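
A sketch of a conforming client call (the host and token are illustrative; the /api/email/ path is assumed from the URL pattern added in ietf/api/urls.py above):

import base64
import json
import requests  # any HTTP client works; requests is assumed here

payload = {
    "dest": "iana-review",
    "message": base64.b64encode(b"raw RFC 5322 message bytes").decode(),
}
r = requests.post(
    "https://datatracker.example.net/api/email/",
    data=json.dumps(payload),
    headers={"X-Api-Key": "valid-token", "Content-Type": "application/json"},
)
assert r.status_code == 200  # error responses carry a 4xx status and a plain-text reason
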

View file

@@ -139,6 +139,9 @@ def move_draft_files_to_archive(doc, rev):
if os.path.exists(src):
try:
# ghostlinkd would keep this in the combined all archive since it would
# be sourced from a different place. But when ghostlinkd is removed, nothing
# new is needed here - the file will already exist in the combined archive
shutil.move(src, dst)
except IOError as e:
if "No such file or directory" in str(e):
@@ -213,6 +216,10 @@ def clean_up_draft_files():
filename, revision = match.groups()
def move_file_to(subdir):
# Similar to move_draft_files_to_archive
# ghostlinkd would keep this in the combined all archive since it would
# be sourced from a different place. But when ghostlinkd is removed, nothing
# new is needed here - the file will already exist in the combined archive
shutil.move(path,
os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR, subdir, basename))
@@ -229,4 +236,5 @@ def clean_up_draft_files():
move_file_to("")
except Document.DoesNotExist:
# All uses of this past 2014 seem related to major system failures.
move_file_to("unknown_ids")

View file

@@ -266,3 +266,24 @@ class ExtResourceForm(forms.Form):
@staticmethod
def valid_resource_tags():
return ExtResourceName.objects.all().order_by('slug').values_list('slug', flat=True)
class InvestigateForm(forms.Form):
name_fragment = forms.CharField(
label="File name or fragment to investigate",
required=True,
help_text=(
"Enter a filename such as draft-ietf-some-draft-00.txt or a fragment like draft-ietf-some-draft using at least 8 characters. The search will also work for files that are not necessarily drafts."
),
min_length=8,
)
def clean_name_fragment(self):
disallowed_characters = ["%", "/", "\\", "*"]
name_fragment = self.cleaned_data["name_fragment"]
# Manual inspection of the directories at the time of this writing shows that
# looking for files with fewer than 8 characters in the name is not useful.
# Requiring at least 8 characters also helps protect against the secretariat
# unintentionally matching every draft.
if any(c in name_fragment for c in disallowed_characters):
raise ValidationError(f"The following characters are disallowed: {', '.join(disallowed_characters)}")
return name_fragment
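
A quick sketch of the validation rules this form enforces (values illustrative; this mirrors the tests added in ietf/doc/tests.py below):

form = InvestigateForm({"name_fragment": "draft-ietf-some-draft"})
assert form.is_valid()  # 8+ characters, no disallowed characters

form = InvestigateForm({"name_fragment": "short"})
assert not form.is_valid()  # rejected by min_length=8

form = InvestigateForm({"name_fragment": "bad%character"})
assert not form.is_valid()  # "%" is one of the disallowed characters
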

View file

@@ -142,6 +142,7 @@ class DocumentInfo(models.Model):
if self.is_dochistory():
self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
else:
# This could be simplified since anything in INTERNET_DRAFT_PATH is also already in INTERNET_ALL_DRAFTS_ARCHIVE_DIR
draft_state = self.get_state('draft')
if draft_state and draft_state.slug == 'active':
self._cached_file_path = settings.INTERNET_DRAFT_PATH

View file

@@ -4,6 +4,7 @@
import datetime
import re
from pathlib import Path
from urllib.parse import urljoin
from zoneinfo import ZoneInfo
@@ -899,3 +900,32 @@ def simple_history_delta_change_cnt(history):
delta = history.diff_against(prev)
return len(delta.changes)
return 0
@register.filter
def mtime(path):
"""Returns a datetime object representing mtime given a pathlib Path object"""
return datetime.datetime.fromtimestamp(path.stat().st_mtime).astimezone(ZoneInfo(settings.TIME_ZONE))
@register.filter
def url_for_path(path):
"""Consructs a 'best' URL for web access to the given pathlib Path object.
Assumes that the path is into the Internet-Draft archive or the proceedings.
"""
if path.match(f"{settings.AGENDA_PATH}/**/*"):
return (
f"https://www.ietf.org/proceedings/{path.relative_to(settings.AGENDA_PATH)}"
)
elif any(
[
pathdir in path.parents
for pathdir in [
Path(settings.INTERNET_DRAFT_PATH),
Path(settings.INTERNET_DRAFT_ARCHIVE_DIR).parent,
Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR),
]
]
):
return f"{settings.IETF_ID_ARCHIVE_URL}{path.name}"
else:
return "#"

View file

@@ -45,7 +45,7 @@ from ietf.doc.factories import ( DocumentFactory, DocEventFactory, CharterFactor
StatusChangeFactory, DocExtResourceFactory, RgDraftFactory, BcpFactory)
from ietf.doc.forms import NotifyForm
from ietf.doc.fields import SearchableDocumentsField
from ietf.doc.utils import create_ballot_if_not_open, uppercase_std_abbreviated_name, DraftAliasGenerator
from ietf.doc.utils import create_ballot_if_not_open, investigate_fragment, uppercase_std_abbreviated_name, DraftAliasGenerator
from ietf.group.models import Group, Role
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.ipr.factories import HolderIprDisclosureFactory
@@ -3141,3 +3141,137 @@ class StateIndexTests(TestCase):
if not '-' in name:
self.assertIn(name, content)
class InvestigateTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + [
"AGENDA_PATH",
# "INTERNET_DRAFT_PATH",
# "INTERNET_DRAFT_ARCHIVE_DIR",
# "INTERNET_ALL_DRAFTS_ARCHIVE_DIR",
]
def setUp(self):
super().setUp()
# Contort the draft archive dir temporary replacement
# to match the "collections" concept
archive_tmp_dir = Path(settings.INTERNET_DRAFT_ARCHIVE_DIR)
new_archive_dir = archive_tmp_dir / "draft-archive"
new_archive_dir.mkdir()
settings.INTERNET_DRAFT_ARCHIVE_DIR = str(new_archive_dir)
donated_personal_copy_dir = archive_tmp_dir / "donated-personal-copy"
donated_personal_copy_dir.mkdir()
meeting_dir = Path(settings.AGENDA_PATH) / "666"
meeting_dir.mkdir()
all_archive_dir = Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR)
repository_dir = Path(settings.INTERNET_DRAFT_PATH)
for path in [repository_dir, all_archive_dir]:
(path / "draft-this-is-active-00.txt").touch()
for path in [new_archive_dir, all_archive_dir]:
(path / "draft-old-but-can-authenticate-00.txt").touch()
(path / "draft-has-mixed-provenance-01.txt").touch()
for path in [donated_personal_copy_dir, all_archive_dir]:
(path / "draft-donated-from-a-personal-collection-00.txt").touch()
(path / "draft-has-mixed-provenance-00.txt").touch()
(path / "draft-has-mixed-provenance-00.txt.Z").touch()
(all_archive_dir / "draft-this-should-not-be-possible-00.txt").touch()
(meeting_dir / "draft-this-predates-the-archive-00.txt").touch()
def test_investigate_fragment(self):
result = investigate_fragment("this-is-active")
self.assertEqual(len(result["can_verify"]), 1)
self.assertEqual(len(result["unverifiable_collections"]), 0)
self.assertEqual(len(result["unexpected"]), 0)
self.assertEqual(
list(result["can_verify"])[0].name, "draft-this-is-active-00.txt"
)
result = investigate_fragment("old-but-can")
self.assertEqual(len(result["can_verify"]), 1)
self.assertEqual(len(result["unverifiable_collections"]), 0)
self.assertEqual(len(result["unexpected"]), 0)
self.assertEqual(
list(result["can_verify"])[0].name, "draft-old-but-can-authenticate-00.txt"
)
result = investigate_fragment("predates")
self.assertEqual(len(result["can_verify"]), 1)
self.assertEqual(len(result["unverifiable_collections"]), 0)
self.assertEqual(len(result["unexpected"]), 0)
self.assertEqual(
list(result["can_verify"])[0].name, "draft-this-predates-the-archive-00.txt"
)
result = investigate_fragment("personal-collection")
self.assertEqual(len(result["can_verify"]), 0)
self.assertEqual(len(result["unverifiable_collections"]), 1)
self.assertEqual(len(result["unexpected"]), 0)
self.assertEqual(
list(result["unverifiable_collections"])[0].name,
"draft-donated-from-a-personal-collection-00.txt",
)
result = investigate_fragment("mixed-provenance")
self.assertEqual(len(result["can_verify"]), 1)
self.assertEqual(len(result["unverifiable_collections"]), 2)
self.assertEqual(len(result["unexpected"]), 0)
self.assertEqual(
list(result["can_verify"])[0].name, "draft-has-mixed-provenance-01.txt"
)
self.assertEqual(
set([p.name for p in result["unverifiable_collections"]]),
set(
[
"draft-has-mixed-provenance-00.txt",
"draft-has-mixed-provenance-00.txt.Z",
]
),
)
result = investigate_fragment("not-be-possible")
self.assertEqual(len(result["can_verify"]), 0)
self.assertEqual(len(result["unverifiable_collections"]), 0)
self.assertEqual(len(result["unexpected"]), 1)
self.assertEqual(
list(result["unexpected"])[0].name,
"draft-this-should-not-be-possible-00.txt",
)
def test_investigate(self):
url = urlreverse("ietf.doc.views_doc.investigate")
login_testing_unauthorized(self, "secretary", url)
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("form#investigate")), 1)
self.assertEqual(len(q("div#results")), 0)
r = self.client.post(url, dict(name_fragment="this-is-not-found"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("div#results")), 1)
self.assertEqual(len(q("table#authenticated")), 0)
self.assertEqual(len(q("table#unverifiable")), 0)
self.assertEqual(len(q("table#unexpected")), 0)
r = self.client.post(url, dict(name_fragment="mixed-provenance"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("div#results")), 1)
self.assertEqual(len(q("table#authenticated")), 1)
self.assertEqual(len(q("table#unverifiable")), 1)
self.assertEqual(len(q("table#unexpected")), 0)
r = self.client.post(url, dict(name_fragment="not-be-possible"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("div#results")), 1)
self.assertEqual(len(q("table#authenticated")), 0)
self.assertEqual(len(q("table#unverifiable")), 0)
self.assertEqual(len(q("table#unexpected")), 1)
r = self.client.post(url, dict(name_fragment="short"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("#id_name_fragment.is-invalid")), 1)
for char in ["*", "%", "/", "\\"]:
r = self.client.post(url, dict(name_fragment=f"bad{char}character"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertEqual(len(q("#id_name_fragment.is-invalid")), 1)

View file

@@ -87,6 +87,10 @@ class ViewCharterTests(TestCase):
class EditCharterTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['CHARTER_PATH']
def setUp(self):
super().setUp()
(Path(settings.FTP_DIR)/"charter").mkdir()
def write_charter_file(self, charter):
(Path(settings.CHARTER_PATH) / f"{charter.name}-{charter.rev}.txt").write_text("This is a charter.")
@@ -506,13 +510,16 @@
self.assertEqual(charter.rev, next_revision(prev_rev))
self.assertTrue("new_revision" in charter.latest_event().type)
file_contents = (
Path(settings.CHARTER_PATH) / (charter.name + "-" + charter.rev + ".txt")
).read_text("utf-8")
charter_path = Path(settings.CHARTER_PATH) / (charter.name + "-" + charter.rev + ".txt")
file_contents = (charter_path).read_text("utf-8")
self.assertEqual(
file_contents,
"Windows line\nMac line\nUnix line\n" + utf_8_snippet.decode("utf-8"),
)
ftp_charter_path = Path(settings.FTP_DIR) / "charter" / charter_path.name
self.assertTrue(ftp_charter_path.exists())
self.assertTrue(charter_path.samefile(ftp_charter_path))
def test_submit_initial_charter(self):
group = GroupFactory(type_id='wg',acronym='mars',list_email='mars-wg@ietf.org')
@@ -808,9 +815,11 @@
self.assertTrue(not charter.ballot_open("approve"))
self.assertEqual(charter.rev, "01")
self.assertTrue(
(Path(settings.CHARTER_PATH) / ("charter-ietf-%s-%s.txt" % (group.acronym, charter.rev))).exists()
)
charter_path = Path(settings.CHARTER_PATH) / ("charter-ietf-%s-%s.txt" % (group.acronym, charter.rev))
charter_ftp_path = Path(settings.FTP_DIR) / "charter" / charter_path.name
self.assertTrue(charter_path.exists())
self.assertTrue(charter_ftp_path.exists())
self.assertTrue(charter_path.samefile(charter_ftp_path))
self.assertEqual(len(outbox), 2)
#

View file

@@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
from pathlib import Path
import datetime, os, shutil
import io
import tarfile, tempfile, mailbox
@@ -47,6 +48,7 @@ class ReviewTests(TestCase):
self.review_dir = self.tempdir('review')
self.old_document_path_pattern = settings.DOCUMENT_PATH_PATTERN
settings.DOCUMENT_PATH_PATTERN = self.review_dir + "/{doc.type_id}/"
(Path(settings.FTP_DIR) / "review").mkdir()
self.review_subdir = os.path.join(self.review_dir, "review")
if not os.path.exists(self.review_subdir):
@@ -57,6 +59,13 @@
settings.DOCUMENT_PATH_PATTERN = self.old_document_path_pattern
super().tearDown()
def verify_review_files_were_written(self, assignment, expected_content = "This is a review\nwith two lines"):
review_file = Path(self.review_subdir) / f"{assignment.review.name}.txt"
content = review_file.read_text()
self.assertEqual(content, expected_content)
review_ftp_file = Path(settings.FTP_DIR) / "review" / review_file.name
self.assertTrue(review_file.samefile(review_ftp_file))
def test_request_review(self):
doc = WgDraftFactory(group__acronym='mars',rev='01')
NewRevisionDocEventFactory(doc=doc,rev='01')
@@ -830,8 +839,7 @@
self.assertTrue(assignment.review_request.team.acronym.lower() in assignment.review.name)
self.assertTrue(assignment.review_request.doc.rev in assignment.review.name)
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 1)
self.assertIn(assignment.review_request.team.list_email, outbox[0]["To"])
@@ -885,8 +893,7 @@
completed_time_diff = timezone.now() - assignment.completed_on
self.assertLess(completed_time_diff, datetime.timedelta(seconds=10))
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 1)
self.assertIn(assignment.review_request.team.list_email, outbox[0]["To"])
@@ -926,8 +933,7 @@
self.assertLess(event0_time_diff, datetime.timedelta(seconds=10))
self.assertEqual(events[1].time, datetime.datetime(2012, 12, 24, 12, 13, 14, tzinfo=DEADLINE_TZINFO))
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 1)
self.assertIn(assignment.review_request.team.list_email, outbox[0]["To"])
@@ -1013,8 +1019,7 @@
assignment = reload_db_objects(assignment)
self.assertEqual(assignment.state_id, "completed")
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 0)
self.assertTrue("http://example.com" in assignment.review.external_url)
@@ -1063,8 +1068,7 @@
self.assertEqual(assignment.reviewer, rev_role.person.role_email('reviewer'))
self.assertEqual(assignment.state_id, "completed")
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 0)
self.assertTrue("http://example.com" in assignment.review.external_url)
@@ -1172,8 +1176,9 @@
self.assertLess(event_time_diff, datetime.timedelta(seconds=10))
self.assertTrue('revised' in event1.desc.lower())
with io.open(os.path.join(self.review_subdir, assignment.review.name + ".txt")) as f:
self.assertEqual(f.read(), "This is a review\nwith two lines")
# See https://github.com/ietf-tools/datatracker/issues/6941
# These are _not_ getting written as a new version as intended.
self.verify_review_files_were_written(assignment)
self.assertEqual(len(outbox), 0)
@@ -1200,6 +1205,8 @@
# Ensure that a new event was created for the new revision (#2590)
self.assertNotEqual(event1.id, event2.id)
self.verify_review_files_were_written(assignment, "This is a revised review")
self.assertEqual(len(outbox), 0)
def test_edit_comment(self):

View file

@@ -66,6 +66,8 @@ urlpatterns = [
r"^shepherdwriteup-template/(?P<type>\w+)/?$",
views_doc.document_shepherd_writeup_template,
),
url(r'^investigate/?$', views_doc.investigate),
url(r'^stats/newrevisiondocevent/?$', views_stats.chart_newrevisiondocevent),
url(r'^stats/newrevisiondocevent/conf/?$', views_stats.chart_conf_newrevisiondocevent),
@@ -179,7 +181,8 @@
url(r'^%(name)s/session/' % settings.URL_REGEXPS, include('ietf.doc.urls_material')),
url(r'^(?P<name>[A-Za-z0-9._+-]+)/session/', include(session_patterns)),
url(r'^(?P<name>[A-Za-z0-9\._\+\-]+)$', views_search.search_for_name),
# latest versions - keep old URLs alive during migration period
# rfcdiff - latest versions - keep old URLs alive during migration period
url(r'^rfcdiff-latest-json/%(name)s(?:-%(rev)s)?(\.txt|\.html)?/?$' % settings.URL_REGEXPS, RedirectView.as_view(pattern_name='ietf.api.views.rfcdiff_latest_json', permanent=True)),
url(r'^rfcdiff-latest-json/(?P<name>[Rr][Ff][Cc] [0-9]+?)(\.txt|\.html)?/?$', RedirectView.as_view(pattern_name='ietf.api.views.rfcdiff_latest_json', permanent=True)),
# end of rfcdiff support URLs
]

View file

@@ -13,6 +13,7 @@ import textwrap
from collections import defaultdict, namedtuple, Counter
from dataclasses import dataclass
from pathlib import Path
from typing import Iterator, Union
from zoneinfo import ZoneInfo
@@ -1382,3 +1383,29 @@ class DraftAliasGenerator:
# .all = everything from above
if all:
yield alias + ".all", list(all)
def investigate_fragment(name_fragment):
can_verify = set()
for root in [settings.INTERNET_DRAFT_PATH, settings.INTERNET_DRAFT_ARCHIVE_DIR]:
can_verify.update(list(Path(root).glob(f"*{name_fragment}*")))
can_verify.update(list(Path(settings.AGENDA_PATH).glob(f"**/*{name_fragment}*")))
# N.B. This reflects the assumption that the internet draft archive dir is in
# a directory with other collections (at /a/ietfdata/draft/collections as this is written)
unverifiable_collections = set(
Path(settings.INTERNET_DRAFT_ARCHIVE_DIR).parent.glob(f"**/*{name_fragment}*")
)
unverifiable_collections.difference_update(can_verify)
expected_names = set([p.name for p in can_verify.union(unverifiable_collections)])
maybe_unexpected = list(
Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR).glob(f"*{name_fragment}*")
)
unexpected = [p for p in maybe_unexpected if p.name not in expected_names]
return dict(
can_verify=can_verify,
unverifiable_collections=unverifiable_collections,
unexpected=unexpected,
)
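
The returned dict separates hits by where they were found; a sketch of its shape, with a filename borrowed from the tests added below:

result = investigate_fragment("old-but-can")
result["can_verify"]                # set of Paths in the active repo, draft archive, or proceedings
result["unverifiable_collections"]  # set of Paths found only in sibling collection dirs
result["unexpected"]                # list of Paths present only in the all-drafts archive
[p.name for p in result["can_verify"]]  # e.g. ["draft-old-but-can-authenticate-00.txt"]
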

View file

@@ -92,11 +92,31 @@ def change_group_state_after_charter_approval(group, by):
def fix_charter_revision_after_approval(charter, by):
# according to spec, 00-02 becomes 01, so copy file and record new revision
try:
old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, charter.rev))
new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, next_approved_revision(charter.rev)))
old = os.path.join(
charter.get_file_path(), "%s-%s.txt" % (charter.name, charter.rev)
)
new = os.path.join(
charter.get_file_path(),
"%s-%s.txt" % (charter.name, next_approved_revision(charter.rev)),
)
shutil.copy(old, new)
except IOError:
log("There was an error copying %s to %s" % (old, new))
# Also provide a copy to the legacy ftp source directory, which is served by rsync
# This replaces the hardlink copy that ghostlink has made in the past
# Still using a hardlink as long as these are on the same filesystem.
# Staying with os.path vs pathlib.Path until we get to python>=3.10.
charter_dir = os.path.join(settings.FTP_DIR, "charter")
ftp_filepath = os.path.join(
charter_dir, "%s-%s.txt" % (charter.name, next_approved_revision(charter.rev))
)
try:
os.link(new, ftp_filepath)
except IOError:
log(
"There was an error creating a harlink at %s pointing to %s"
% (ftp_filepath, new)
)
events = []
e = NewRevisionDocEvent(doc=charter, by=by, type="new_revision")
@@ -108,6 +128,7 @@ def fix_charter_revision_after_approval(charter, by):
charter.rev = e.rev
charter.save_with_history(events)
def historic_milestones_for_charter(charter, rev):
"""Return GroupMilestone/GroupMilestoneHistory objects for charter
document at rev by looking through the history."""

View file

@@ -4,6 +4,7 @@
import datetime
import json
import os
import textwrap
from pathlib import Path
@@ -42,7 +43,7 @@ from ietf.ietfauth.utils import has_role, role_required
from ietf.name.models import GroupStateName
from ietf.person.models import Person
from ietf.utils.history import find_history_active_at
from ietf.utils.log import assertion
from ietf.utils.log import assertion, log
from ietf.utils.mail import send_mail_preformatted
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.utils.response import permission_denied
@@ -443,6 +444,18 @@ def submit(request, name, option=None):
destination.write(form.cleaned_data["txt"])
else:
destination.write(form.cleaned_data["content"])
# Also provide a copy to the legacy ftp source directory, which is served by rsync
# This replaces the hardlink copy that ghostlink has made in the past
# Still using a hardlink as long as these are on the same filesystem.
ftp_filename = Path(settings.FTP_DIR) / "charter" / charter_filename.name
try:
os.link(charter_filename, ftp_filename) # os.link until we are on python>=3.10
except IOError:
log(
"There was an error creating a hardlink at %s pointing to %s"
% (ftp_filename, charter_filename)
)
if option in ["initcharter", "recharter"] and charter.ad == None:
charter.ad = getattr(group.ad_role(), "person", None)

View file

@@ -58,7 +58,7 @@ from ietf.doc.models import ( Document, DocHistory, DocEvent, BallotDocEvent, Ba
IESG_BALLOT_ACTIVE_STATES, STATUSCHANGE_RELATIONS, DocumentActionHolder, DocumentAuthor,
RelatedDocument, RelatedDocHistory)
from ietf.doc.utils import (augment_events_with_revision,
can_adopt_draft, can_unadopt_draft, get_chartering_type, get_tags_for_stream_id,
can_adopt_draft, can_unadopt_draft, get_chartering_type, get_tags_for_stream_id, investigate_fragment,
needed_ballot_positions, nice_consensus, update_telechat, has_same_ballot,
get_initial_notify, make_notify_changed_event, make_rev_history, default_consensus,
add_events_message_info, get_unicode_document_content,
@@ -72,7 +72,7 @@ from ietf.ietfauth.utils import ( has_role, is_authorized_in_doc_stream, user_is
role_required, is_individual_draft_author, can_request_rfc_publication)
from ietf.name.models import StreamName, BallotPositionName
from ietf.utils.history import find_history_active_at
from ietf.doc.forms import TelechatForm, NotifyForm, ActionHoldersForm, DocAuthorForm, DocAuthorChangeBasisForm
from ietf.doc.forms import InvestigateForm, TelechatForm, NotifyForm, ActionHoldersForm, DocAuthorForm, DocAuthorChangeBasisForm
from ietf.doc.mails import email_comment, email_remind_action_holders
from ietf.mailtrigger.utils import gather_relevant_expansions
from ietf.meeting.models import Session, SessionPresentation
@@ -2254,3 +2254,16 @@ def idnits2_state(request, name, rev=None):
content_type="text/plain;charset=utf-8",
)
@role_required("Secretariat")
def investigate(request):
results = None
if request.method == "POST":
form = InvestigateForm(request.POST)
if form.is_valid():
name_fragment = form.cleaned_data["name_fragment"]
results = investigate_fragment(name_fragment)
else:
form = InvestigateForm()
return render(
request, "doc/investigate.html", context=dict(form=form, results=results)
)

View file

@@ -831,6 +831,9 @@ def restore_draft_file(request, draft):
log.log("Resurrecting %s. Moving files:" % draft.name)
for file in files:
try:
# ghostlinkd would keep this in the combined all archive since it would
# be sourced from a different place. But when ghostlinkd is removed, nothing
# new is needed here - the file will already exist in the combined archive
shutil.move(file, settings.INTERNET_DRAFT_PATH)
log.log(" Moved file %s to %s" % (file, settings.INTERNET_DRAFT_PATH))
except shutil.Error as ex:

View file

@@ -2,11 +2,11 @@
# -*- coding: utf-8 -*-
import io
import itertools
import json
import os
import datetime
from pathlib import Path
import requests
import email.utils
@@ -803,9 +803,13 @@ def complete_review(request, name, assignment_id=None, acronym=None):
else:
content = form.cleaned_data['review_content']
filename = os.path.join(review.get_file_path(), '{}.txt'.format(review.name))
with io.open(filename, 'w', encoding='utf-8') as destination:
destination.write(content)
review_path = Path(review.get_file_path()) / f"{review.name}.txt"
review_path.write_text(content)
review_ftp_path = Path(settings.FTP_DIR) / "review" / review_path.name
# See https://github.com/ietf-tools/datatracker/issues/6941 - when that's
# addressed, making this link should not be conditional
if not review_ftp_path.exists():
os.link(review_path, review_ftp_path) # switch this to Path.hardlink when python>=3.10 is available
completion_datetime = timezone.now()
if "completion_date" in form.cleaned_data:

View file

@@ -1,294 +0,0 @@
# Copyright The IETF Trust 2023, All Rights Reserved
import csv
import datetime
import re
import shutil
import subprocess
import tempfile
from pathlib import Path
import dateutil
from django.conf import settings
from django.core.management import BaseCommand
from ietf.group.models import Appeal, AppealArtifact
class Command(BaseCommand):
help = "Performs a one-time import of IESG appeals"
def handle(self, *args, **options):
old_appeals_root = (
"/a/www/www6/iesg/appeal"
if settings.SERVER_MODE == "production"
else "/assets/www6/iesg/appeal"
)
tmpdir = tempfile.mkdtemp()
process = subprocess.Popen(
["git", "clone", "https://github.com/kesara/iesg-scraper.git", tmpdir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
sub_stdout, sub_stderr = process.communicate()
if not (Path(tmpdir) / "iesg_appeals" / "anderson-2006-03-08.md").exists():
self.stdout.write(
"Git clone of the iesg-scraper directory did not go as expected"
)
self.stdout.write("stdout:", sub_stdout)
self.stdout.write("stderr:", sub_stderr)
self.stdout.write(f"Clean up {tmpdir} manually")
exit(-1)
titles = [
"Appeal: IESG Statement on Guidance on In-Person and Online Interim Meetings (John Klensin, 2023-08-15)",
"Appeal of current Guidance on in-Person and Online meetings (Ted Hardie, Alan Frindell, 2023-07-19)",
"Appeal re: URI Scheme Application and draft-mcsweeney-drop-scheme (Tim McSweeney, 2020-07-08)",
"Appeal to the IESG re WGLC of draft-ietf-spring-srv6-network-programming (Fernando Gont, Andrew Alston, and Sander Steffann, 2020-04-22)",
"Appeal re Protocol Action: 'URI Design and Ownership' to Best \nCurrent Practice (draft-nottingham-rfc7320bis-03.txt) (John Klensin; 2020-02-04)",
"Appeal of IESG Conflict Review process and decision on draft-mavrogiannopoulos-pkcs8-validated-parameters-02 (John Klensin; 2018-07-07)",
"Appeal of IESG decision to defer action and request that ISE publish draft-klensin-dns-function-considerations (John Klensin; 2017-11-29)",
'Appeal to the IESG concerning its approval of the "draft-ietf-ianaplan-icg-response" (PDF file) (JFC Morfin; 2015-03-11)',
"Appeal re tzdist mailing list moderation (Tobias Conradi; 2014-08-28) / Withdrawn by Submitter",
"Appeal re draft-masotta-tftpexts-windowsize-opt (Patrick Masotta; 2013-11-14)",
"Appeal re draft-ietf-manet-nhdp-sec-threats (Abdussalam Baryun; 2013-06-19)",
"Appeal of decision to advance RFC6376 (Douglas Otis; 2013-05-30)",
"Appeal to the IESG in regards to RFC 6852 (PDF file) (JFC Morfin; 2013-04-05)",
"Appeal to the IESG concerning the approbation of the IDNA2008 document set (PDF file) (JFC Morfin; 2010-03-10)",
"Authentication-Results Header Field Appeal (Douglas Otis, David Rand; 2009-02-16) / Withdrawn by Submitter",
"Appeal to the IAB of IESG rejection of Appeal to Last Call draft-ietf-grow-anycast (Dean Anderson; 2008-11-14)",
"Appeal to the IESG Concerning the Way At Large Internet Lead Users Are Not Permitted To Adequately Contribute to the IETF Deliverables (JFC Morfin; 2008-09-10)",
"Appeal over suspension of posting rights for Todd Glassey (Todd Glassey; 2008-07-28)",
"Appeal against IESG blocking DISCUSS on draft-klensin-rfc2821bis (John C Klensin; 2008-06-13)",
"Appeal: Continued Abuse of Process by IPR-WG Chair (Dean Anderson; 2007-12-26)",
"Appeal to the IESG from Todd Glassey (Todd Glassey; 2007-11-26)",
"Appeal Against the Removal of the Co-Chairs of the GEOPRIV Working Group (PDF file) (Randall Gellens, Allison Mankin, and Andrew Newton; 2007-06-22)",
"Appeal concerning the WG-LTRU rechartering (JFC Morfin; 2006-10-24)",
"Appeal against decision within July 10 IESG appeal dismissal (JFC Morfin; 2006-09-09)",
"Appeal: Mandatory to implement HTTP authentication mechanism in the Atom Publishing Protocol (Robert Sayre; 2006-08-29)",
"Appeal Against IESG Decisions Regarding the draft-ietf-ltru-matching (PDF file) (JFC Morfin; 2006-08-16)",
"Amended Appeal Re: grow: Last Call: 'Operation of Anycast Services' to BCP (draft-ietf-grow-anycast) (Dean Anderson; 2006-06-14)",
"Appeal Against an IESG Decision Denying Me IANA Language Registration Process by way of PR-Action (PDF file) (JFC Morfin; 2006-05-17)",
"Appeal to the IESG of PR-Action against Dean Anderson (Dean Anderson; 2006-03-08)",
"Appeal to IESG against AD decision: one must clear the confusion opposing the RFC 3066 Bis consensus (JFC Morfin; 2006-02-20)",
"Appeal to the IESG of an IESG decision (JFC Morfin; 2006-02-17)",
"Appeal to the IESG in reference to the ietf-languages@alvestrand.no mailing list (JFC Morfin; 2006-02-07)",
"Appeal to the IESG against an IESG decision concerning RFC 3066 Bis Draft (JFC Morfin; 2006-01-14)",
"Appeal over a key change in a poor RFC 3066 bis example (JFC Morfin; 2005-10-19)",
"Additional appeal against publication of draft-lyon-senderid-* in regards to its recommended use of Resent- header fields in the way that is inconsistant with RFC2822(William Leibzon; 2005-08-29)",
"Appeal: Publication of draft-lyon-senderid-core-01 in conflict with referenced draft-schlitt-spf-classic-02 (Julian Mehnle; 2005-08-25)",
'Appeal of decision to standardize "Mapping Between the Multimedia Messaging Service (MMS) and Internet Mail" (John C Klensin; 2005-06-10)',
"Appeal regarding IESG decision on the GROW WG (David Meyer; 2003-11-15)",
"Appeal: Official notice of appeal on suspension rights (Todd Glassey; 2003-08-06)",
"Appeal: AD response to Site-Local Appeal (Tony Hain; 2003-07-31)",
"Appeal against IESG decision for draft-chiba-radius-dynamic-authorization-05.txt (Glen Zorn; 2003-01-15)",
"Appeal Against moving draft-ietf-ipngwg-addr-arch-v3 to Draft Standard (Robert Elz; 2002-11-05)",
]
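# Each appeal title above embeds an ISO (YYYY-MM-DD) date; the regex below
# pulls it out to become the Appeal's date.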
date_re = re.compile(r"\d{4}-\d{2}-\d{2}")
dates = [
datetime.datetime.strptime(date_re.search(t).group(), "%Y-%m-%d").date()
for t in titles
]
parts = [
["klensin-2023-08-15.txt", "response-to-klensin-2023-08-15.txt"],
[
"hardie-frindell-2023-07-19.txt",
"response-to-hardie-frindell-2023-07-19.txt",
],
["mcsweeney-2020-07-08.txt", "response-to-mcsweeney-2020-07-08.pdf"],
["gont-2020-04-22.txt", "response-to-gont-2020-06-02.txt"],
["klensin-2020-02-04.txt", "response-to-klensin-2020-02-04.txt"],
["klensin-2018-07-07.txt", "response-to-klensin-2018-07-07.txt"],
["klensin-2017-11-29.txt", "response-to-klensin-2017-11-29.md"],
["morfin-2015-03-11.pdf", "response-to-morfin-2015-03-11.md"],
["conradi-2014-08-28.txt"],
["masotta-2013-11-14.txt", "response-to-masotta-2013-11-14.md"],
["baryun-2013-06-19.txt", "response-to-baryun-2013-06-19.md"],
["otis-2013-05-30.txt", "response-to-otis-2013-05-30.md"],
["morfin-2013-04-05.pdf", "response-to-morfin-2013-04-05.md"],
["morfin-2010-03-10.pdf", "response-to-morfin-2010-03-10.txt"],
["otis-2009-02-16.txt"],
["anderson-2008-11-14.md", "response-to-anderson-2008-11-14.txt"],
["morfin-2008-09-10.txt", "response-to-morfin-2008-09-10.txt"],
["glassey-2008-07-28.txt", "response-to-glassey-2008-07-28.txt"],
["klensin-2008-06-13.txt", "response-to-klensin-2008-06-13.txt"],
["anderson-2007-12-26.txt", "response-to-anderson-2007-12-26.txt"],
["glassey-2007-11-26.txt", "response-to-glassey-2007-11-26.txt"],
["gellens-2007-06-22.pdf", "response-to-gellens-2007-06-22.txt"],
["morfin-2006-10-24.txt", "response-to-morfin-2006-10-24.txt"],
["morfin-2006-09-09.txt", "response-to-morfin-2006-09-09.txt"],
["sayre-2006-08-29.txt", "response-to-sayre-2006-08-29.txt"],
[
"morfin-2006-08-16.pdf",
"response-to-morfin-2006-08-17.txt",
"response-to-morfin-2006-08-17-part2.txt",
],
["anderson-2006-06-13.txt", "response-to-anderson-2006-06-14.txt"],
["morfin-2006-05-17.pdf", "response-to-morfin-2006-05-17.txt"],
["anderson-2006-03-08.md", "response-to-anderson-2006-03-08.txt"],
["morfin-2006-02-20.txt", "response-to-morfin-2006-02-20.txt"],
["morfin-2006-02-17.txt", "response-to-morfin-2006-02-17.txt"],
["morfin-2006-02-07.txt", "response-to-morfin-2006-02-07.txt"],
["morfin-2006-01-14.txt", "response-to-morfin-2006-01-14.txt"],
["morfin-2005-10-19.txt", "response-to-morfin-2005-10-19.txt"],
["leibzon-2005-08-29.txt", "response-to-leibzon-2005-08-29.txt"],
["mehnle-2005-08-25.txt", "response-to-mehnle-2005-08-25.txt"],
["klensin-2005-06-10.txt", "response-to-klensin-2005-06-10.txt"],
["meyer-2003-11-15.txt", "response-to-meyer-2003-11-15.txt"],
["glassey-2003-08-06.txt", "response-to-glassey-2003-08-06.txt"],
["hain-2003-07-31.txt", "response-to-hain-2003-07-31.txt"],
["zorn-2003-01-15.txt", "response-to-zorn-2003-01-15.txt"],
["elz-2002-11-05.txt", "response-to-elz-2002-11-05.txt"],
]
assert len(titles) == len(dates)
assert len(titles) == len(parts)
part_times = dict()
part_times["klensin-2023-08-15.txt"] = "2023-08-15 15:03:55 -0400"
part_times["response-to-klensin-2023-08-15.txt"] = "2023-08-24 18:54:13 +0300"
part_times["hardie-frindell-2023-07-19.txt"] = "2023-07-19 07:17:16PDT"
part_times["response-to-hardie-frindell-2023-07-19.txt"] = (
"2023-08-15 11:58:26PDT"
)
part_times["mcsweeney-2020-07-08.txt"] = "2020-07-08 14:45:00 -0400"
part_times["response-to-mcsweeney-2020-07-08.pdf"] = "2020-07-28 12:54:04 -0000"
part_times["gont-2020-04-22.txt"] = "2020-04-22 22:26:20 -0400"
part_times["response-to-gont-2020-06-02.txt"] = "2020-06-02 20:44:29 -0400"
part_times["klensin-2020-02-04.txt"] = "2020-02-04 13:54:46 -0500"
# part_times["response-to-klensin-2020-02-04.txt"]="2020-03-24 11:49:31EDT"
part_times["response-to-klensin-2020-02-04.txt"] = "2020-03-24 11:49:31 -0400"
part_times["klensin-2018-07-07.txt"] = "2018-07-07 12:40:43PDT"
# part_times["response-to-klensin-2018-07-07.txt"]="2018-08-16 10:46:45EDT"
part_times["response-to-klensin-2018-07-07.txt"] = "2018-08-16 10:46:45 -0400"
part_times["klensin-2017-11-29.txt"] = "2017-11-29 09:35:02 -0500"
part_times["response-to-klensin-2017-11-29.md"] = "2017-11-30 11:33:04 -0500"
part_times["morfin-2015-03-11.pdf"] = "2015-03-11 18:03:44 -0000"
part_times["response-to-morfin-2015-03-11.md"] = "2015-04-16 15:18:09 -0000"
part_times["conradi-2014-08-28.txt"] = "2014-08-28 22:28:06 +0300"
part_times["masotta-2013-11-14.txt"] = "2013-11-14 15:35:19 +0200"
part_times["response-to-masotta-2013-11-14.md"] = "2014-01-27 07:39:32 -0800"
part_times["baryun-2013-06-19.txt"] = "2013-06-19 06:29:51PDT"
part_times["response-to-baryun-2013-06-19.md"] = "2013-07-02 15:24:42 -0700"
part_times["otis-2013-05-30.txt"] = "2013-05-30 19:35:18 +0000"
part_times["response-to-otis-2013-05-30.md"] = "2013-06-27 11:56:48 -0700"
part_times["morfin-2013-04-05.pdf"] = "2013-04-05 17:31:19 -0700"
part_times["response-to-morfin-2013-04-05.md"] = "2013-04-17 08:17:29 -0700"
part_times["morfin-2010-03-10.pdf"] = "2010-03-10 21:40:58 +0100"
part_times["response-to-morfin-2010-03-10.txt"] = "2010-04-07 14:26:06 -0700"
part_times["otis-2009-02-16.txt"] = "2009-02-16 15:47:15 -0800"
part_times["anderson-2008-11-14.md"] = "2008-11-14 00:16:58 -0500"
part_times["response-to-anderson-2008-11-14.txt"] = "2008-12-15 11:00:02 -0800"
part_times["morfin-2008-09-10.txt"] = "2008-09-10 04:10:13 +0200"
part_times["response-to-morfin-2008-09-10.txt"] = "2008-09-28 10:00:01PDT"
part_times["glassey-2008-07-28.txt"] = "2008-07-28 08:34:52 -0700"
part_times["response-to-glassey-2008-07-28.txt"] = "2008-09-02 11:00:01PDT"
part_times["klensin-2008-06-13.txt"] = "2008-06-13 21:14:38 -0400"
part_times["response-to-klensin-2008-06-13.txt"] = "2008-07-07 10:00:01 PDT"
# part_times["anderson-2007-12-26.txt"]="2007-12-26 17:19:34EST"
part_times["anderson-2007-12-26.txt"] = "2007-12-26 17:19:34 -0500"
part_times["response-to-anderson-2007-12-26.txt"] = "2008-01-15 17:21:05 -0500"
part_times["glassey-2007-11-26.txt"] = "2007-11-26 08:13:22 -0800"
part_times["response-to-glassey-2007-11-26.txt"] = "2008-01-23 17:38:43 -0500"
part_times["gellens-2007-06-22.pdf"] = "2007-06-22 21:45:41 -0400"
part_times["response-to-gellens-2007-06-22.txt"] = "2007-09-20 14:01:27 -0400"
part_times["morfin-2006-10-24.txt"] = "2006-10-24 05:03:17 +0200"
part_times["response-to-morfin-2006-10-24.txt"] = "2006-11-07 12:56:02 -0500"
part_times["morfin-2006-09-09.txt"] = "2006-09-09 02:54:55 +0200"
part_times["response-to-morfin-2006-09-09.txt"] = "2006-09-15 12:56:31 -0400"
part_times["sayre-2006-08-29.txt"] = "2006-08-29 17:05:03 -0400"
part_times["response-to-sayre-2006-08-29.txt"] = "2006-10-16 13:07:18 -0400"
part_times["morfin-2006-08-16.pdf"] = "2006-08-16 18:28:19 -0400"
part_times["response-to-morfin-2006-08-17.txt"] = "2006-08-22 12:05:42 -0400"
part_times["response-to-morfin-2006-08-17-part2.txt"] = (
"2006-11-07 13:00:58 -0500"
)
# part_times["anderson-2006-06-13.txt"]="2006-06-13 21:51:18EDT"
part_times["anderson-2006-06-13.txt"] = "2006-06-13 21:51:18 -0400"
part_times["response-to-anderson-2006-06-14.txt"] = "2006-07-10 14:31:08 -0400"
part_times["morfin-2006-05-17.pdf"] = "2006-05-17 06:46:18 +0200"
part_times["response-to-morfin-2006-05-17.txt"] = "2006-07-10 14:18:10 -0400"
part_times["anderson-2006-03-08.md"] = "2006-03-08 09:42:44 +0100"
part_times["response-to-anderson-2006-03-08.txt"] = "2006-03-20 14:55:38 -0500"
part_times["morfin-2006-02-20.txt"] = "2006-02-20 19:18:24 +0100"
part_times["response-to-morfin-2006-02-20.txt"] = "2006-03-06 13:08:39 -0500"
part_times["morfin-2006-02-17.txt"] = "2006-02-17 18:59:38 +0100"
part_times["response-to-morfin-2006-02-17.txt"] = "2006-07-10 14:05:15 -0400"
part_times["morfin-2006-02-07.txt"] = "2006-02-07 19:38:57 -0500"
part_times["response-to-morfin-2006-02-07.txt"] = "2006-02-21 19:09:26 -0500"
part_times["morfin-2006-01-14.txt"] = "2006-01-14 15:05:24 +0100"
part_times["response-to-morfin-2006-01-14.txt"] = "2006-02-21 12:23:38 -0500"
part_times["morfin-2005-10-19.txt"] = "2005-10-19 17:12:11 +0200"
part_times["response-to-morfin-2005-10-19.txt"] = "2005-11-15 11:42:30 -0500"
part_times["leibzon-2005-08-29.txt"] = "2005-08-29 08:28:52PDT"
part_times["response-to-leibzon-2005-08-29.txt"] = "2005-12-08 14:04:47 -0500"
part_times["mehnle-2005-08-25.txt"] = "2005-08-25 00:45:26 +0200"
part_times["response-to-mehnle-2005-08-25.txt"] = "2005-12-08 13:37:38 -0500"
part_times["klensin-2005-06-10.txt"] = "2005-06-10 14:49:17 -0400"
part_times["response-to-klensin-2005-06-10.txt"] = "2005-07-22 18:14:06 -0400"
part_times["meyer-2003-11-15.txt"] = "2003-11-15 09:47:11 -0800"
part_times["response-to-meyer-2003-11-15.txt"] = "2003-11-25 10:56:06 -0500"
part_times["glassey-2003-08-06.txt"] = "2003-08-06 02:14:24 +0000"
part_times["response-to-glassey-2003-08-06.txt"] = "2003-09-24 09:54:51 -0400"
part_times["hain-2003-07-31.txt"] = "2003-07-31 16:44:19 -0700"
part_times["response-to-hain-2003-07-31.txt"] = "2003-09-30 14:44:30 -0400"
part_times["zorn-2003-01-15.txt"] = "2003-01-15 01:22:28 -0800"
part_times["elz-2002-11-05.txt"] = "2002-11-05 10:51:13 +0700"
# No time could be found for this one:
part_times["response-to-zorn-2003-01-15.txt"] = "2003-02-08"
# This one was issued sometime between 2002-12-27 (when IESG minutes note that the
# appeal response was approved) and 2003-01-04 (when the appeal was escalated to
# the IAB) - we're using the earlier end of the window
part_times["response-to-elz-2002-11-05.txt"] = "2002-12-27"
for name in part_times:
part_times[name] = dateutil.parser.parse(part_times[name]).astimezone(
datetime.timezone.utc
)
redirects = []
for index, title in enumerate(titles):
# IESG is group 2
appeal = Appeal.objects.create(
name=titles[index], date=dates[index], group_id=2
)
for part in parts[index]:
if part.endswith(".pdf"):
content_type = "application/pdf"
else:
content_type = "text/markdown;charset=utf-8"
if part.endswith(".md"):
source_path = Path(tmpdir) / "iesg_appeals" / part
else:
source_path = Path(old_appeals_root) / part
with source_path.open("rb") as source_file:
bits = source_file.read()
if part == "morfin-2008-09-10.txt":
bits = bits.decode("macintosh")
bits = bits.replace("\r", "\n")
bits = bits.encode("utf8")
elif part in ["morfin-2006-02-07.txt", "morfin-2006-01-14.txt"]:
bits = bits.decode("windows-1252").encode("utf8")
artifact_type_id = (
"response" if part.startswith("response") else "appeal"
)
artifact = AppealArtifact.objects.create(
appeal=appeal,
artifact_type_id=artifact_type_id,
date=part_times[part].date(),
content_type=content_type,
bits=bits,
)
redirects.append(
[
f'www6.ietf.org/iesg/appeal/{part.replace(".md", ".html") if part.endswith(".md") else part}',
f"https://datatracker.ietf.org/group/iesg/appeals/artifact/{artifact.pk}",
302,
]
)
shutil.rmtree(tmpdir)
with open("iesg_appeal_redirects.csv", "w", newline="") as f:
csvwriter = csv.writer(f)
for row in redirects:
csvwriter.writerow(row)

View file

@@ -1,274 +0,0 @@
# Copyright The IETF Trust 2024, All Rights Reserved
import debug # pyflakes:ignore
import csv
import datetime
import os
import shutil
import subprocess
import tempfile
from collections import namedtuple, Counter
from pathlib import Path
from django.conf import settings
from django.core.management.base import BaseCommand
from ietf.doc.models import Document, DocEvent, State
from ietf.utils.text import xslugify
class Command(BaseCommand):
help = "Performs a one-time import of IESG statements"
def handle(self, *args, **options):
if Document.objects.filter(type="statement", group__acronym="iesg").exists():
self.stdout.write("IESG statement documents already exist - exiting")
exit(-1)
tmpdir = tempfile.mkdtemp()
process = subprocess.Popen(
["git", "clone", "https://github.com/kesara/iesg-scraper.git", tmpdir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
sub_stdout, sub_stderr = process.communicate()
if not Path(tmpdir).joinpath("iesg_statements", "2000-08-29-0.md").exists():
self.stdout.write(
"Git clone of the iesg-scraper directory did not go as expected"
)
self.stdout.write("stdout:", sub_stdout)
self.stdout.write("stderr:", sub_stderr)
self.stdout.write(f"Clean up {tmpdir} manually")
exit(-1)
redirects = []
for item in self.get_work_items():
replaced = item.title.endswith(
" SUPERSEDED"
) or item.doc_time.date() == datetime.date(2007, 7, 30)
title = item.title
if title.endswith(" - SUPERSEDED"):
title = title[: -len(" - SUPERSEDED")]
name = f"statement-iesg-{xslugify(title)}-{item.doc_time:%Y%m%d}"
dest_filename = f"{name}-00.md"
# Create Document
doc = Document.objects.create(
name=name,
type_id="statement",
title=title,
group_id=2, # The IESG group
rev="00",
uploaded_filename=dest_filename,
)
doc.set_state(
State.objects.get(
type_id="statement",
slug="replaced" if replaced else "active",
)
)
e1 = DocEvent.objects.create(
time=item.doc_time,
type="published_statement",
doc=doc,
rev="00",
by_id=1, # (System)
desc="Statement published (note: The exact time of day is inaccurate - the actual time of day is not known)",
)
e2 = DocEvent.objects.create(
type="added_comment",
doc=doc,
rev="00",
by_id=1, # (System)
desc="Statement moved into datatracker from www.ietf.org",
)
doc.save_with_history([e1, e2])
# Put file in place
source = Path(tmpdir).joinpath("iesg_statements", item.source_filename)
dest = Path(settings.DOCUMENT_PATH_PATTERN.format(doc=doc)).joinpath(
dest_filename
)
if dest.exists():
self.stdout.write(
f"WARNING: {dest} already exists - not overwriting it."
)
else:
os.makedirs(dest.parent, exist_ok=True)
shutil.copy(source, dest)
redirects.append(
[
f"www.ietf.org/about/groups/iesg/statements/{item.slug}",
f"https://datatracker.ietf.org/doc/{name}",
302,
]
)
shutil.rmtree(tmpdir)
with open("iesg_statement_redirects.csv", "w", newline="") as f:
csvwriter = csv.writer(f)
for row in redirects:
csvwriter.writerow(row)
def get_work_items(self):
Item = namedtuple("Item", "doc_time source_filename title slug")
items = []
dressed_rows = " ".join(
self.cut_paste_from_www().expandtabs(1).split(" ")
).split("\n")
old_slugs = self.get_old_slugs()
# Rube-Goldberg-esque dance to reconcile the direction of the scrape (newest
# first) with the oldest-first order we want the results to sort to
dressed_rows.reverse()
old_slugs.reverse()
total_times_date_seen = Counter([row.split(" ")[0] for row in dressed_rows])
count_date_seen_so_far = Counter()
for row, slug in zip(dressed_rows, old_slugs):
date_part = row.split(" ")[0]
title_part = row[len(date_part) + 1 :]
datetime_args = list(map(int, date_part.replace("-0", "-").split("-")))
# Use the minutes in timestamps to preserve order of statements
# on the same day as they currently appear at www.ietf.org
datetime_args.extend([12, count_date_seen_so_far[date_part]])
count_date_seen_so_far[date_part] += 1
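# e.g. the two statements dated 2012-10-25 become 12:00 and 12:01 UTC,
# so a sort by time preserves their relative order.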
doc_time = datetime.datetime(*datetime_args, tzinfo=datetime.timezone.utc)
items.append(
Item(
doc_time,
f"{date_part}-{total_times_date_seen[date_part] - count_date_seen_so_far[date_part]}.md",
title_part,
slug,
)
)
return items
def cut_paste_from_www(self):
return """2023-08-24 Support Documents in IETF Working Groups
2023-08-14 Guidance on In-Person and Online Interim Meetings
2023-05-01 IESG Statement on EtherTypes
2023-03-15 Second Report on the RFC 8989 Experiment
2023-01-27 Guidance on In-Person and Online Interim Meetings - SUPERSEDED
2022-10-31 Statement on Restricting Access to IETF IT Systems
2022-01-21 Handling Ballot Positions
2021-09-01 Report on the RFC 8989 experiment
2021-07-21 IESG Statement on Allocation of Email Addresses in the ietf.org Domain
2021-05-11 IESG Statement on Inclusive Language
2021-05-10 IESG Statement on Internet-Draft Authorship
2021-05-07 IESG Processing of RFC Errata for the IETF Stream
2021-04-16 Last Call Guidance to the Community
2020-07-23 IESG Statement On Oppressive or Exclusionary Language
2020-05-01 Guidance on Face-to-Face and Virtual Interim Meetings - SUPERSEDED
2018-03-16 IETF Meeting Photography Policy
2018-01-11 Guidance on Face-to-Face and Virtual Interim Meetings - SUPERSEDED
2017-02-09 License File for Open Source Repositories
2016-11-13 Support Documents in IETF Working Groups - SUPERSEDED
2016-02-05 Guidance on Face-to-Face and Virtual Interim Meetings - SUPERSEDED
2016-01-11 Guidance on Face-to-Face and Virtual Interim Meetings - SUPERSEDED
2015-08-20 IESG Statement on Maximizing Encrypted Access To IETF Information
2015-06-11 IESG Statement on Internet-Draft Authorship - SUPERSEDED
2014-07-20 IESG Statement on Designating RFCs as Historic
2014-05-07 DISCUSS Criteria in IESG Review
2014-03-02 Writable MIB Module IESG Statement
2013-11-03 IETF Anti-Harassment Policy
2012-10-25 IESG Statement on Ethertypes - SUPERSEDED
2012-10-25 IESG Statement on Removal of an Internet-Draft from the IETF Web Site
2011-10-20 IESG Statement on Designating RFCs as Historic - SUPERSEDED
2011-06-27 IESG Statement on Designating RFCs as Historic - SUPERSEDED
2011-06-13 IESG Statement on IESG Processing of RFC Errata concerning RFC Metadata
2010-10-11 IESG Statement on Document Shepherds
2010-05-24 IESG Statement on the Usage of Assignable Codepoints, Addresses and Names in Specification Examples
2010-05-24 IESG Statement on NomCom Eligibility and Day Passes
2009-09-08 IESG Statement on Copyright
2009-01-20 IESG Statement on Proposed Status for IETF Documents Reserving Resources for Example Purposes
2008-09-02 Guidance on Interim Meetings, Conference Calls and Jabber Sessions - SUPERSEDED
2008-07-30 IESG Processing of RFC Errata for the IETF Stream
2008-04-14 IESG Statement on Spam Control on IETF Mailing Lists
2008-03-03 IESG Statement on Registration Requests for URIs Containing Telephone Numbers
2008-02-27 IESG Statement on RFC3406 and URN Namespaces Registry Review
2008-01-23 Advice for WG Chairs Dealing with Off-Topic Postings
2007-10-04 On Appeals of IESG and Area Director Actions and Decisions
2007-07-05 Experimental Specification of New Congestion Control Algorithms
2007-03-20 Guidance on Area Director Sponsoring of Documents
2007-01-15 Last Call Guidance to the Community - SUPERSEDED
2006-04-19 IESG Statement: Normative and Informative References
2006-02-17 IESG Statement on Disruptive Posting
2006-01-09 Guidance for Spam Control on IETF Mailing Lists - SUPERSEDED
2006-01-05 IESG Statement on AUTH48 State
2005-05-12 Syntax for Format Definitions
2003-02-11 IESG Statement on IDN
2002-11-27 Copyright Statement in MIB and PIB Modules
2002-03-13 Guidance for Spam Control on IETF Mailing Lists - SUPERSEDED
2001-12-21 On Design Teams
2001-10-01 Guidelines for the Use of Formal Languages in IETF Specifications
2001-03-21 Establishment of Temporary Sub-IP Area
2000-12-06 Plans to Organize "Sub-IP" Technologies in the IETF
2000-11-20 A New IETF Work Area
2000-08-29 Guidance on Interim IETF Working Group Meetings and Conference Calls - SUPERSEDED
2000-08-29 IESG Guidance on the Moderation of IETF Working Group Mailing Lists"""
def get_old_slugs(self):
return [
"support-documents",
"interim-meetings-guidance",
"ethertypes",
"second-report-on-the-rfc-8989-experiment",
"interim-meetings-guidance-2023-01-27",
"statement-on-restricting-access",
"handling-ballot-positions",
"report-on-rfc8989-experiment",
"email-addresses-ietf-domain",
"on-inclusive-language",
"internet-draft-authorship",
"processing-errata-ietf-stream",
"last-call-guidance",
"statement-on-oppressive-exclusionary-language",
"interim-meetings-guidance-2020-05-01",
"meeting-photography-policy",
"interim-meetings-guidance-2018-01-11",
"open-source-repositories-license",
"support-documents-2016-11-13",
"interim-meetings-guidance-2016-02-05",
"interim-meetings-guidance-2016-01-11",
"maximizing-encrypted-access",
"internet-draft-authorship-2015-06-11",
"designating-rfcs-historic",
"iesg-discuss-criteria",
"writable-mib-module",
"anti-harassment-policy",
"ethertypes-2012-10-25",
"internet-draft-removal",
"designating-rfcs-historic-2011-10-20",
"designating-rfcs-historic-2011-06-27",
"rfc-metadata-errata",
"document-shepherds",
"assignable-codepoints-addresses-names",
"nomcom-eligibility-day-passes",
"copyright-2009-09-08",
"reserving-resources-examples",
"interim-meetings-guidance-2008-09-02",
"processing-rfc-errata",
"spam-control-2008-04-14",
"registration-requests-uris",
"urn-namespaces-registry",
"off-topic-postings",
"appeals-actions-decisions",
"experimental-congestion-control",
"area-director-sponsoring-documents",
"last-call-guidance-2007-01-15",
"normative-informative-references",
"disruptive-posting",
"spam-control-2006-01-09",
"auth48",
"syntax-format-definitions",
"idn",
"copyright-2002-11-27",
"spam-control-2002-03-13",
"design-teams",
"formal-languages-use",
"sub-ip-area-2001-03-21",
"sub-ip-area-2000-11-20",
"sub-ip-area-2000-12-06",
"interim-meetings-guidance-2000-08-29",
"mailing-lists-moderation",
]

View file

@@ -2,6 +2,7 @@
#
# Celery task definitions
#
import os
import shutil
import debug # pyflakes:ignore
@@ -10,6 +11,9 @@ from celery import shared_task
from contextlib import AbstractContextManager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import List
from django.conf import settings
from .index import all_id_txt, all_id2_txt, id_index_txt
@@ -26,10 +30,14 @@ class TempFileManager(AbstractContextManager):
tf.write(content)
return tf_path
def move_into_place(self, src_path: Path, dest_path: Path):
def move_into_place(self, src_path: Path, dest_path: Path, hardlink_dirs: List[Path] = []):
shutil.move(src_path, dest_path)
dest_path.chmod(0o644)
self.cleanup_list.remove(src_path)
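# Hard-link the moved file into each extra directory: os.link adds a new
# directory entry for the same inode, so the copies stay in sync at no extra
# storage cost. This assumes every directory in hardlink_dirs shares a
# filesystem with dest_path; Path.hardlink_to could replace os.link once
# python>=3.10 is the minimum (hence the comment below).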
for path in hardlink_dirs:
target = path / dest_path.name
target.unlink(missing_ok=True)
os.link(dest_path, target) # until python>=3.10
def cleanup(self):
for tf_path in self.cleanup_list:
@@ -43,9 +51,11 @@ class TempFileManager(AbstractContextManager):
@shared_task
def idindex_update_task():
"""Update I-D indexes"""
id_path = Path("/a/ietfdata/doc/draft/repository")
derived_path = Path("/a/ietfdata/derived")
download_path = Path("/a/www/www6s/download")
id_path = Path(settings.INTERNET_DRAFT_PATH)
derived_path = Path(settings.DERIVED_DIR)
download_path = Path(settings.ALL_ID_DOWNLOAD_DIR)
ftp_path = Path(settings.FTP_DIR) / "internet-drafts"
all_archive_path = Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR)
with TempFileManager("/a/tmp") as tmp_mgr:
# Generate copies of new contents
@@ -69,17 +79,17 @@ def idindex_update_task():
derived_all_id2_tmpfile = tmp_mgr.make_temp_file(all_id2_content)
# Move temp files as-atomically-as-possible into place
tmp_mgr.move_into_place(all_id_tmpfile, id_path / "all_id.txt")
tmp_mgr.move_into_place(all_id_tmpfile, id_path / "all_id.txt", [ftp_path, all_archive_path])
tmp_mgr.move_into_place(derived_all_id_tmpfile, derived_path / "all_id.txt")
tmp_mgr.move_into_place(download_all_id_tmpfile, download_path / "id-all.txt")
tmp_mgr.move_into_place(id_index_tmpfile, id_path / "1id-index.txt")
tmp_mgr.move_into_place(id_index_tmpfile, id_path / "1id-index.txt", [ftp_path, all_archive_path])
tmp_mgr.move_into_place(derived_id_index_tmpfile, derived_path / "1id-index.txt")
tmp_mgr.move_into_place(download_id_index_tmpfile, download_path / "id-index.txt")
tmp_mgr.move_into_place(id_abstracts_tmpfile, id_path / "1id-abstracts.txt")
tmp_mgr.move_into_place(id_abstracts_tmpfile, id_path / "1id-abstracts.txt", [ftp_path, all_archive_path])
tmp_mgr.move_into_place(derived_id_abstracts_tmpfile, derived_path / "1id-abstracts.txt")
tmp_mgr.move_into_place(download_id_abstracts_tmpfile, download_path / "id-abstract.txt")
tmp_mgr.move_into_place(all_id2_tmpfile, id_path / "all_id2.txt")
tmp_mgr.move_into_place(all_id2_tmpfile, id_path / "all_id2.txt", [ftp_path, all_archive_path])
tmp_mgr.move_into_place(derived_all_id2_tmpfile, derived_path / "all_id2.txt")

View file

@@ -188,17 +188,20 @@ class TaskTests(TestCase):
def test_temp_file_manager(self):
with TemporaryDirectory() as temp_dir:
with TemporaryDirectory() as other_dir:
temp_path = Path(temp_dir)
other_path = Path(other_dir)
with TempFileManager(temp_path) as tfm:
path1 = tfm.make_temp_file("yay")
path2 = tfm.make_temp_file("boo") # do not keep this one
self.assertTrue(path1.exists())
self.assertTrue(path2.exists())
dest = temp_path / "yay.txt"
tfm.move_into_place(path1, dest)
tfm.move_into_place(path1, dest, [other_path])
# make sure things were cleaned up...
self.assertFalse(path1.exists()) # moved to dest
self.assertFalse(path2.exists()) # left behind
# check destination contents and permissions
self.assertEqual(dest.read_text(), "yay")
self.assertEqual(dest.stat().st_mode & 0o777, 0o644)
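# samefile() compares device and inode numbers, so this asserts the entry in
# other_path is a hard link to dest rather than an independent copy.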
self.assertTrue(dest.samefile(other_path / "yay.txt"))

View file

@@ -3,18 +3,20 @@
import datetime
import mock
from pyquery import PyQuery
from urllib.parse import quote, urlparse
from zoneinfo import ZoneInfo
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse as urlreverse
from django.utils import timezone
import debug # pyflakes:ignore
from ietf.api.views import EmailIngestionError
from ietf.doc.factories import (
DocumentFactory,
WgDraftFactory,
@@ -34,8 +36,9 @@ from ietf.ipr.mail import (process_response_email, get_reply_to, get_update_subm
from ietf.ipr.models import (IprDisclosureBase,GenericIprDisclosure,HolderIprDisclosure,
ThirdPartyIprDisclosure)
from ietf.ipr.templatetags.ipr_filters import no_revisions_message
from ietf.ipr.utils import get_genitive, get_ipr_summary
from ietf.ipr.utils import get_genitive, get_ipr_summary, ingest_response_email
from ietf.mailtrigger.utils import gather_address_lists
from ietf.message.factories import MessageFactory
from ietf.message.models import Message
from ietf.utils.mail import outbox, empty_outbox, get_payload_text
from ietf.utils.test_utils import TestCase, login_testing_unauthorized
@@ -769,6 +772,39 @@ Subject: test
result = process_response_email(message_bytes)
self.assertIsNone(result)
@override_settings(ADMINS=(("Some Admin", "admin@example.com"),))
@mock.patch("ietf.ipr.utils.process_response_email")
def test_ingest_response_email(self, mock_process_response_email):
message = b"What a nice message"
mock_process_response_email.side_effect = ValueError("ouch!")
with self.assertRaises(EmailIngestionError) as context:
ingest_response_email(message)
self.assertIsNone(context.exception.email_recipients) # default recipients
self.assertIsNotNone(context.exception.email_body) # body set
self.assertIsNotNone(context.exception.email_original_message) # original message attached
self.assertEqual(context.exception.email_attach_traceback, True)
self.assertTrue(mock_process_response_email.called)
self.assertEqual(mock_process_response_email.call_args, mock.call(message))
mock_process_response_email.reset_mock()
mock_process_response_email.side_effect = None
mock_process_response_email.return_value = None # rejected message
with self.assertRaises(EmailIngestionError) as context:
ingest_response_email(message)
self.assertIsNone(context.exception.email_recipients) # default recipients
self.assertIsNotNone(context.exception.email_body) # body set
self.assertIsNotNone(context.exception.email_original_message) # original message attached
self.assertEqual(context.exception.email_attach_traceback, True)
self.assertTrue(mock_process_response_email.called)
self.assertEqual(mock_process_response_email.call_args, mock.call(message))
mock_process_response_email.reset_mock()
# successful operation
mock_process_response_email.return_value = MessageFactory()
ingest_response_email(message)
self.assertTrue(mock_process_response_email.called)
self.assertEqual(mock_process_response_email.call_args, mock.call(message))
def test_ajax_search(self):
url = urlreverse('ietf.ipr.views.ajax_search')
response=self.client.get(url+'?q=disclosure')

View file

@@ -1,6 +1,9 @@
# Copyright The IETF Trust 2014-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from textwrap import dedent
from ietf.ipr.mail import process_response_email
from ietf.ipr.models import IprDocRel
import debug # pyflakes:ignore
@@ -86,3 +89,30 @@ def generate_draft_recursive_txt():
f.write(data)
def ingest_response_email(message: bytes):
from ietf.api.views import EmailIngestionError # avoid circular import
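# Note (assumption): the "{error_summary}" placeholder in the bodies below is
# deliberately left unformatted; EmailIngestionError presumably expands it
# when the notification email is built.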
try:
result = process_response_email(message)
except Exception as err:
raise EmailIngestionError(
"Datatracker IPR email ingestion error",
email_body=dedent("""\
An error occurred while ingesting IPR email into the Datatracker. The original message is attached.
{error_summary}
"""),
email_original_message=message,
email_attach_traceback=True,
) from err
if result is None:
raise EmailIngestionError(
"Datatracker IPR email ingestion rejected",
email_body=dedent("""\
A message was rejected while ingesting IPR email into the Datatracker. The original message is attached.
{error_summary}
"""),
email_original_message=message,
email_attach_traceback=True,
)

View file

@@ -1,364 +0,0 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from collections import namedtuple
import csv
import datetime
import os
import re
import shutil
from django.conf import settings
from django.core.management import BaseCommand
from pathlib import Path
from zoneinfo import ZoneInfo
from ietf.doc.models import DocEvent, Document
from ietf.meeting.models import (
Meeting,
SchedTimeSessAssignment,
Schedule,
SchedulingEvent,
Session,
TimeSlot,
)
from ietf.name.models import DocTypeName
def add_time_of_day(bare_datetime):
"""Add a time for the iesg meeting based on a date and make it tzaware
From the secretariat - the telechats happened at these times:
2015-04-09 to present: 0700 PT America/Los Angeles
1993-02-01 to 2015-03-12: 1130 ET America/New York
1991-07-30 to 1993-01-25: 1200 ET America/New York
"""
dt = None
if bare_datetime.year > 2015:
dt = bare_datetime.replace(hour=7).replace(
tzinfo=ZoneInfo("America/Los_Angeles")
)
elif bare_datetime.year == 2015:
if bare_datetime.month >= 4:
dt = bare_datetime.replace(hour=7).replace(
tzinfo=ZoneInfo("America/Los_Angeles")
)
else:
dt = bare_datetime.replace(hour=11, minute=30).replace(
tzinfo=ZoneInfo("America/New_York")
)
elif bare_datetime.year > 1993:
dt = bare_datetime.replace(hour=11, minute=30).replace(
tzinfo=ZoneInfo("America/New_York")
)
elif bare_datetime.year == 1993:
if bare_datetime.month >= 2:
dt = bare_datetime.replace(hour=11, minute=30).replace(
tzinfo=ZoneInfo("America/New_York")
)
else:
dt = bare_datetime.replace(hour=12).replace(
tzinfo=ZoneInfo("America/New_York")
)
else:
dt = bare_datetime.replace(hour=12).replace(tzinfo=ZoneInfo("America/New_York"))
return dt.astimezone(datetime.timezone.utc)
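# Illustrative example (2016-03-01 is before the US DST switch, so LA is PST,
# UTC-8):
#   add_time_of_day(datetime.datetime(2016, 3, 1))
#   -> datetime.datetime(2016, 3, 1, 15, 0, tzinfo=datetime.timezone.utc)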
def build_bof_coord_data():
CoordTuple = namedtuple("CoordTuple", "meeting_number source_name")
def utc_from_la_time(time):
return time.replace(tzinfo=ZoneInfo("America/Los_Angeles")).astimezone(
datetime.timezone.utc
)
data = dict()
data[utc_from_la_time(datetime.datetime(2016, 6, 10, 7, 0))] = CoordTuple(
96, "2015/bof-minutes-ietf-96.txt"
)
data[utc_from_la_time(datetime.datetime(2016, 10, 6, 7, 0))] = CoordTuple(
97, "2016/BoF-Minutes-2016-10-06.txt"
)
data[utc_from_la_time(datetime.datetime(2017, 2, 15, 8, 0))] = CoordTuple(
98, "2017/bof-minutes-ietf-98.txt"
)
data[utc_from_la_time(datetime.datetime(2017, 6, 7, 8, 0))] = CoordTuple(
99, "2017/bof-minutes-ietf-99.txt"
)
data[utc_from_la_time(datetime.datetime(2017, 10, 5, 7, 0))] = CoordTuple(
100, "2017/bof-minutes-ietf-100.txt"
)
data[utc_from_la_time(datetime.datetime(2018, 2, 5, 11, 0))] = CoordTuple(
101, "2018/bof-minutes-ietf-101.txt"
)
data[utc_from_la_time(datetime.datetime(2018, 6, 5, 8, 0))] = CoordTuple(
102, "2018/bof-minutes-ietf-102.txt"
)
data[utc_from_la_time(datetime.datetime(2018, 9, 26, 7, 0))] = CoordTuple(
103, "2018/bof-minutes-ietf-103.txt"
)
data[utc_from_la_time(datetime.datetime(2019, 2, 15, 9, 0))] = CoordTuple(
104, "2019/bof-minutes-ietf-104.txt"
)
data[utc_from_la_time(datetime.datetime(2019, 6, 11, 7, 30))] = CoordTuple(
105, "2019/bof-minutes-ietf-105.txt"
)
data[utc_from_la_time(datetime.datetime(2019, 10, 9, 6, 30))] = CoordTuple(
106, "2019/bof-minutes-ietf-106.txt"
)
data[utc_from_la_time(datetime.datetime(2020, 2, 13, 8, 0))] = CoordTuple(
107, "2020/bof-minutes-ietf-107.txt"
)
data[utc_from_la_time(datetime.datetime(2020, 6, 15, 8, 0))] = CoordTuple(
108, "2020/bof-minutes-ietf-108.txt"
)
data[utc_from_la_time(datetime.datetime(2020, 10, 9, 7, 0))] = CoordTuple(
109, "2020/bof-minutes-ietf-109.txt"
)
data[utc_from_la_time(datetime.datetime(2021, 1, 14, 13, 30))] = CoordTuple(
110, "2021/bof-minutes-ietf-110.txt"
)
data[utc_from_la_time(datetime.datetime(2021, 6, 1, 8, 0))] = CoordTuple(
111, "2021/bof-minutes-ietf-111.txt"
)
data[utc_from_la_time(datetime.datetime(2021, 9, 15, 9, 0))] = CoordTuple(
112, "2021/bof-minutes-ietf-112.txt"
)
data[utc_from_la_time(datetime.datetime(2022, 1, 28, 7, 0))] = CoordTuple(
113, "2022/bof-minutes-ietf-113.txt"
)
data[utc_from_la_time(datetime.datetime(2022, 6, 2, 10, 0))] = CoordTuple(
114, "2022/bof-minutes-ietf-114.txt"
)
data[utc_from_la_time(datetime.datetime(2022, 9, 13, 9, 0))] = CoordTuple(
115, "2022/bof-minutes-ietf-115.txt"
)
data[utc_from_la_time(datetime.datetime(2023, 2, 1, 9, 0))] = CoordTuple(
116, "2023/bof-minutes-ietf-116.txt"
)
data[utc_from_la_time(datetime.datetime(2023, 6, 1, 7, 0))] = CoordTuple(
117, "2023/bof-minutes-ietf-117.txt"
)
data[utc_from_la_time(datetime.datetime(2023, 9, 15, 8, 0))] = CoordTuple(
118, "2023/bof-minutes-ietf-118.txt"
)
return data
class Command(BaseCommand):
help = "Performs a one-time import of IESG minutes, creating Meetings to attach them to"
def handle(self, *args, **options):
old_minutes_root = (
"/a/www/www6/iesg/minutes"
if settings.SERVER_MODE == "production"
else "/assets/www6/iesg/minutes"
)
minutes_dir = Path(old_minutes_root)
date_re = re.compile(r"\d{4}-\d{2}-\d{2}")
meeting_times = set()
redirects = []
for file_prefix in ["minutes", "narrative"]:
paths = list(minutes_dir.glob(f"[12][09][0129][0-9]/{file_prefix}*.txt"))
paths.extend(
list(minutes_dir.glob(f"[12][09][0129][0-9]/{file_prefix}*.html"))
)
for path in paths:
s = date_re.search(path.name)
if s:
meeting_times.add(
add_time_of_day(
datetime.datetime.strptime(s.group(), "%Y-%m-%d")
)
)
bof_coord_data = build_bof_coord_data()
bof_times = set(bof_coord_data.keys())
assert len(bof_times.intersection(meeting_times)) == 0
meeting_times.update(bof_times)
year_seen = None
for dt in sorted(meeting_times):
if dt.year != year_seen:
counter = 1
year_seen = dt.year
meeting_name = f"interim-{dt.year}-iesg-{counter:02d}"
meeting = Meeting.objects.create(
number=meeting_name,
type_id="interim",
date=dt.date(),
days=1,
time_zone=dt.tzname(),
)
schedule = Schedule.objects.create(
meeting=meeting,
owner_id=1, # the "(System)" person
visible=True,
public=True,
)
meeting.schedule = schedule
meeting.save()
session = Session.objects.create(
meeting=meeting,
group_id=2, # The IESG group
type_id="regular",
purpose_id="regular",
name=(
f"IETF {bof_coord_data[dt].meeting_number} BOF Coordination Call"
if dt in bof_times
else "Formal Telechat"
),
)
SchedulingEvent.objects.create(
session=session,
status_id="sched",
by_id=1, # (System)
)
timeslot = TimeSlot.objects.create(
meeting=meeting,
type_id="regular",
time=dt,
duration=datetime.timedelta(seconds=2 * 60 * 60),
)
SchedTimeSessAssignment.objects.create(
timeslot=timeslot, session=session, schedule=schedule
)
if dt in bof_times:
source = minutes_dir / bof_coord_data[dt].source_name
if source.exists():
doc_name = (
f"minutes-interim-{dt.year}-iesg-{counter:02d}-{dt:%Y%m%d%H%M}"
)
doc_filename = f"{doc_name}-00.txt"
doc = Document.objects.create(
name=doc_name,
type_id="minutes",
title=f"Minutes IETF {bof_coord_data[dt].meeting_number} BOF coordination {meeting_name} {dt:%Y-%m-%d %H:%M}",
group_id=2, # the IESG group
rev="00",
uploaded_filename=doc_filename,
)
e = DocEvent.objects.create(
type="comment",
doc=doc,
rev="00",
by_id=1, # "(System)"
desc="Minutes moved into datatracker",
)
doc.save_with_history([e])
session.presentations.create(document=doc, rev=doc.rev)
dest = (
Path(settings.AGENDA_PATH)
/ meeting_name
/ "minutes"
/ doc_filename
)
if dest.exists():
self.stdout.write(
f"WARNING: {dest} already exists - not overwriting it."
)
else:
os.makedirs(dest.parent, exist_ok=True)
shutil.copy(source, dest)
redirects.append(
[
f"www6.ietf.org/iesg/minutes/{dt.year}/{bof_coord_data[dt].source_name}",
f"https://datatracker.ietf.org/doc/{doc_name}",
302,
]
)
else:
for type_id in ["minutes", "narrativeminutes"]:
source_file_prefix = (
"minutes" if type_id == "minutes" else "narrative-minutes"
)
txt_source = (
minutes_dir
/ f"{dt.year}"
/ f"{source_file_prefix}-{dt:%Y-%m-%d}.txt"
)
html_source = (
minutes_dir
/ f"{dt.year}"
/ f"{source_file_prefix}-{dt:%Y-%m-%d}.html"
)
if txt_source.exists() and html_source.exists():
self.stdout.write(
f"WARNING: Both {txt_source} and {html_source} exist."
)
if txt_source.exists() or html_source.exists():
prefix = DocTypeName.objects.get(slug=type_id).prefix
doc_name = f"{prefix}-interim-{dt.year}-iesg-{counter:02d}-{dt:%Y%m%d%H%M}"
suffix = "html" if html_source.exists() else "txt"
doc_filename = f"{doc_name}-00.{suffix}"
verbose_type = (
"Minutes" if type_id == "minutes" else "Narrative Minutes"
)
doc = Document.objects.create(
name=doc_name,
type_id=type_id,
title=f"{verbose_type} {meeting_name} {dt:%Y-%m-%d %H:%M}",
group_id=2, # the IESG group
rev="00",
uploaded_filename=doc_filename,
)
e = DocEvent.objects.create(
type="comment",
doc=doc,
rev="00",
by_id=1, # "(System)"
desc=f"{verbose_type} moved into datatracker",
)
doc.save_with_history([e])
session.presentations.create(document=doc, rev=doc.rev)
dest = (
Path(settings.AGENDA_PATH)
/ meeting_name
/ type_id
/ doc_filename
)
if dest.exists():
self.stdout.write(
f"WARNING: {dest} already exists - not overwriting it."
)
else:
os.makedirs(dest.parent, exist_ok=True)
if html_source.exists():
html_content = html_source.read_text(encoding="utf-8")
html_content = html_content.replace(
f'href="IESGnarrative-{dt:%Y-%m-%d}.html#',
'href="#',
)
html_content = re.sub(
r'<a href="file:///[^"]*"><span[^>]*>([^<]*)</span></a>',
r"\1",
html_content,
)
html_content = re.sub(
r'<a href="file:///[^"]*">([^<]*)</a>',
r"\1",
html_content,
)
html_content = re.sub(
'<a href="http://validator.w3.org/[^>]*> *<img[^>]*></a>',
"",
html_content,
)
dest.write_text(html_content, encoding="utf-8")
else:
shutil.copy(txt_source, dest)
redirects.append(
[
f"www6.ietf.org/iesg/minutes/{dt.year}/{txt_source.name if txt_source.exists() else html_source.name}",
f"https://datatracker.ietf.org/doc/{doc_name}",
302,
]
)
counter += 1
with open("iesg_minutes_redirects.csv", "w", newline="") as f:
csvwriter = csv.writer(f)
for row in redirects:
csvwriter.writerow(row)

View file

@@ -24,6 +24,7 @@ from django.utils.encoding import force_str
import debug # pyflakes:ignore
from ietf.api.views import EmailIngestionError
from ietf.dbtemplate.factories import DBTemplateFactory
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.factories import DocEventFactory, WgDocumentAuthorFactory, \
@@ -37,14 +38,15 @@ from ietf.nomcom.test_data import nomcom_test_data, generate_cert, check_comment
MEMBER_USER, SECRETARIAT_USER, EMAIL_DOMAIN, NOMCOM_YEAR
from ietf.nomcom.models import NomineePosition, Position, Nominee, \
NomineePositionStateName, Feedback, FeedbackTypeName, \
Nomination, FeedbackLastSeen, TopicFeedbackLastSeen, ReminderDates
Nomination, FeedbackLastSeen, TopicFeedbackLastSeen, ReminderDates, \
NomCom
from ietf.nomcom.management.commands.send_reminders import Command, is_time_to_send
from ietf.nomcom.factories import NomComFactory, FeedbackFactory, TopicFactory, \
nomcom_kwargs_for_year, provide_private_key_to_test_client, \
key
from ietf.nomcom.utils import get_nomcom_by_year, make_nomineeposition, \
get_hash_nominee_position, is_eligible, list_eligible, \
get_eligibility_date, suggest_affiliation, \
get_eligibility_date, suggest_affiliation, ingest_feedback_email, \
decorate_volunteers_with_qualifications
from ietf.person.factories import PersonFactory, EmailFactory
from ietf.person.models import Email, Person
@@ -1114,6 +1116,47 @@ class FeedbackTest(TestCase):
self.assertNotEqual(feedback.comments, comment_text)
self.assertEqual(check_comments(feedback.comments, comment_text, self.privatekey_file), True)
@mock.patch("ietf.nomcom.utils.create_feedback_email")
def test_ingest_feedback_email(self, mock_create_feedback_email):
message = b"This is nomcom feedback"
no_nomcom_year = date_today().year + 10 # a guess at a year with no nomcoms
while NomCom.objects.filter(group__acronym__icontains=no_nomcom_year).exists():
no_nomcom_year += 1
inactive_nomcom = NomComFactory(group__state_id="conclude", group__acronym=f"nomcom{no_nomcom_year + 1}")
# cases where the nomcom does not exist, so admins are notified
for bad_year in (no_nomcom_year, inactive_nomcom.year()):
with self.assertRaises(EmailIngestionError) as context:
ingest_feedback_email(message, bad_year)
self.assertIn("does not exist", context.exception.msg)
self.assertIsNotNone(context.exception.email_body) # error message to be sent
self.assertIsNone(context.exception.email_recipients) # default recipients (i.e., admin)
self.assertIsNone(context.exception.email_original_message) # no original message
self.assertFalse(context.exception.email_attach_traceback) # no traceback
self.assertFalse(mock_create_feedback_email.called)
# nomcom exists but an error occurs, so feedback goes to the nomcom chair
active_nomcom = NomComFactory(group__acronym=f"nomcom{no_nomcom_year + 2}")
mock_create_feedback_email.side_effect = ValueError("ouch!")
with self.assertRaises(EmailIngestionError) as context:
ingest_feedback_email(message, active_nomcom.year())
self.assertIn(f"Error ingesting nomcom {active_nomcom.year()}", context.exception.msg)
self.assertIsNotNone(context.exception.email_body) # error message to be sent
self.assertEqual(context.exception.email_recipients, active_nomcom.chair_emails())
self.assertEqual(context.exception.email_original_message, message)
self.assertFalse(context.exception.email_attach_traceback) # no traceback
self.assertTrue(mock_create_feedback_email.called)
self.assertEqual(mock_create_feedback_email.call_args, mock.call(active_nomcom, message))
mock_create_feedback_email.reset_mock()
# and, finally, success
mock_create_feedback_email.side_effect = None
mock_create_feedback_email.return_value = FeedbackFactory(author="someone@example.com")
ingest_feedback_email(message, active_nomcom.year())
self.assertTrue(mock_create_feedback_email.called)
self.assertEqual(mock_create_feedback_email.call_args, mock.call(active_nomcom, message))
class ReminderTest(TestCase):
def setUp(self):

View file

@@ -16,6 +16,7 @@ from email.errors import HeaderParseError
from email.header import decode_header
from email.iterators import typed_subpart_iterator
from email.utils import parseaddr
from textwrap import dedent
from django.db.models import Q, Count
from django.conf import settings
@@ -715,3 +716,34 @@ def extract_volunteers(year):
decorate_volunteers_with_qualifications(volunteers,nomcom=nomcom)
volunteers = sorted(volunteers,key=lambda v:(not v.eligible,v.person.last_name()))
return nomcom, volunteers
def ingest_feedback_email(message: bytes, year: int):
from ietf.api.views import EmailIngestionError # avoid circular import
from .models import NomCom
try:
nomcom = NomCom.objects.get(group__acronym__icontains=str(year),
group__state__slug='active')
except NomCom.DoesNotExist:
raise EmailIngestionError(
f"Error ingesting nomcom email: nomcom {year} does not exist or is not active",
email_body=dedent(f"""\
An email for nomcom {year} was posted to ingest_feedback_email, but no
active nomcom exists for that year.
"""),
)
try:
feedback = create_feedback_email(nomcom, message)
except Exception as err:
raise EmailIngestionError(
f"Error ingesting nomcom {year} feedback email",
email_recipients=nomcom.chair_emails(),
email_body=dedent(f"""\
An error occurred while ingesting feedback email for nomcom {year}.
{{error_summary}}
"""),
email_original_message=message,
) from err
log("Received nomcom email from %s" % feedback.author)

View file

@@ -679,11 +679,13 @@ MEETINGHOST_LOGO_PATH = AGENDA_PATH # put these in the same place as other proc
IPR_DOCUMENT_PATH = '/a/www/ietf-ftp/ietf/IPR/'
# Move drafts to this directory when they expire
INTERNET_DRAFT_ARCHIVE_DIR = '/a/ietfdata/doc/draft/collection/draft-archive/'
# The following directory contains linked copies of all drafts, but don't
# write anything to this directory -- its content is maintained by ghostlinkd:
# The following directory contains copies of all drafts - it used to be
# a set of hardlinks maintained by ghostlinkd, but is now explicitly written to
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/a/ietfdata/doc/draft/archive'
MEETING_RECORDINGS_DIR = '/a/www/audio'
DERIVED_DIR = '/a/ietfdata/derived'
FTP_DIR = '/a/ftp'
ALL_ID_DOWNLOAD_DIR = '/a/www/www6s/download'
DOCUMENT_FORMAT_ALLOWLIST = ["txt", "ps", "pdf", "xml", "html", ]

View file

@@ -221,6 +221,7 @@ class ManualSubmissionTests(TestCase):
class SubmitTests(BaseSubmitTestCase):
def setUp(self):
super().setUp()
(Path(settings.FTP_DIR) / "internet-drafts").mkdir()
# Submit views assume there is a "next" IETF to look for cutoff dates against
MeetingFactory(type_id='ietf', date=date_today()+datetime.timedelta(days=180))
@@ -954,6 +955,24 @@ class SubmitTests(BaseSubmitTestCase):
self.assertEqual(new_revision.by.name, "Submitter Name")
self.verify_bibxml_ids_creation(draft)
repository_path = Path(draft.get_file_name())
self.assertTrue(repository_path.exists()) # Note that this doesn't check that it has the right _content_
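# samefile() compares device and inode, confirming the FTP and all-archive
# entries are hard links to the repository file, not separate copies.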
ftp_path = Path(settings.FTP_DIR) / "internet-drafts" / repository_path.name
self.assertTrue(repository_path.samefile(ftp_path))
all_archive_path = Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR) / repository_path.name
self.assertTrue(repository_path.samefile(all_archive_path))
for ext in settings.IDSUBMIT_FILE_TYPES:
if ext == "txt":
continue
variant_path = repository_path.parent / f"{repository_path.stem}.{ext}"
if variant_path.exists():
variant_ftp_path = Path(settings.FTP_DIR) / "internet-drafts" / variant_path.name
self.assertTrue(variant_path.samefile(variant_ftp_path))
variant_all_archive_path = Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR) / variant_path.name
self.assertTrue(variant_path.samefile(variant_all_archive_path))
def test_submit_new_individual_txt(self):
self.submit_new_individual(["txt"])

View file

@@ -168,6 +168,9 @@ def validate_submission_rev(name, rev):
if rev != expected:
return 'Invalid revision (revision %02d is expected)' % expected
# This is not really correct, though the edge cases it doesn't cover are unlikely.
# It might be better to look in the combined archive to make sure we're not colliding
# with something that already exists there because it was included from an approved
# personal collection.
for dirname in [settings.INTERNET_DRAFT_PATH, settings.INTERNET_DRAFT_ARCHIVE_DIR, ]:
dir = pathlib.Path(dirname)
pattern = '%s-%02d.*' % (name, rev)
@@ -652,6 +655,10 @@ def move_files_to_repository(submission):
dest = Path(settings.IDSUBMIT_REPOSITORY_PATH) / fname
if source.exists():
move(source, dest)
all_archive_dest = Path(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR) / dest.name
ftp_dest = Path(settings.FTP_DIR) / "internet-drafts" / dest.name
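# Hard-link the repository copy into the all-archive and FTP trees (same
# inode, no extra storage). This assumes the three directories share one
# filesystem; os.link raises FileExistsError if the name already exists.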
os.link(dest, all_archive_dest)
os.link(dest, ftp_dest)
elif dest.exists():
log.log("Intended to move '%s' to '%s', but found source missing while destination exists.")
elif ext in submission.file_types.split(','):

View file

@@ -304,3 +304,22 @@ def add_review_comment(doc_name, review_time, by, comment):
e.by = by
e.save()
def ingest_review_email(message: bytes):
from ietf.api.views import EmailIngestionError # avoid circular import
try:
doc_name, review_time, by, comment = parse_review_email(message)
except Exception as err:
raise EmailIngestionError("Unable to parse message as IANA review email") from err
log(f"Read IANA review email for {doc_name} at {review_time} by {by}")
if by.name == "(System)":
log("WARNING: person responsible for email does not have a IANA role") # (sic)
try:
add_review_comment(doc_name, review_time, by, comment)
except Document.DoesNotExist:
log(f"ERROR: unknown document {doc_name}")
raise EmailIngestionError(f"Unknown document {doc_name}")
except Exception as err:
raise EmailIngestionError("Error ingesting IANA review email") from err

View file

@@ -19,10 +19,12 @@ from django.test.utils import override_settings
import debug # pyflakes:ignore
from ietf.api.views import EmailIngestionError
from ietf.doc.factories import WgDraftFactory, RfcFactory, DocumentAuthorFactory, DocEventFactory
from ietf.doc.models import Document, DocEvent, DeletedEvent, DocTagName, RelatedDocument, State, StateDocEvent
from ietf.doc.utils import add_state_change_event
from ietf.group.factories import GroupFactory
from ietf.person.factories import PersonFactory
from ietf.person.models import Person
from ietf.sync import iana, rfceditor, tasks
from ietf.utils.mail import outbox, empty_outbox
@@ -214,6 +216,61 @@ ICANN
iana.add_review_comment(doc_name, review_time, by, comment)
self.assertEqual(DocEvent.objects.filter(doc=draft, type="iana_review").count(), events_before+1)
@mock.patch("ietf.sync.iana.add_review_comment")
@mock.patch("ietf.sync.iana.parse_review_email")
def test_ingest_review_email(self, mock_parse_review_email, mock_add_review_comment):
mock_parse_review_email.side_effect = ValueError("ouch!")
message = b"message"
# Error parsing mail
with self.assertRaises(EmailIngestionError) as context:
iana.ingest_review_email(message)
self.assertIsNone(context.exception.as_emailmessage()) # no email
self.assertEqual("Unable to parse message as IANA review email", str(context.exception))
self.assertTrue(mock_parse_review_email.called)
self.assertEqual(mock_parse_review_email.call_args, mock.call(message))
self.assertFalse(mock_add_review_comment.called)
mock_parse_review_email.reset_mock()
args = (
"doc-name",
datetime.datetime.now(tz=datetime.timezone.utc),
PersonFactory(),
"yadda yadda yadda",
)
mock_parse_review_email.side_effect = None
mock_parse_review_email.return_value = args
mock_add_review_comment.side_effect = Document.DoesNotExist
with self.assertRaises(EmailIngestionError) as context:
iana.ingest_review_email(message)
self.assertIsNone(context.exception.as_emailmessage()) # no email
self.assertEqual(str(context.exception), "Unknown document doc-name")
self.assertTrue(mock_parse_review_email.called)
self.assertEqual(mock_parse_review_email.call_args, mock.call(message))
self.assertTrue(mock_add_review_comment.called)
self.assertEqual(mock_add_review_comment.call_args, mock.call(*args))
mock_parse_review_email.reset_mock()
mock_add_review_comment.reset_mock()
mock_add_review_comment.side_effect = ValueError("ouch!")
with self.assertRaises(EmailIngestionError) as context:
iana.ingest_review_email(message)
self.assertIsNone(context.exception.as_emailmessage()) # no email
self.assertEqual("Error ingesting IANA review email", str(context.exception))
self.assertTrue(mock_parse_review_email.called)
self.assertEqual(mock_parse_review_email.call_args, mock.call(message))
self.assertTrue(mock_add_review_comment.called)
self.assertEqual(mock_add_review_comment.call_args, mock.call(*args))
mock_parse_review_email.reset_mock()
mock_add_review_comment.reset_mock()
mock_add_review_comment.side_effect = None
iana.ingest_review_email(message)
self.assertTrue(mock_parse_review_email.called)
self.assertEqual(mock_parse_review_email.call_args, mock.call(message))
self.assertTrue(mock_add_review_comment.called)
self.assertEqual(mock_add_review_comment.call_args, mock.call(*args))
def test_notify_page(self):
# check that we can get the notify page
url = urlreverse("ietf.sync.views.notify", kwargs=dict(org="iana", notification="changes"))

View file

@@ -1,7 +1,6 @@
# Copyright The IETF Trust 2012-2020, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import subprocess
import os

View file

@@ -140,6 +140,7 @@
{% endif %}
</span>
{% endif %}
<a class="mx-2" href="https://status.ietf.org" target="_blank">System Status</a>
<span class="mx-2 text-danger">
<i class="bi bi-bug"></i>
Report a bug:

View file

@@ -440,6 +440,12 @@
Release notes
</a>
</li>
<li>
<a class="dropdown-item {% if flavor != 'top' %}text-wrap{% endif %}"
target="_blank" href="https://status.ietf.org">
System status
</a>
</li>
{% if flavor == 'top' %}
<li><hr class="dropdown-divider">
</li>

View file

@@ -0,0 +1,93 @@
{% extends "base.html" %}
{# Copyright The IETF Trust 2024, All Rights Reserved #}
{% load django_bootstrap5 ietf_filters origin static %}
{% block title %}Investigate{% endblock %}
{% block pagehead %}
<link rel="stylesheet" href="{% static "ietf/css/list.css" %}">
{% endblock %}
{% block content %}
{% origin %}
<h1>Investigate</h1>
<form id="investigate" method="post">
{% csrf_token %}
{% bootstrap_form form %}
<button class="btn btn-primary" type="submit">Investigate</button>
</form>
{% if results %}
<div id="results">
{% if results.can_verify %}
<h2>These can be authenticated</h2>
<table id="authenticated" class="table table-sm table-striped tablesorter">
<thead>
<tr>
<th scope="col" data-sort="name">Name</th>
<th scope="col" data-sort="modified">Last Modified On</th>
<th scope="col" data-sort="link">Link</th>
</tr>
</thead>
<tbody>
{% for path in results.can_verify %}
{% with url=path|url_for_path %}
<tr><td>{{path.name}}</td><td>{{path|mtime|date:"DATETIME_FORMAT"}}</td><td><a href="{{url}}">{{url}}</a></td></tr>
{% endwith %}
{% endfor %}
</tbody>
</table>
{% else %}
<h2>Nothing with this name fragment can be authenticated</h2>
{% endif %}
<hr>
{% if results.unverifiable_collections %}
<h2>These are in the archive, but cannot be authenticated</h2>
<table id="unverifiable" class="table table-sm table-striped tablesorter">
<thead>
<tr>
<th scope="col" data-sort="name">Name</th>
<th scope="col" data-sort="modified">Last Modified On</th>
<th scope="col" data-sort="link">Link</th>
<th scope="col" data-sort="source">Source</th>
</tr>
</thead>
<tbody>
{% for path in results.unverifiable_collections %}
{% with url=path|url_for_path %}
<tr>
<td>{{path.name}}</td>
<td>{{path|mtime|date:"DATETIME_FORMAT"}}</td>
<td><a href="{{url}}">{{url}}</a></td>
<td>{{path}}</td>
</tr>
{% endwith %}
{% endfor %}
</tbody>
</table>
{% endif %}
{% if results.unexpected %}
<h2>These are unexpected and we do not know what their origin is. These cannot be authenticated</h2>
<table id="unexpected" class="table table-sm table-striped tablesorter">
<thead>
<tr>
<th scope="col" data-sort="name">Name</th>
<th scope="col" data-sort="modified">Last Modified On</th>
<th scope="col" data-sort="link">Link</th>
</tr>
</thead>
<tbody>
{% for path in results.unexpected %}
{% with url=path|url_for_path %}
<tr>
<td>{{path.name}}</td>
<td>{{path|mtime|date:"DATETIME_FORMAT"}}</td>
<td><a href="{{url}}">{{url}}</a></td>
</tr>
{% endwith %}
{% endfor %}
</tbody>
</table>
{% endif %}
</div>
{% endif %}
{% endblock %}
{% block js %}
<script src="{% static "ietf/js/list.js" %}"></script>
{% endblock %}

View file

@@ -10,10 +10,10 @@
{% origin %}
<h1>Other (not Working Group) email lists</h1>
<p>Guidelines for these lists, including how to request a
new one to be created, are at
<a href="https://www.ietf.org/how/lists/nonwglist-guidelines/">
https://www.ietf.org/how/lists/nonwglist-guidelines/
</a>
new one to be created, can be found on the
<a href="https://www.ietf.org/participate/lists/nonwglist-guidelines/">
Non-Working Group email list guidelines
</a> webpage.
</p>
{% cache 900 nonwglisttable %}
<table class="table table-sm table-striped tablesorter">

View file

@@ -211,6 +211,7 @@ class TestCase(django.test.TestCase):
'INTERNET_DRAFT_ARCHIVE_DIR',
'INTERNET_DRAFT_PATH',
'BIBXML_BASE_PATH',
'FTP_DIR',
]
parser = html5lib.HTMLParser(strict=True)