diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index d62707aac..1f6e29e57 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -100,171 +100,22 @@ jobs:
   # -----------------------------------------------------------------
   # TESTS
   # -----------------------------------------------------------------
-  tests-python:
-    name: Run Tests (Python)
+
+  tests:
+    name: Run Tests
+    uses: ./.github/workflows/tests.yml
     if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
     needs: [prepare]
-    runs-on: ubuntu-latest
-    container: ghcr.io/ietf-tools/datatracker-app-base:latest
-
-    services:
-      db:
-        image: ghcr.io/ietf-tools/datatracker-db:latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare for tests
-        run: |
-          chmod +x ./dev/tests/prepare.sh
-          sh ./dev/tests/prepare.sh
-
-      - name: Ensure DB is ready
-        run: |
-          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
-
-      - name: Run all tests
-        shell: bash
-        run: |
-          echo "Running checks..."
-          ./ietf/manage.py check
-          ./ietf/manage.py migrate --fake-initial
-          echo "Validating migrations..."
-          if ! ( ietf/manage.py makemigrations --dry-run --check --verbosity 3 ) ; then
-            echo "Model changes without migrations found."
-            exit 1
-          fi
-          echo "Running tests..."
-          if [[ "x${{ github.event.inputs.ignoreLowerCoverage }}" == "xtrue" ]]; then
-            echo "Lower coverage failures will be ignored."
-            ./ietf/manage.py test --validate-html-harder --settings=settings_test --ignore-lower-coverage
-          else
-            ./ietf/manage.py test --validate-html-harder --settings=settings_test
-          fi
-          coverage xml
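+    # workflow_dispatch inputs are strings; the comparison below hands the reusable workflow a real boolean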
+    with:
+      ignoreLowerCoverage: ${{ github.event.inputs.ignoreLowerCoverage == 'true' }}
 
-      - name: Upload Coverage Results to Codecov
-        uses: codecov/codecov-action@v3.1.4
-        with:
-          files: coverage.xml
-
-      - name: Convert Coverage Results
-        if: ${{ always() }}
-        run: |
-          mv latest-coverage.json coverage.json
-
-      - name: Upload Coverage Results as Build Artifact
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        with:
-          name: coverage
-          path: coverage.json
-
-  tests-playwright:
-    name: Run Tests (Playwright)
-    if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
-    needs: [prepare]
-    runs-on: macos-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        project: [chromium, firefox]
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - uses: actions/setup-node@v3
-        with:
-          node-version: '18'
-
-      - name: Run all tests
-        run: |
-          echo "Installing dependencies..."
-          yarn
-          echo "Installing Playwright..."
-          cd playwright
-          mkdir test-results
-          npm ci
-          npx playwright install --with-deps ${{ matrix.project }}
-          echo "Running tests..."
-          npx playwright test --project=${{ matrix.project }}
-
-      - name: Upload Report
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        continue-on-error: true
-        with:
-          name: playwright-results-${{ matrix.project }}
-          path: playwright/test-results/
-          if-no-files-found: ignore
-
-  tests-playwright-legacy:
-    name: Run Tests (Playwright Legacy)
-    if: ${{ github.event.inputs.skiptests == 'false' || github.ref_name == 'release' }}
-    needs: [prepare]
-    runs-on: ubuntu-latest
-    container: ghcr.io/ietf-tools/datatracker-app-base:latest
-    strategy:
-      fail-fast: false
-      matrix:
-        project: [chromium, firefox]
-
-    services:
-      db:
-        image: ghcr.io/ietf-tools/datatracker-db:latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare for tests
-        run: |
-          chmod +x ./dev/tests/prepare.sh
-          sh ./dev/tests/prepare.sh
-
-      - name: Ensure DB is ready
-        run: |
-          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
-
-      - name: Start Datatracker
-        run: |
-          echo "Running checks..."
-          ./ietf/manage.py check
-          echo "Starting datatracker..."
-          ./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local &
-          echo "Waiting for datatracker to be ready..."
-          /usr/local/bin/wait-for localhost:8000 -- echo "Datatracker ready"
-
-      - name: Run all tests
-        env:
-          # Required to get firefox to run as root:
-          HOME: ""
-        run: |
-          echo "Installing dependencies..."
-          yarn
-          echo "Installing Playwright..."
-          cd playwright
-          mkdir test-results
-          npm ci
-          npx playwright install --with-deps ${{ matrix.project }}
-          echo "Running tests..."
-          npx playwright test --project=${{ matrix.project }} -c playwright-legacy.config.js
-
-      - name: Upload Report
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        continue-on-error: true
-        with:
-          name: playwright-legacy-results-${{ matrix.project }}
-          path: playwright/test-results/
-          if-no-files-found: ignore
-
   # -----------------------------------------------------------------
   # RELEASE
   # -----------------------------------------------------------------
   release:
     name: Make Release
     if: ${{ !failure() && !cancelled() }}
-    needs: [tests-python, tests-playwright, tests-playwright-legacy, prepare]
+    needs: [tests, prepare]
     runs-on: ubuntu-latest
     env:
       SHOULD_DEPLOY: ${{needs.prepare.outputs.should_deploy}}
@@ -389,7 +240,7 @@ jobs:
   notify:
     name: Notify
     if: ${{ always() }}
-    needs: [prepare, tests-python, tests-playwright, tests-playwright-legacy, release]
+    needs: [prepare, tests, release]
     runs-on: ubuntu-latest
     env:
       PKG_VERSION: ${{needs.prepare.outputs.pkg_version}}
diff --git a/.github/workflows/ci-run-tests.yml b/.github/workflows/ci-run-tests.yml
index 4f601734b..9121bf8ae 100644
--- a/.github/workflows/ci-run-tests.yml
+++ b/.github/workflows/ci-run-tests.yml
@@ -1,4 +1,4 @@
-name: Run All Tests
+name: PR - Run All Tests
 
 on:
   pull_request:
@@ -13,150 +13,7 @@ on:
       - 'package.json'
 
 jobs:
-  tests-python:
-    name: Run Tests (Python)
-    runs-on: ubuntu-latest
-    container: ghcr.io/ietf-tools/datatracker-app-base:latest
-
-    services:
-      db:
-        image: ghcr.io/ietf-tools/datatracker-db:latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare for tests
-        run: |
-          chmod +x ./dev/tests/prepare.sh
-          sh ./dev/tests/prepare.sh
-
-      - name: Ensure DB is ready
-        run: |
-          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
-
-      - name: Run all tests
-        run: |
-          echo "Running checks..."
-          ./ietf/manage.py check
-          ./ietf/manage.py migrate --fake-initial
-          echo "Validating migrations..."
-          if ! ( ietf/manage.py makemigrations --dry-run --check --verbosity 3 ) ; then
-            echo "Model changes without migrations found."
-            echo ${MSG}
-            exit 1
-          fi
-          echo "Running tests..."
-          ./ietf/manage.py test --validate-html-harder --settings=settings_test
-          coverage xml
-
-      - name: Upload Coverage Results to Codecov
-        uses: codecov/codecov-action@v3.1.4
-        with:
-          files: coverage.xml
-
-      - name: Convert Coverage Results
-        if: ${{ always() }}
-        run: |
-          mv latest-coverage.json coverage.json
-
-      - name: Upload Coverage Results as Build Artifact
-        uses: actions/upload-artifact@v3.0.0
-        if: ${{ always() }}
-        with:
-          name: coverage
-          path: coverage.json
-
-  tests-playwright:
-    name: Run Tests (Playwright)
-    runs-on: macos-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        project: [chromium, firefox]
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - uses: actions/setup-node@v3
-        with:
-          node-version: '18'
-
-      - name: Run all tests
-        run: |
-          echo "Installing dependencies..."
-          yarn
-          echo "Installing Playwright..."
-          cd playwright
-          mkdir test-results
-          npm ci
-          npx playwright install --with-deps ${{ matrix.project }}
-          echo "Running tests..."
-          npx playwright test --project=${{ matrix.project }}
-
-      - name: Upload Report
-        uses: actions/upload-artifact@v3.0.0
-        if: ${{ always() }}
-        continue-on-error: true
-        with:
-          name: playwright-results-${{ matrix.project }}
-          path: playwright/test-results/
-          if-no-files-found: ignore
-
-  tests-playwright-legacy:
-    name: Run Tests (Playwright Legacy)
-    runs-on: ubuntu-latest
-    container: ghcr.io/ietf-tools/datatracker-app-base:latest
-    strategy:
-      fail-fast: false
-      matrix:
-        project: [chromium, firefox]
-
-    services:
-      db:
-        image: ghcr.io/ietf-tools/datatracker-db:latest
-
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare for tests
-        run: |
-          chmod +x ./dev/tests/prepare.sh
-          sh ./dev/tests/prepare.sh
-
-      - name: Ensure DB is ready
-        run: |
-          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
-
-      - name: Start Datatracker
-        run: |
-          echo "Running checks..."
-          ./ietf/manage.py check
-          ./ietf/manage.py migrate --fake-initial
-          echo "Starting datatracker..."
-          ./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local &
-          echo "Waiting for datatracker to be ready..."
-          /usr/local/bin/wait-for localhost:8000 -- echo "Datatracker ready"
-
-      - name: Run all tests
-        env:
-          # Required to get firefox to run as root:
-          HOME: ""
-        run: |
-          echo "Installing dependencies..."
-          yarn
-          echo "Installing Playwright..."
-          cd playwright
-          mkdir test-results
-          npm ci
-          npx playwright install --with-deps ${{ matrix.project }}
-          echo "Running tests..."
-          npx playwright test --project=${{ matrix.project }} -c playwright-legacy.config.js
-
-      - name: Upload Report
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        continue-on-error: true
-        with:
-          name: playwright-legacy-results-${{ matrix.project }}
-          path: playwright/test-results/
-          if-no-files-found: ignore
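+  # Delegate to the reusable tests workflow; PR runs never ignore lower-coverage failures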
+  tests:
+    uses: ./.github/workflows/tests.yml
+    with:
+      ignoreLowerCoverage: false
\ No newline at end of file
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 000000000..e8855f39e
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,164 @@
+name: Reusable Tests Workflow
+
+on:
+  workflow_call:
+    inputs:
+      ignoreLowerCoverage:
+        description: 'Ignore Lower Coverage'
+        default: false
+        required: true
+        type: boolean
+
+jobs:
+  tests-python:
+    name: Python Tests
+    runs-on: ubuntu-latest
+    container: ghcr.io/ietf-tools/datatracker-app-base:latest
+
+    services:
+      db:
+        image: ghcr.io/ietf-tools/datatracker-db:latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Prepare for tests
+        run: |
+          chmod +x ./dev/tests/prepare.sh
+          sh ./dev/tests/prepare.sh
+
+      - name: Ensure DB is ready
+        run: |
+          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
+
+      - name: Run all tests
+        shell: bash
+        run: |
+          echo "Running checks..."
+          ./ietf/manage.py check
+          ./ietf/manage.py migrate --fake-initial
+          echo "Validating migrations..."
+          if ! ( ietf/manage.py makemigrations --dry-run --check --verbosity 3 ) ; then
+            echo "Model changes without migrations found."
+            exit 1
+          fi
+          echo "Running tests..."
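+          # Reusable workflows read their inputs from the "inputs" context;
+          # "github.event.inputs" is only populated for workflow_dispatch events on the caller.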
+          if [[ "x${{ inputs.ignoreLowerCoverage }}" == "xtrue" ]]; then
+            echo "Lower coverage failures will be ignored."
+            ./ietf/manage.py test --validate-html-harder --settings=settings_test --ignore-lower-coverage
+          else
+            ./ietf/manage.py test --validate-html-harder --settings=settings_test
+          fi
+          coverage xml
+
+      - name: Upload Coverage Results to Codecov
+        uses: codecov/codecov-action@v3.1.4
+        with:
+          files: coverage.xml
+
+      - name: Convert Coverage Results
+        if: ${{ always() }}
+        run: |
+          mv latest-coverage.json coverage.json
+
+      - name: Upload Coverage Results as Build Artifact
+        uses: actions/upload-artifact@v3
+        if: ${{ always() }}
+        with:
+          name: coverage
+          path: coverage.json
+
+  tests-playwright:
+    name: Playwright Tests
+    runs-on: macos-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        project: [chromium, firefox]
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - uses: actions/setup-node@v3
+        with:
+          node-version: '18'
+
+      - name: Run all tests
+        run: |
+          echo "Installing dependencies..."
+          yarn
+          echo "Installing Playwright..."
+          cd playwright
+          mkdir test-results
+          npm ci
+          npx playwright install --with-deps ${{ matrix.project }}
+          echo "Running tests..."
+          npx playwright test --project=${{ matrix.project }}
+
+      - name: Upload Report
+        uses: actions/upload-artifact@v3
+        if: ${{ always() }}
+        continue-on-error: true
+        with:
+          name: playwright-results-${{ matrix.project }}
+          path: playwright/test-results/
+          if-no-files-found: ignore
+
+  tests-playwright-legacy:
+    name: Playwright Legacy Tests
+    runs-on: ubuntu-latest
+    container: ghcr.io/ietf-tools/datatracker-app-base:latest
+    strategy:
+      fail-fast: false
+      matrix:
+        project: [chromium, firefox]
+
+    services:
+      db:
+        image: ghcr.io/ietf-tools/datatracker-db:latest
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Prepare for tests
+        run: |
+          chmod +x ./dev/tests/prepare.sh
+          sh ./dev/tests/prepare.sh
+
+      - name: Ensure DB is ready
+        run: |
+          /usr/local/bin/wait-for db:5432 -- echo "DB ready"
+
+      - name: Start Datatracker
+        run: |
+          echo "Running checks..."
+          ./ietf/manage.py check
+          ./ietf/manage.py migrate --fake-initial
+          echo "Starting datatracker..."
+          ./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local &
+          echo "Waiting for datatracker to be ready..."
+          /usr/local/bin/wait-for localhost:8000 -- echo "Datatracker ready"
+
+      - name: Run all tests
+        env:
+          # Required to get firefox to run as root:
+          HOME: ""
+        run: |
+          echo "Installing dependencies..."
+          yarn
+          echo "Installing Playwright..."
+          cd playwright
+          mkdir test-results
+          npm ci
+          npx playwright install --with-deps ${{ matrix.project }}
+          echo "Running tests..."
+          npx playwright test --project=${{ matrix.project }} -c playwright-legacy.config.js
+
+      - name: Upload Report
+        uses: actions/upload-artifact@v3
+        if: ${{ always() }}
+        continue-on-error: true
+        with:
+          name: playwright-legacy-results-${{ matrix.project }}
+          path: playwright/test-results/
+          if-no-files-found: ignore
\ No newline at end of file
diff --git a/bin/add-old-drafts-from-archive.py b/bin/add-old-drafts-from-archive.py
new file mode 100644
index 000000000..f09c3b455
--- /dev/null
+++ b/bin/add-old-drafts-from-archive.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+import sys
+
+print("This is only here as documentation - please read the file")
+sys.exit(0)
+
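+# NOTE: the commented-out code below is the original (Python 2) script, kept for reference only.
+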
+# #!/usr/bin/env python
+# # Copyright The IETF Trust 2017-2019, All Rights Reserved
+
+# import datetime
+# import os
+# import sys
+# from pathlib import Path
+# from contextlib import closing
+
+# os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
+
+# import django
+# django.setup()
+
+# from django.conf import settings
+# from django.core.validators import validate_email, ValidationError
+# from ietf.utils.draft import PlaintextDraft
+# from ietf.submit.utils import update_authors
+# from ietf.utils.timezone import date_today
+
+# import debug # pyflakes:ignore
+
+# from ietf.doc.models import Document, NewRevisionDocEvent, DocEvent, State
+# from ietf.person.models import Person
+
+# system = Person.objects.get(name="(System)")
+# expired = State.objects.get(type='draft',slug='expired')
+
+# names = set()
+# print 'collecting draft names ...'
+# versions = 0
+# for p in Path(settings.INTERNET_DRAFT_PATH).glob('draft*.txt'):
+#     n = str(p).split('/')[-1].split('-')
+#     if n[-1][:2].isdigit():
+#         name = '-'.join(n[:-1])
+#         if '--' in name or '.txt' in name or '[' in name or '=' in name or '&' in name:
+#             continue
+#         if name.startswith('draft-draft-'):
+#             continue
+#         if name == 'draft-ietf-trade-iotp-v1_0-dsig':
+#             continue
+#         if len(n[-1]) != 6:
+#             continue
+#         if name.startswith('draft-mlee-'):
+#             continue
+#         names.add('-'.join(n[:-1]))
+
+# count=0
+# print 'iterating through names ...'
+# for name in sorted(names):
+#     if not Document.objects.filter(name=name).exists():
+#         paths = list(Path(settings.INTERNET_DRAFT_PATH).glob('%s-??.txt'%name))
+#         paths.sort()
+#         doc = None
+#         for p in paths:
+#             n = str(p).split('/')[-1].split('-')
+#             rev = n[-1][:2]
+#             with open(str(p)) as txt_file:
+#                 raw = txt_file.read()
+#             try:
+#                 text = raw.decode('utf8')
+#             except UnicodeDecodeError:
+#                 text = raw.decode('latin1')
+#             try:
+#                 draft = PlaintextDraft(text, txt_file.name, name_from_source=True)
+#             except Exception as e:
+#                 print name, rev, "Can't parse", p,":",e
+#                 continue
+#             if draft.errors and draft.errors.keys()!=['draftname',]:
+#                 print "Errors - could not process", name, rev, datetime.datetime.fromtimestamp(p.stat().st_mtime, datetime.timezone.utc), draft.errors, draft.get_title().encode('utf8')
+#             else:
+#                 time = datetime.datetime.fromtimestamp(p.stat().st_mtime, datetime.timezone.utc)
+#                 if not doc:
+#                     doc = Document.objects.create(name=name,
+#                                                   time=time,
+#                                                   type_id='draft',
+#                                                   title=draft.get_title(),
+#                                                   abstract=draft.get_abstract(),
+#                                                   rev = rev,
+#                                                   pages=draft.get_pagecount(),
+#                                                   words=draft.get_wordcount(),
+#                                                   expires=time+datetime.timedelta(settings.INTERNET_DRAFT_DAYS_TO_EXPIRE),
+#                                                   )
+#                     DocAlias.objects.create(name=doc.name).docs.add(doc)
+#                     doc.states.add(expired)
+#                 # update authors
+#                 authors = []
+#                 for author in draft.get_author_list():
+#                     full_name, first_name, middle_initial, last_name, name_suffix, email, country, company = author
+
+#                     author_name = full_name.replace("\n", "").replace("\r", "").replace("<", "").replace(">", "").strip()
+
+#                     if email:
+#                         try:
+#                             validate_email(email)
+#                         except ValidationError:
+#                             email = ""
+
+#                     def turn_into_unicode(s):
+#                         if s is None:
+#                             return u""
+
+#                         if isinstance(s, unicode):
+#                             return s
+#                         else:
+#                             try:
+#                                 return s.decode("utf-8")
+#                             except UnicodeDecodeError:
+#                                 try:
+#                                     return s.decode("latin-1")
+#                                 except UnicodeDecodeError:
+#                                     return ""
+
+#                     author_name = turn_into_unicode(author_name)
+#                     email = turn_into_unicode(email)
+#                     company = turn_into_unicode(company)
+
+#                     authors.append({
+#                         "name": author_name,
+#                         "email": email,
+#                         "affiliation": company,
+#                         "country": country
+#                     })
+#                 dummysubmission=type('', (), {})() #https://stackoverflow.com/questions/19476816/creating-an-empty-object-in-python
+#                 dummysubmission.authors = authors
+#                 update_authors(doc,dummysubmission)
+
+#                 # add a docevent with words explaining where this came from
+#                 events = []
+#                 e = NewRevisionDocEvent.objects.create(
+#                     type="new_revision",
+#                     doc=doc,
+#                     rev=rev,
+#                     by=system,
+#                     desc="New version available: %s-%s.txt" % (doc.name, doc.rev),
+#                     time=time,
+#                 )
+#                 events.append(e)
+#                 e = DocEvent.objects.create(
+#                     type="comment",
+#                     doc = doc,
+#                     rev = rev,
+#                     by = system,
+#                     desc = "Revision added from id-archive on %s by %s"%(date_today(),sys.argv[0]),
+#                     time=time,
+#                 )
+#                 events.append(e)
+#                 doc.time = time
+#                 doc.rev = rev
+#                 doc.save_with_history(events)
+#                 print "Added",name, rev
diff --git a/ietf/api/tests.py b/ietf/api/tests.py
index e4ac5c170..4c1440882 100644
--- a/ietf/api/tests.py
+++ b/ietf/api/tests.py
@@ -581,6 +581,7 @@ class CustomApiTests(TestCase):
         url = urlreverse('ietf.api.views.PersonalInformationExportView')
         login_testing_unauthorized(self, person.user.username, url)
         r = self.client.get(url)
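+        # Check the status before parsing, so a failing request produces a clear assertion error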
+        self.assertEqual(r.status_code, 200)
         jsondata = r.json()
         data = jsondata['person.person'][str(person.id)]
         self.assertEqual(data['name'], person.name)
diff --git a/ietf/bin/.gitignore b/ietf/bin/.gitignore
deleted file mode 100644
index c7013ced9..000000000
--- a/ietf/bin/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/*.pyc
-/settings_local.py
diff --git a/ietf/bin/2016-05-25-collect-photos b/ietf/bin/2016-05-25-collect-photos
deleted file mode 100755
index dedda767a..000000000
--- a/ietf/bin/2016-05-25-collect-photos
+++ /dev/null
@@ -1,296 +0,0 @@
-#!/usr/bin/env python
-
-import os, re, sys, shutil, pathlib
-from collections import namedtuple
-from PIL import Image
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-import django
-django.setup()
-
-from django.conf import settings
-from django.utils.text import slugify
-
-import debug
-
-from ietf.group.models import Role, Person
-from ietf.person.name import name_parts
-
-old_images_dir = ''
-new_images_dir = settings.PHOTOS_DIR
-
-if not os.path.exists(new_images_dir):
-    print("New images directory does not exist: %s" % new_images_dir)
-    sys.exit(1)
-
-old_image_files = []
-for dir in settings.OLD_PHOTO_DIRS:
-    if not os.path.exists(dir):
-        print("Old images directory does not exist: %s" % dir)
-        sys.exit(1)
-    old_image_files += [ f for f in pathlib.Path(dir).iterdir() if f.is_file() and f.suffix.lower() in ['.jpg', '.jpeg', '.png'] ]
-
-photo = namedtuple('photo', ['path', 'name', 'ext', 'width', 'height', 'time', 'file'])
-
-old_images = []
-for f in old_image_files:
-    path = str(f)
-    img = Image.open(path)
-    old_images.append(photo(path, f.stem.decode('utf8'), f.suffix, img.size[0], img.size[1], f.stat().st_mtime, f))
-
-# Fix up some names:
-
-def fix_missing_surnames(images):
-    replacement = {
-        "alissa": "alissa-cooper",
-        "alissa1": "alissa-cooper",
-        "andrei": "andrei-robachevsky",
-        "bernard": "bernard-aboba",
-        "danny": "danny-mcpherson",
-        "danny1": "danny-mcpherson",
-        "dthaler": "dave-thaler",
-        "eliot-mug": "eliot-lear",
-        "erik.nordmark-300": "erik-nordmark",
-        "hannes": "hannes-tschofenig",
-        "hildebrand": "joe-hildebrand",
-        "housley": "russ-housley",
-        "jariarkko": "jari-arkko",
-        "joel": "joel-jaeggli",
-        "joel1": "joel-jaeggli",
-        "joel2": "joel-jaeggli",
-        "jon": "jon-peterson",
-        "kessens": "david-kessens",
-        "klensin": "john-klensin",
-        "lars": "lars-eggert",
-        "lars1": "lars-eggert",
-        "marc_blanchet": "marc-blanchet",
-        "marcelo": "marcelo-bagnulo",
-        "olaf": "olaf-kolkman",
-        "olaf1": "olaf-kolkman",
-        "ross": "ross-callon",
-        "spencer": "spencer-dawkins",
-        "spencer1": "spencer-dawkins",
-        "vijay": "vijay-gurbani",
-        "xing": "xing-li",
-    }
-
-    for i in range(len(images)):
-        img = images[i]
-        name = re.sub('-[0-9]+x[0-9]+', '', img.name)
-        if '/iab/' in img.path and name in replacement:
-            name = replacement[name]
-        images[i] = photo(img.path, name, img.ext, img.width, img.height, img.time, img.file)
-
-
-fix_missing_surnames(old_images)
-
-interesting_persons = set(Person.objects.all())
-
-name_alias = {
-    u"andy": [u"andrew", ],
-    u"ben": [u"benjamin", ],
-    u"bill": [u"william", ],
-    u"bob": [u"robert", ],
-    u"chris": [u"christopher", u"christian"],
-    u"dan": [u"daniel", ],
-    u"dave": [u"david", ],
-    u"dick": [u"richard", ],
-    u"fred": [u"alfred", ],
-    u"geoff": [u"geoffrey", ],
-    u"jake": [u"jacob", ],
-    u"jerry": [u"gerald", ],
-    u"jim": [u"james", ],
-    u"joe": [u"joseph", ],
-    u"jon": [u"jonathan", ],
-    u"mike": [u"michael", ],
-    u"ned": [u"edward", ],
-    u"pete": [u"peter", ],
-    u"ron": [u"ronald", ],
-    u"russ": [u"russel", ],
-    u"steve": [u"stephen", ],
-    u"ted": [u"edward", ],
-    u"terry": [u"terence", ],
-    u"tom": [u"thomas", ],
-    u"wes": [u"wesley", ],
-    u"will": [u"william", ],
-
-    u"beth": [u"elizabeth", ],
-    u"liz": [u"elizabeth", ],
-    u"lynn": [u"carolyn", ],
-    u"pat": [u"patricia", u"patrick", ],
-    u"sue": [u"susan", ],
-}
-# Add lookups from long to short, from the initial set
-for key,value in name_alias.items():
-    for item in value:
-        if item in name_alias:
-            name_alias[item] += [ key ];
-        else:
-            name_alias[item] = [ key ];
-
-exceptions = {
-'Aboba' : 'aboba-bernard',
-'Bernardos' : 'cano-carlos',
-'Bormann' : 'bormann-carsten',
-'Hinden' : 'hinden-bob',
-'Hutton' : 'hutton-andy',
-'Narten' : 'narten-thomas', # but there's no picture of him
-'O\'Donoghue' : 'odonoghue-karen',
-'Przygienda' : 'przygienda-antoni',
-'Salowey' : 'salowey-joe',
-'Gunter Van de Velde' : 'vandevelde-gunter',
-'Eric Vyncke' : 'vynke-eric',
-'Zuniga' : 'zuniga-carlos-juan',
-'Zhen Cao' : 'zhen-cao',
-'Jamal Hadi Salim': 'hadi-salim-jamal',
-}
-
-# Manually copied Bo Burman and Thubert Pascal from wg/photos/
-# Manually copied Victor Pascual (main image, not thumb) from wg/
-# Manually copied Eric Vync?ke (main image, not thumb) from wg/photos/
-# Manually copied Danial King (main image, not thumb) from wg/photos/
-# Manually copied the thumb (not labelled as such) for Tianran Zhou as both the main and thumb image from wg/photos/
-
-processed_files = []
-
-for person in sorted(list(interesting_persons),key=lambda x:x.last_name()+x.ascii):
-    substr_pattern = None
-    for exception in exceptions:
-        if exception in person.ascii:
-            substr_pattern = exceptions[exception]
-            break
-    if not person.ascii.strip():
-        print(" Setting person.ascii for %s" % person.name)
-        person.ascii = person.name.encode('ascii', errors='replace').decode('ascii')
-
-    _, first, _, last, _ = person.ascii_parts()
-    first = first.lower()
-    last = last.lower()
-    if not substr_pattern:
-        substr_pattern = slugify("%s %s" % (last, first))
-
-    if first in ['', '<>'] or last in ['', '<>']:
-        continue
-
-    #debug.show('1, substr_pattern')
-
-    candidates = [x for x in old_images if x.name.lower().startswith(substr_pattern)]
-    # Also check the reverse the name order (necessary for Deng Hui, for instance)
-    substr_pattern = slugify("%s %s" % (first, last))
-    #debug.show('2, substr_pattern')
-    prev_len = len(candidates)
-    candidates += [x for x in old_images if x.name.lower().startswith(substr_pattern)]
-    if prev_len < len(candidates) :
-        print(" Found match with '%s %s' for '%s %s'" % (last, first, first, last, ))
-    # If no joy, try a short name
-    if first in name_alias:
-        prev_len = len(candidates)
-        for alias in name_alias[first]:
-            substr_pattern = slugify("%s %s" % (last, alias))
-            #debug.show('3, substr_pattern')
-            candidates += [x for x in old_images if x.name.lower().startswith(substr_pattern)]
-            if prev_len < len(candidates):
-                print(" Found match with '%s %s' for '%s %s'" % (alias, last, first, last, ))
-
-
-#     # If still no joy, try with Person.plain_name() (necessary for Donald Eastlake)
-#     if not candidates:
-#         prefix, first, middle, last, suffix = person.name_parts()
-#         name_parts = person.plain_name().lower().split()
-#
-#         substr_pattern = u'-'.join(name_parts[-1:]+name_parts[0:1])
-#         candidates = [x for x in old_images if x.name.lower().startswith(substr_pattern)]
-#         # If no joy, try a short name
-#         if not candidates and first in name_alias:
-#             prev_len = len(candidates)
-#             for alias in name_alias[first]:
-#                 substr_pattern = u'-'.join(name_parts[-1:]+[alias])
-#                 candidates += [x for x in old_images if x.name.lower().startswith(substr_pattern)]
-#                 if prev_len < len(candidates) :
-#                     print(" Used '%s %s' instead of '%s %s'" % (alias, last, first, last, ))

-#     # Fixup for other exceptional cases
-#     if person.ascii=="David Oran":
-#         candidates = ['oran-dave-th.jpg','oran-david.jpg']
-#
-#     if person.ascii=="Susan Hares":
-#         candidates = ['hares-sue-th.jpg','hares-susan.JPG']
-#
-#     if person.ascii=="Mahesh Jethanandani":
-#         candidates = ['Mahesh-Jethanandani-th.jpg','Jethanandani-Mahesh.jpg']
-
-    processed_files += [ c.path for c in candidates ]
-
-    # We now have a list of candidate photos.
-    # * Consider anything less than 200x200 a thumbnail
-    # * For the full photo, sort by size (width) and time
-    # * For the thumbnail:
-    #   - first look for a square photo less than 200x200
-    #   - if none found, then for the first in the sorted list less than 200x200
-    #   - if none found, then the smallest photo
-    if candidates:
-        candidates.sort(key=lambda x: "%04d-%d" % (x.width, x.time))
-        iesg_cand = [ c for c in candidates if '/iesg/' in c.path ]
-        iab_cand = [ c for c in candidates if '/iab/' in c.path ]
-        if iesg_cand:
-            full = iesg_cand[-1]
-            thumb = iesg_cand[-1]
-        elif iab_cand:
-            full = iab_cand[-1]
-            thumb = iab_cand[0]
-        else:
-            full = candidates[-1]
-            thumbs = [ c for c in candidates if c.width==c.height and c.width <= 200 ]
-            if not thumbs:
-                thumbs = [ c for c in candidates if c.width==c.height ]
-            if not thumbs:
-                thumbs = [ c for c in candidates if c.width <= 200 ]
-            if not thumbs:
-                thumbs = candidates[:1]
-            thumb = thumbs[-1]
-        candidates = [ thumb, full ]
-
-    # At this point we either have no candidates or two. If two, the first will be the thumb
-
-    def copy(old, new):
-        if not os.path.exists(new):
-            print("Copying "+old+" to "+new)
-            shutil.copy(old, new)
-            shutil.copystat(old, new)
-
-    assert(len(candidates) in [0,2])
-    if len(candidates)==2:
-        thumb, full = candidates
-
-        new_name = person.photo_name(thumb=False)+full.ext.lower()
-        new_thumb_name = person.photo_name(thumb=True)+thumb.ext.lower()
-
-        copy( full.path, os.path.join(new_images_dir,new_name) )
-
-        #
-        copy( thumb.path, os.path.join(new_images_dir,new_thumb_name) )
-
-
-print("")
-not_processed = 0
-for file in old_image_files:
-    if ( file.is_file()
-        and not file.suffix.lower() in ['.txt', '.lck', '.html',]
-        and not file.name.startswith('index.')
-        and not file.name.startswith('milestoneupdate')
-        and not file.name.startswith('nopicture')
-        and not file.name.startswith('robots.txt')
-        ):
-        if not str(file).decode('utf8') in processed_files:
-            not_processed += 1
-            print(u"Not processed: "+str(file).decode('utf8'))
-print("")
-print("Not processed: %s files" % not_processed)
diff --git a/ietf/bin/announce-header-change b/ietf/bin/announce-header-change
deleted file mode 100755
index 256324e31..000000000
--- a/ietf/bin/announce-header-change
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os, sys
-import datetime
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-import django
-django.setup()
-
-from django.core import management
-from django.template.loader import render_to_string
-
-from ietf import settings
-from ietf.utils.mail import send_mail_preformatted
-from ietf.utils.mail import send_mail
-
-target_date=datetime.date(year=2014,month=1,day=24)
-
-send_mail(request = None,
-          to = "IETF-Announce ",
-          frm = "The IESG ",
-          subject = "Upcoming change to announcement email header fields (using old header)",
-          template = "utils/header_change_content.txt",
-          context = dict(oldornew='old', target_date=target_date),
-          extra = {'Reply-To' : 'ietf@ietf.org',
-                   'Sender' : '',
-                  }
-          )
-
-send_mail(request = None,
-          to = "IETF-Announce:;",
-          frm = "The IESG ",
-          subject = "Upcoming change to announcement email header fields (using new header)",
-          template = "utils/header_change_content.txt",
-          context = dict(oldornew='new', target_date=target_date),
-          extra = {'Reply-To' : 'IETF Discussion List ',
-                   'Sender' : '',
-                  },
-          bcc = '',
-          )
diff --git a/ietf/bin/create-break-sessions b/ietf/bin/create-break-sessions
deleted file mode 100755
index 52ce044d8..000000000
--- a/ietf/bin/create-break-sessions
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# -*- Python -*-
-#
-
-import os, sys
-
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-import django
-django.setup()
-
-from ietf.group.models import Group
-from ietf.person.models import Person
-from ietf.name.models import SessionStatusName
-from ietf.meeting.models import Meeting, Session, ScheduledSession as ScheduleTimeslotSSessionAssignment
-
-secretariat = Group.objects.get(acronym='secretariat')
-system = Person.objects.get(id=1, name='(System)')
-scheduled = SessionStatusName.objects.get(slug='sched')
-
-for meeting in Meeting.objects.filter(type="ietf").order_by("date"):
-    print "Checking %s schedules ..." % meeting
-    brk, __ = Session.objects.get_or_create(meeting=meeting, group=secretariat, requested_by=system, status=scheduled, name='Break', type_id='break',)
-    reg, __ = Session.objects.get_or_create(meeting=meeting, group=secretariat, requested_by=system, status=scheduled, name='Registration', type_id='reg',)
-
-    for schedule in meeting.schedule_set.all():
-        print " Checking for missing Break and Reg sessions in %s" % schedule
-        for timeslot in meeting.timeslot_set.all():
-            if timeslot.type_id == 'break' and not (schedule.base and SchedTimeSessAssignment.objects.filter(timeslot=timeslot, session=brk, schedule=schedule.base).exists()):
-                assignment, created = SchedTimeSessAssignment.objects.get_or_create(timeslot=timeslot, session=brk, schedule=schedule)
-                if created:
-                    print " Added %s break assignment" % timeslot
-            if timeslot.type_id == 'reg' and not (schedule.base and SchedTimeSessAssignment.objects.filter(timeslot=timeslot, session=reg, schedule=schedule.base).exists()):
-                assignment, created = SchedTimeSessAssignment.objects.get_or_create(timeslot=timeslot, session=reg, schedule=schedule)
-                if created:
-                    print " Added %s registration assignment" % timeslot
diff --git a/ietf/bin/create-charter-newrevisiondocevents b/ietf/bin/create-charter-newrevisiondocevents
deleted file mode 100755
index d91c0b5b7..000000000
--- a/ietf/bin/create-charter-newrevisiondocevents
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-
-version = "0.10"
-program = os.path.basename(sys.argv[0])
-progdir = os.path.dirname(sys.argv[0])
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-
-# ----------------------------------------------------------------------
-def note(string):
-    sys.stdout.write("%s\n" % (string))
-
-# ----------------------------------------------------------------------
-def warn(string):
-    sys.stderr.write(" * %s\n" % (string))
-
-# ------------------------------------------------------------------------------
-
-import re
-import datetime
-
-import django
-django.setup()
-
-from django.conf import settings
-
-from ietf.utils.path import path as Path
-from ietf.doc.models import Document, NewRevisionDocEvent
-from ietf.person.models import Person
-
-system_entity = Person.objects.get(name="(System)")
-
-charterdir = Path(settings.CHARTER_PATH)
-for file in charterdir.files("charter-ietf-*.txt"):
-    fname = file.name
-    ftime = datetime.datetime.fromtimestamp(file.mtime, datetime.timezone.utc)
-    match = re.search("^(?P[a-z0-9-]+)-(?P\d\d-\d\d)\.txt$", fname)
-    if match:
-        name = match.group("name")
-        rev = match.group("rev")
-    else:
-        match = re.search("^(?P[a-z0-9-]+)-(?P\d\d)\.txt$", fname)
-        if match:
-            name = match.group("name")
-            rev = match.group("rev")
-        else:
-            warn("Failed extracting revision from filename: '%s'" % fname)
-    try:
-        doc = Document.objects.get(type="charter", name=name)
-        try:
-            event = NewRevisionDocEvent.objects.get(doc=doc, type='new_revision', rev=rev)
-            note(".")
-        except NewRevisionDocEvent.MultipleObjectsReturned, e:
-            warn("Multiple NewRevisionDocEvent exists for '%s'" % fname)
-        except NewRevisionDocEvent.DoesNotExist:
-            event = NewRevisionDocEvent(doc=doc, type='new_revision', rev=rev, by=system_entity, time=ftime, desc="")
-            event.save()
-            note("Created new NewRevisionDocEvent for %s-%s" % (name, rev))
-    except Document.DoesNotExist:
-        warn("Document not found: '%s'; no NewRevisionDocEvent created for '%s'" % (name, fname))
-
diff --git a/ietf/bin/dump-draft-info b/ietf/bin/dump-draft-info
deleted file mode 100755
index 3ac2e4a58..000000000
--- a/ietf/bin/dump-draft-info
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-
-version = "0.10"
-program = os.path.basename(sys.argv[0])
-progdir = os.path.dirname(sys.argv[0])
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-import django
-django.setup()
-
-from django.template import Template, Context
-
-from ietf.doc.models import Document
-from ietf.person.models import Person
-
-drafts = Document.objects.filter(type="draft")
-
-ads = {}
-for p in Person.objects.filter(ad_document_set__type="draft").distinct():
-    ads[p.id] = p.role_email("ad")
-
-for d in drafts:
-    d.ad_email = ads.get(d.ad_id)
-
-templ_text = """{% for draft in drafts %}{% if draft.notify or draft.ad_email %}{{ draft.name }}{% if draft.notify %} docnotify='{{ draft.notify|cut:"<"|cut:">" }}'{% endif %}{% if draft.ad_email %} docsponsor='{{ draft.ad_email }}'{% endif %}
-{% endif %}{% endfor %}"""
-template = Template(templ_text)
-context = Context({ 'drafts':drafts })
-
-print template.render(context).encode('utf-8')
diff --git a/ietf/bin/email-sync-discrepancies b/ietf/bin/email-sync-discrepancies
deleted file mode 100755
index 3593fd126..000000000
--- a/ietf/bin/email-sync-discrepancies
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os, sys
-import syslog
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-from optparse import OptionParser
-
-parser = OptionParser()
-parser.add_option("-t", "--to", dest="to",
-                  help="Email address to send report to", metavar="EMAIL")
-
-options, args = parser.parse_args()
-
-syslog.openlog(os.path.basename(__file__), syslog.LOG_PID, syslog.LOG_USER)
-
-import django
-django.setup()
-
-from ietf.sync.mails import email_discrepancies
-
-receivers = ["iesg-secretary@ietf.org"]
-
-if options.to:
-    receivers = [options.to]
-
-email_discrepancies(receivers)
-
-syslog.syslog("Emailed sync discrepancies to %s" % receivers)
diff --git a/ietf/bin/find-submission-confirmation-email-in-postfix-log b/ietf/bin/find-submission-confirmation-email-in-postfix-log
deleted file mode 100755
index 6bf41574a..000000000
--- a/ietf/bin/find-submission-confirmation-email-in-postfix-log
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-
-import io
-import os
-import sys
-
-version = "0.10"
-program = os.path.basename(sys.argv[0])
-progdir = os.path.dirname(sys.argv[0])
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-# ----------------------------------------------------------------------
-def note(string):
-    sys.stdout.write("%s\n" % (string))
-
-# ----------------------------------------------------------------------
-def warn(string):
-    sys.stderr.write(" * %s\n" % (string))
-
-# ------------------------------------------------------------------------------
-
-import re
-from datetime import datetime as Datetime
-import time
-import warnings
-warnings.filterwarnings('ignore', message='the sets module is deprecated', append=True)
-
-import django
-django.setup()
-
-from django.conf import settings
-
-from ietf.utils.path import path as Path
-
-from ietf.submit.models import Submission
-from ietf.doc.models import Document
-
-
-
-args = sys.argv[1:]
-if len(args) < 3:
-    warn("Expected '$ %s DRAFTNAME USER.LOG POSTFIX.LOG', but found no arguments -- exiting" % program)
-    sys.exit(1)
-
-draft = args[0]
-if re.search("\.txt$", draft):
-    draft = draft[:-4]
-if re.search("-\d\d$", draft):
-    draft = draft[:-3]
-
-if len(args) == 1:
-    logfiles = [ arg[1] ]
-else:
-    logfiles = args[1:]
-
-from_email = settings.IDSUBMIT_FROM_EMAIL
-if "<" in from_email:
-    from_email = from_email.split("<")[1].split(">")[0]
-
-submission = Submission.objects.filter(name=draft).latest('submission_date')
-document = Document.objects.get(name=draft)
-emails = [ author.email.address for author in document.documentauthor_set.all() if author.email ]
-
-timestrings = []
-for file in [ Path(settings.INTERNET_DRAFT_PATH) / ("%s-%s.txt"%(draft, submission.rev)),
-              Path(settings.IDSUBMIT_STAGING_PATH) / ("%s-%s.txt"%(draft, submission.rev)) ]:
-    if os.path.exists(file):
-        upload_time = time.localtime(file.mtime)
-        ts = time.strftime("%b %d %H:%M", upload_time)
-        timestrings += [ ts ]
-        timestrings += [ ts[:-1] + chr(((ord(ts[-1])-ord('0')+1)%10)+ord('0')) ]
-print "Looking for mail log lines timestamped %s, also checking %s ..." % (timestrings[0], timestrings[1])
-
-for log in logfiles:
-    print "\n  Checking %s ...\n" % log
-    if log.endswith('.gz'):
-        import gzip
-        logfile = gzip.open(log)
-    else:
-        logfile = io.open(log)
-    queue_ids = []
-    for line in logfile:
-        if from_email in line and "Confirmation for Auto-Post of I-D "+draft in line:
-            ts = line[:12]
-            timestrings += [ ts ]
-            print "Found a mention of %s, adding timestamp %s: \n   %s" % (draft, ts, line)
-        for ts in timestrings:
-            if line.startswith(ts):
-                if from_email in line:
-                    for to_email in emails:
-                        if to_email in line:
-                            sys.stdout.write(line)
-                            if "queued_as:" in line:
-                                queue_ids += [ line.split("queued_as:")[1].split(",")[0] ]
-                elif queue_ids:
-                    for qi in queue_ids:
-                        if qi in line:
-                            sys.stdout.write(line)
diff --git a/ietf/bin/interim_minutes_reminder b/ietf/bin/interim_minutes_reminder
deleted file mode 100755
index 7f2f84f73..000000000
--- a/ietf/bin/interim_minutes_reminder
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# -*- Python -*-
-#
-'''
-This script calls ietf.meeting.helpers.check_interim_minutes() which sends
-a reminder email for interim meetings that occurred 10 days ago but still
-don't have minutes.
-'''
-
-# Set PYTHONPATH and load environment variables for standalone script -----------------
-import os, sys
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-import django
-django.setup()
-# -------------------------------------------------------------------------------------
-
-from ietf.meeting.helpers import check_interim_minutes
-
-check_interim_minutes()
diff --git a/ietf/bin/list-role-holder-emails b/ietf/bin/list-role-holder-emails
deleted file mode 100755
index 6d6c16046..000000000
--- a/ietf/bin/list-role-holder-emails
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-
-
-import os, sys
-import syslog
-
-# boilerplate
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings"
-
-virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py")
-if os.path.exists(virtualenv_activation):
-    execfile(virtualenv_activation, dict(__file__=virtualenv_activation))
-
-syslog.openlog(os.path.basename(__file__), syslog.LOG_PID, syslog.LOG_USER)
-
-import django
-django.setup()
-
-from django.utils.encoding import force_str
-from ietf.group.models import Role
-
-addresses = set()
-for role in Role.objects.filter(
-        group__state__slug='active',
-        group__type__in=['ag','area','dir','iab','ietf','irtf','nomcom','rg','team','wg','rag']):
-    #sys.stderr.write(str(role)+'\n')
-    for e in role.person.email_set.all():
-        if e.active and not e.address.startswith('unknown-email-'):
-            addresses.add(e.address)
-
-addresses = list(addresses)
-addresses.sort()
-for a in addresses:
-    print(force_str(a))
diff --git a/ietf/bin/pretty-xml-dump b/ietf/bin/pretty-xml-dump
deleted file mode 100755
index 22abc08a6..000000000
--- a/ietf/bin/pretty-xml-dump
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-python manage.py dumpdata --format=xml "$@" | sed -e 's/<\/*object/\
-  &/g' -e 's/
')][1:]))
+#         else:
+#             retval.append(ParsedAuthor(a.strip(),None))
+
+#     return retval
+
+# def calculate_changes(tracker_persons,tracker_emails,names,emails):
+#     adds = set()
+#     deletes = set()
+#     for email in emails:
+#         if email and email!='none' and email not in ignore_addresses:
+#             p = Person.objects.filter(email__address=email).first()
+#             if p:
+#                 if not set(map(unicode.lower,p.email_set.values_list('address',flat=True))).intersection(tracker_emails):
+#                     adds.add(email)
+#             else:
+#                 #person_name = names[emails.index(email)]
+#                 adds.add(email)
+#     for person in tracker_persons:
+#         if not set(map(unicode.lower,person.email_set.values_list('address',flat=True))).intersection(emails):
+#             match = False
+#             for index in [i for i,j in enumerate(emails) if j=='none' or not j]:
+#                 if names[index].split()[-1].lower()==person.last_name().lower():
+#                     match = True
+#             if not match:
+#                 deletes.add(person)
+#     return adds, deletes
+
+# def _main():
+
+#     parser = argparse.ArgumentParser(description="Recalculate RFC documentauthor_set"+'\n\n'+__doc__,
+#                                      formatter_class=argparse.RawDescriptionHelpFormatter,)
+#     parser.add_argument('-v','--verbose',help="Show the action taken for each RFC",action='store_true')
+#     parser.add_argument('--rfc',type=int, nargs='*',help="Only recalculate the given rfc numbers",dest='rfcnumberlist')
+#     args = parser.parse_args()
+
+#     probable_email_match = set()
+#     probable_duplicates = []
+
+#     all_the_email = get_all_the_email()
+#     author_names, author_emails = get_rfc_data()
+
+#     stats = { 'rfc not in tracker' :0,
+#               'same addresses' :0,
+#               'different addresses belonging to same people' :0,
+#               'same names, rfced emails do not match' :0,
+#               'rfced data is unusable' :0,
+#               "data doesn't match but no changes found" :0,
+#               'changed authors' :0, }
+
+#     for rfc_num in args.rfcnumberlist or sorted(author_names.keys()):
+
+#         rfc = Document.objects.filter(docalias__name='rfc%s'%rfc_num).first()
+
+#         if not rfc:
+#             if args.verbose:
+#                 show_verbose(rfc_num,'rfc not in tracker')
+#             stats['rfc not in tracker'] += 1
+#             continue
+
+#         rfced_emails = set(author_emails[rfc_num])
+#         tracker_emails = set(map(unicode.lower,rfc.authors.values_list('address',flat=True)))
+#         tracker_persons = set([x.person for x in rfc.authors.all()])
+#         matching_emails = get_matching_emails(all_the_email,rfced_emails)
+#         rfced_persons = set([x.person for x in matching_emails])
+#         known_emails = set([e.l_address for e in matching_emails])
+#         unknown_emails = rfced_emails - known_emails
+#         unknown_persons = tracker_persons-rfced_persons
+
+#         rfced_lastnames = sorted([n.split()[-1].lower() for n in author_names[rfc_num]])
+#         tracker_lastnames = sorted([p.last_name().lower() for p in tracker_persons])
+
+#         if rfced_emails == tracker_emails:
+#             if args.verbose:
+#                 show_verbose(rfc_num,'tracker and rfc editor have the same addresses')
+#             stats['same addresses'] += 1
+#             continue
+
+#         if len(rfced_emails)==len(tracker_emails) and not 'none' in author_emails[rfc_num]:
+#             if tracker_persons == rfced_persons:
+#                 if args.verbose:
+#                     show_verbose(rfc_num,'tracker and rfc editor have the different addresses belonging to same people')
+#                 stats['different addresses belonging to same people'] += 1
+#                 continue
+#             else:
+#                 if len(unknown_emails)==1 and len(tracker_persons-rfced_persons)==1:
+#                     p = list(tracker_persons-rfced_persons)[0]
+#                     probable_email_match.add(u"%s is probably %s (%s) : %s "%(list(unknown_emails)[0], p, p.pk, rfc_num))
+#                 elif len(unknown_emails)==len(unknown_persons):
+#                     probable_email_match.add(u"%s are probably %s : %s"%(unknown_emails,[(p.ascii,p.pk) for p in unknown_persons],rfc_num))
+#                 else:
+#                     probable_duplicates.append((tracker_persons^rfced_persons,rfc_num))
+
+#         if tracker_lastnames == rfced_lastnames:
+#             if args.verbose:
+#                 show_verbose(rfc_num,"emails don't match up, but person names appear to be the same")
+#             stats[ 'same names, rfced emails do not match'] += 1
+#             continue
+
+#         use_rfc_data = bool(len(author_emails[rfc_num])==len(author_names[rfc_num]))
+#         if not use_rfc_data:
+#             if args.verbose:
+#                 print 'Ignoring rfc database for rfc%d'%rfc_num
+#             stats[ 'rfced data is unusable'] += 1
+
+#         if use_rfc_data:
+#             adds, deletes = calculate_changes(tracker_persons,tracker_emails,author_names[rfc_num],author_emails[rfc_num])
+#         parsed_authors=get_parsed_authors(rfc_num)
+#         parsed_adds, parsed_deletes = calculate_changes(tracker_persons,tracker_emails,[x.name for x in parsed_authors],[x.address for x in parsed_authors])
+
+#         for e in adds.union(parsed_adds) if use_rfc_data else parsed_adds:
+#             if not e or e in ignore_addresses:
+#                 continue
+#             if not Person.objects.filter(email__address=e).exists():
+#                 if e not in parsed_adds:
+#                     #print rfc_num,"Would add",e,"as",author_names[rfc_num][author_emails[rfc_num].index(e)],"(rfced database)"
+#                     print "(address='%s',name='%s'),"%(e,author_names[rfc_num][author_emails[rfc_num].index(e)]),"# (rfced %d)"%rfc_num
+#                     for p in Person.objects.filter(name__iendswith=author_names[rfc_num][author_emails[rfc_num].index(e)].split(' ')[-1]):
+#                         print "\t", p.pk, p.ascii
+#                 else:
+#                     name = [x.name for x in parsed_authors if x.address==e][0]
+#                     p = Person.objects.filter(name=name).first()
+#                     if p:
+#                         #print e,"is probably",p.pk,p
+#                         print "'%s': %d, # %s (%d)"%(e,p.pk,p.ascii,rfc_num)
+
+#                     else:
+#                         p = Person.objects.filter(ascii=name).first()
+#                         if p:
+#                             print e,"is probably",p.pk,p
+#                             print "'%s': %d, # %s (%d)"%(e,p.pk,p.ascii,rfc_num)
+#                         else:
+#                             p = Person.objects.filter(ascii_short=name).first()
+#                             if p:
+#                                 print e,"is probably",p.pk,p
+#                                 print "'%s': %d, # %s (%d)"%(e,p.pk,p.ascii,rfc_num)
+#                     #print rfc_num,"Would add",e,"as",name,"(parsed)"
+#                     print "(address='%s',name='%s'),"%(e,name),"# (parsed %d)"%rfc_num
+#                     for p in Person.objects.filter(name__iendswith=name.split(' ')[-1]):
+#                         print "\t", p.pk, p.ascii
+
+#         if False: # This was a little useful, but the noise in the rfc_ed file keeps it from being completely useful
+#             for p in deletes:
+#                 for n in author_names[rfc_num]:
+#                     if p.last_name().lower()==n.split()[-1].lower():
+#                         email_candidate = author_emails[rfc_num][author_names[rfc_num].index(n)]
+#                         email_found = Email.objects.filter(address=email_candidate).first()
+#                         if email_found:
+#                             probable_duplicates.append((set([p,email_found.person]),rfc_num))
+#                         else:
+#                             probable_email_match.add(u"%s is probably %s (%s) : %s"%(email_candidate, p, p.pk, rfc_num))
+
+#         if args.verbose:
+#             if use_rfc_data:
+#                 working_adds = parsed_adds
+#                 seen_people = set(Email.objects.get(address=e).person for e in parsed_adds)
+#                 for addr in adds:
+#                     person = Email.objects.get(address=addr).person
+#                     if person not in seen_people:
+#                         working_adds.add(addr)
+#                         seen_people.add(person)
+#                 working_deletes = deletes.union(parsed_deletes)
+#             else:
+#                 working_adds = parsed_adds
+#                 working_deletes = parsed_deletes
+#             # unique_adds = set() # TODO don't add different addresses for the same person from the two sources
+#             if working_adds or working_deletes:
+#                 show_verbose(rfc_num,"Changing original list",tracker_persons,"by adding",working_adds," and deleting",working_deletes)
+#                 print "(",rfc_num,",",[e for e in working_adds],",",[p.pk for p in working_deletes],"), #",[p.ascii for p in working_deletes]
+#             else:
+#                 stats["data doesn't match but no changes found"] += 1
+#                 show_verbose(rfc_num,"Couldn't figure out what to change")
+
+#         if False:
+#             #if tracker_persons:
+#             #if any(['iab@' in e for e in adds]) or any(['iesg@' in e for e in adds]) or any(['IESG'==p.name for p in deletes]) or any(['IAB'==p.name for p in deletes]):
+#             print rfc_num
+#             print "tracker_persons",tracker_persons
+#             print "author_names",author_names[rfc_num]
+#             print "author_emails",author_emails[rfc_num]
+#             print "Adds:", adds
+#             print "Deletes:", deletes
+
+#         stats['changed authors'] += 1
+
+#         if False:
+#             debug.show('rfc_num')
+#             debug.show('rfced_emails')
+#             debug.show('tracker_emails')
+#             debug.show('known_emails')
+#             debug.show('unknown_emails')
+#             debug.show('tracker_persons')
+#             debug.show('rfced_persons')
+#             debug.show('tracker_persons==rfced_persons')
+#             debug.show('[p.id for p in tracker_persons]')
+#             debug.show('[p.id for p in rfced_persons]')
+#             exit()
+
+#     if True:
+#         for p in sorted(list(probable_email_match)):
+#             print p
+#     if True:
+#         print "Probable duplicate persons"
+#         for d,r in sorted(probable_duplicates):
+#             print [(p,p.pk) for p in d], r
+#     else:
+#         print len(probable_duplicates)," probable duplicate persons"
+
+#     print stats
+
+# if __name__ == "__main__":
+#     _main()
+
diff --git a/ietf/bin/redirect-dump b/ietf/bin/redirect-dump
deleted file mode 100755
index ef35bbf0d..000000000
--- a/ietf/bin/redirect-dump
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-#
-# Copyright The IETF Trust 2007, All Rights Reserved
-#
-#python manage.py dumpdata --format=xml redirects | xmllint --format -
-python manage.py dumpdata --format=xml redirects | sed -e 's/<\/*object/\
-  &/g' -e 's/
=1.4.12
 djlint>=1.0.0 # To auto-indent templates via "djlint --profile django --reformat"
 docutils>=0.18.1 # Used only by dbtemplates for RestructuredText
 types-docutils>=0.18.1
-factory-boy>=3.2.1,<3.3
+factory-boy>=3.3
 github3.py>=3.2.0
 gunicorn>=20.1.0
 hashids>=1.3.1