From 26008c7c549675922e4f0b353331a24c6e2de386 Mon Sep 17 00:00:00 2001 From: Robert Sparks Date: Thu, 20 Dec 2018 18:43:06 +0000 Subject: [PATCH 1/2] removed redesign files and a utility that only the redesign files used. Commit ready for merge. - Legacy-Id: 15844 --- ietf/name/utils.py | 11 - redesign/__init__.py | 0 redesign/importing/__init__.py | 0 redesign/importing/import-all.sh | 18 - redesign/importing/import-announcements.py | 162 --- redesign/importing/import-docs.py | 1322 -------------------- redesign/importing/import-groups.py | 410 ------ redesign/importing/import-interim.py | 155 --- redesign/importing/import-ipr.py | 59 - redesign/importing/import-liaison.py | 211 ---- redesign/importing/import-meetings.py | 481 ------- redesign/importing/import-persons.py | 195 --- redesign/importing/import-reused-tables.py | 97 -- redesign/importing/import-roles.py | 318 ----- redesign/importing/import-states.py | 193 --- redesign/importing/move-tables-to-db.sh | 40 - redesign/importing/utils.py | 182 --- redesign/interim/__init__.py | 0 redesign/interim/models.py | 95 -- 19 files changed, 3949 deletions(-) delete mode 100644 ietf/name/utils.py delete mode 100644 redesign/__init__.py delete mode 100644 redesign/importing/__init__.py delete mode 100755 redesign/importing/import-all.sh delete mode 100755 redesign/importing/import-announcements.py delete mode 100755 redesign/importing/import-docs.py delete mode 100755 redesign/importing/import-groups.py delete mode 100755 redesign/importing/import-interim.py delete mode 100755 redesign/importing/import-ipr.py delete mode 100755 redesign/importing/import-liaison.py delete mode 100755 redesign/importing/import-meetings.py delete mode 100755 redesign/importing/import-persons.py delete mode 100755 redesign/importing/import-reused-tables.py delete mode 100755 redesign/importing/import-roles.py delete mode 100755 redesign/importing/import-states.py delete mode 100755 redesign/importing/move-tables-to-db.sh delete mode 100644 redesign/importing/utils.py delete mode 100644 redesign/interim/__init__.py delete mode 100644 redesign/interim/models.py diff --git a/ietf/name/utils.py b/ietf/name/utils.py deleted file mode 100644 index 25b85ba4f..000000000 --- a/ietf/name/utils.py +++ /dev/null @@ -1,11 +0,0 @@ -def name(name_class, slug, name, desc="", order=0, **kwargs): - # create if it doesn't exist, set name and desc - obj, created = name_class.objects.get_or_create(slug=slug) - if created: - obj.name = name - obj.desc = desc - obj.order = order - for k, v in kwargs.iteritems(): - setattr(obj, k, v) - obj.save() - return obj diff --git a/redesign/__init__.py b/redesign/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/redesign/importing/__init__.py b/redesign/importing/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/redesign/importing/import-all.sh b/redesign/importing/import-all.sh deleted file mode 100755 index 2d28895a3..000000000 --- a/redesign/importing/import-all.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# basic dependencies -set -e -python import-reused-tables.py base -python import-persons.py -python import-states.py -python import-groups.py -python import-roles.py - -python import-reused-tables.py others -python import-meetings.py -python import-announcements.py -python import-docs.py -python import-ipr.py # sets up links to drafts/RFCs so needs them -python import-liaison.py - -python import-interim.py # requires ietf_ams database being set up diff --git 
a/redesign/importing/import-announcements.py b/redesign/importing/import-announcements.py deleted file mode 100755 index a0a5c53bb..000000000 --- a/redesign/importing/import-announcements.py +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False -settings.IMPORTING_FROM_OLD_SCHEMA = True - -from django.core import management -management.setup_environ(settings) - -from ietf.person.models import * -from ietf.group.models import * -from ietf.name.utils import name -from ietf.message.models import Message, SendQueue -from redesign.importing.utils import old_person_to_person -from ietf.announcements.models import Announcement, PersonOrOrgInfo, AnnouncedTo, AnnouncedFrom, ScheduledAnnouncement -from ietf.idtracker.models import IESGLogin - -# assumptions: -# - nomcom groups have been imported -# - persons have been imported (Announcement originators and IESGLogins) - -# imports Announcement, ScheduledAnnouncement - -system = Person.objects.get(name="(System)") - -# Announcement -for o in Announcement.objects.all().select_related('announced_to', 'announced_from').order_by('announcement_id').iterator(): - print "importing Announcement", o.pk - try: - message = Message.objects.get(id=o.announcement_id) - except Message.DoesNotExist: - message = Message(id=o.announcement_id) - - message.time = datetime.datetime.combine(o.announced_date, - datetime.time(*(int(x) for x in o.announced_time.split(":")))) - - try: - x = o.announced_by - except PersonOrOrgInfo.DoesNotExist: - message.by = system - else: - if not o.announced_by.first_name and o.announced_by.last_name == 'None': - message.by = system - else: - message.by = old_person_to_person(o.announced_by) - - message.subject = o.subject.strip() - if o.announced_from_id == 99: - message.frm = o.other_val or "" - elif o.announced_from_id == 18 and o.nomcom_chair_id != 0: - message.frm = u"%s <%s>" % o.nomcom_chair.person.email() - else: - if '<' in o.announced_from.announced_from: - message.frm = o.announced_from.announced_from - else: - message.frm = u"%s <%s>" % (o.announced_from.announced_from, o.announced_from.email) - if o.announced_to_id == 99: - message.to = o.other_val or "" - else: - try: - message.to = u"%s <%s>" % (o.announced_to.announced_to, o.announced_to.email) - except AnnouncedTo.DoesNotExist: - message.to = "" - - message.cc = o.cc or "" - for l in (o.extra or "").strip().replace("^", "\n").replace("\r", "").split("\n"): - l = l.strip() - if l.lower().startswith("bcc:"): - message.bcc = l[len("bcc:"):].strip() - elif l.lower().startswith("reply-to:"): - message.reply_to = l[len("reply-to:"):].strip() - message.body = o.text - message.save() - - message.related_groups.clear() - - if o.nomcom: - nomcom = Group.objects.filter(role__name="chair", - role__person=old_person_to_person(o.nomcom_chair.person), - acronym__startswith="nomcom").exclude(acronym="nomcom").get() - - message.related_groups.add(nomcom) - - -# precompute scheduled_by's to speed up the loop a bit -scheduled_by_mapping = {} -for by in ScheduledAnnouncement.objects.all().values_list("scheduled_by", flat=True).distinct(): - logins = IESGLogin.objects.filter(login_name=by) - if logins: - l = logins[0] - person = l.person - if not person: - person = PersonOrOrgInfo.objects.get(first_name=l.first_name, last_name=l.last_name) - found = old_person_to_person(person) 
- else: - found = system - - print "mapping", by, "to", found - scheduled_by_mapping[by] = found - -# ScheduledAnnouncement -for o in ScheduledAnnouncement.objects.all().order_by('id').iterator(): - print "importing ScheduledAnnouncement", o.pk - try: - q = SendQueue.objects.get(id=o.id) - except SendQueue.DoesNotExist: - q = SendQueue(id=o.id) - # make sure there's no id overlap with ordinary already-imported announcements - q.message = Message(id=o.id + 4000) - - time = datetime.datetime.combine(o.scheduled_date, - datetime.time(*(int(x) for x in o.scheduled_time.split(":")))) - by = scheduled_by_mapping[o.scheduled_by] - - q.message.time = time - q.message.by = by - - q.message.subject = (o.subject or "").strip() - q.message.to = (o.to_val or "").strip() - q.message.frm = (o.from_val or "").strip() - q.message.cc = (o.cc_val or "").strip() - q.message.bcc = (o.bcc_val or "").strip() - q.message.reply_to = (o.replyto or "").strip() - q.message.body = o.body or "" - q.message.content_type = o.content_type or "" - q.message.save() - - q.time = time - q.by = by - - d = None - if o.to_be_sent_date: - try: - t = datetime.time(*(int(x) for x in o.to_be_sent_time.split(":"))) - except ValueError: - t = datetime.time(0, 0, 0) - d = datetime.datetime.combine(o.to_be_sent_date, t) - - q.send_at = d - - d = None - if o.actual_sent_date: - try: - t = datetime.time(*(int(x) for x in o.scheduled_time.split(":"))) - except ValueError: - t = datetime.time(0, 0, 0) - - d = datetime.datetime.combine(o.actual_sent_date, t) - - q.sent_at = d - - n = (o.note or "").strip() - if n.startswith("
"): - n = n[len("
"):] - q.note = n - - q.save() diff --git a/redesign/importing/import-docs.py b/redesign/importing/import-docs.py deleted file mode 100755 index df6e04c3b..000000000 --- a/redesign/importing/import-docs.py +++ /dev/null @@ -1,1322 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -from django.template.defaultfilters import pluralize - -from ietf.doc.models import * -from ietf.doc.utils import get_tags_for_stream_id -from ietf.group.models import * -from ietf.name.models import * -from ietf.person.models import * -from ietf.person.name import name_parts -from redesign.importing.utils import old_person_to_person, person_name, dont_save_queries -from ietf.name.utils import name -from ietf.idtracker.models import InternetDraft, IDInternal, IESGLogin, DocumentComment, PersonOrOrgInfo, Rfc, IESGComment, IESGDiscuss, BallotInfo, Position, Area -from ietf.idrfc.models import RfcIndex, DraftVersions -from ietf.idrfc.mirror_rfc_index import get_std_level_mapping, get_stream_mapping -from ietf.ietfworkflows.models import StreamedID, AnnotationTag, ContentType, ObjectHistoryEntry, ObjectWorkflowHistoryEntry, ObjectAnnotationTagHistoryEntry, ObjectStreamHistoryEntry, StateObjectRelationMetadata -from ietf.wgchairs.models import ProtoWriteUp - -from workflows.models import State as StateOld - -import_docs_from = document_name_to_import = None -if len(sys.argv) > 1: - try: - import_docs_from = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d") - except: - document_name_to_import = sys.argv[1] - -dont_save_queries() - -# assumptions: -# - states have been imported -# - groups have been imported -# - IESG login emails/roles have been imported -# - IDAuthor emails/persons have been imported - -# Regarding history, we currently don't try to create DocumentHistory -# objects, we just import the comments as events. 
- -# imports drafts and RFCs, more specifically InternetDraft, -# IDInternal, BallotInfo, Position, IESGComment, IESGDiscuss, -# DocumentComment, IDAuthor, idrfc.RfcIndex, idrfc.DraftVersions, -# StreamedID - - -def alias_doc(name, doc): - DocAlias.objects.filter(name=name).exclude(document=doc).delete() - alias, _ = DocAlias.objects.get_or_create(name=name, document=doc) - return alias - -type_draft = name(DocTypeName, "draft", "Draft") - -stream_mapping = get_stream_mapping() -stream_mapping["ISE"] = stream_mapping["INDEPENDENT"] - -relationship_replaces = name(DocRelationshipName, "replaces", "Replaces") -relationship_updates = name(DocRelationshipName, "updates", "Updates") -relationship_obsoletes = name(DocRelationshipName, "obs", "Obsoletes") - -intended_std_level_mapping = { - "Proposed Standard": name(IntendedStdLevelName, "ps", name="Proposed Standard", order=1), - "Draft Standard": name(IntendedStdLevelName, "ds", name="Draft Standard", order=2), - "Standard": name(IntendedStdLevelName, "std", name="Standard", order=3), - "BCP": name(IntendedStdLevelName, "bcp", "Best Current Practice", order=4), - "Informational": name(IntendedStdLevelName, "inf", name="Informational", order=5), - "Experimental": name(IntendedStdLevelName, "exp", name="Experimental", order=6), - "Historic": name(IntendedStdLevelName, "hist", name="Historic", order=7), - "None": None, - "Request": None, - } - -# add aliases from rfc_intend_status -intended_std_level_mapping["Proposed"] = intended_std_level_mapping["Proposed Standard"] -intended_std_level_mapping["Draft"] = intended_std_level_mapping["Draft Standard"] - -std_level_mapping = get_std_level_mapping() - -state_mapping = { - 'Active': State.objects.get(type="draft", slug="active"), - 'Expired': State.objects.get(type="draft", slug="expired"), - 'RFC': State.objects.get(type="draft", slug="rfc"), - 'Withdrawn by Submitter': State.objects.get(type="draft", slug="auth-rm"), - 'Replaced': State.objects.get(type="draft", slug="repl"), - 'Withdrawn by IETF': State.objects.get(type="draft", slug="ietf-rm"), - } - -iesg_state_mapping = { - 'RFC Published': State.objects.get(type="draft-iesg", slug="pub"), - 'Dead': State.objects.get(type="draft-iesg", slug="dead"), - 'Approved-announcement to be sent': State.objects.get(type="draft-iesg", slug="approved"), - 'Approved-announcement sent': State.objects.get(type="draft-iesg", slug="ann"), - 'AD is watching': State.objects.get(type="draft-iesg", slug="watching"), - 'IESG Evaluation': State.objects.get(type="draft-iesg", slug="iesg-eva"), - 'AD Evaluation': State.objects.get(type="draft-iesg", slug="ad-eval"), - 'Last Call Requested': State.objects.get(type="draft-iesg", slug="lc-req"), - 'In Last Call': State.objects.get(type="draft-iesg", slug="lc"), - 'Publication Requested': State.objects.get(type="draft-iesg", slug="pub-req"), - 'RFC Ed Queue': State.objects.get(type="draft-iesg", slug="rfcqueue"), - 'IESG Evaluation - Defer': State.objects.get(type="draft-iesg", slug="defer"), - 'Waiting for Writeup': State.objects.get(type="draft-iesg", slug="writeupw"), - 'Waiting for AD Go-Ahead': State.objects.get(type="draft-iesg", slug="goaheadw"), - 'Expert Review': State.objects.get(type="draft-iesg", slug="review-e"), - 'DNP-waiting for AD note': State.objects.get(type="draft-iesg", slug="nopubadw"), - 'DNP-announcement to be sent': State.objects.get(type="draft-iesg", slug="nopubanw"), - None: None, # FIXME: consider introducing the ID-exists state - } - -ballot_position_mapping = { - 'Yes': 
name(BallotPositionName, 'yes', 'Yes', order=1), - 'No Objection': name(BallotPositionName, 'noobj', 'No Objection', order=2), - 'Discuss': name(BallotPositionName, 'discuss', 'Discuss', order=3), - 'Abstain': name(BallotPositionName, 'abstain', 'Abstain', order=4), - 'Recuse': name(BallotPositionName, 'recuse', 'Recuse', order=5), - 'No Record': name(BallotPositionName, 'norecord', 'No Record', order=6), - } -ballot_position_mapping["no"] = ballot_position_mapping['No Objection'] -ballot_position_mapping["yes"] = ballot_position_mapping['Yes'] -ballot_position_mapping["discuss"] = ballot_position_mapping['Discuss'] -ballot_position_mapping["abstain"] = ballot_position_mapping['Abstain'] -ballot_position_mapping["recuse"] = ballot_position_mapping['Recuse'] -ballot_position_mapping[None] = ballot_position_mapping["No Record"] -ballot_position_mapping["Undefined"] = ballot_position_mapping["No Record"] - -# tags -substate_mapping = { - "External Party": name(DocTagName, 'extpty', "External Party", 'The document is awaiting review or input from an external party (i.e, someone other than the shepherding AD, the authors, or the WG). See the "note" field for more details on who has the action.', 3), - "Revised ID Needed": name(DocTagName, 'need-rev', "Revised ID Needed", 'An updated ID is needed to address the issues that have been raised.', 5), - "AD Followup": name(DocTagName, 'ad-f-up', "AD Followup", """A generic substate indicating that the shepherding AD has the action item to determine appropriate next steps. In particular, the appropriate steps (and the corresponding next state or substate) depend entirely on the nature of the issues that were raised and can only be decided with active involvement of the shepherding AD. Examples include: - -- if another AD raises an issue, the shepherding AD may first iterate with the other AD to get a better understanding of the exact issue. Or, the shepherding AD may attempt to argue that the issue is not serious enough to bring to the attention of the authors/WG. - -- if a documented issue is forwarded to a WG, some further iteration may be needed before it can be determined whether a new revision is needed or whether the WG response to an issue clarifies the issue sufficiently. - -- when a new revision appears, the shepherding AD will first look at the changes to determine whether they believe all outstanding issues have been raised satisfactorily, prior to asking the ADs who raised the original issues to verify the changes.""", 2), - "Point Raised - writeup needed": name(DocTagName, 'point', "Point Raised - writeup needed", 'IESG discussions on the document have raised some issues that need to be brought to the attention of the authors/WG, but those issues have not been written down yet. (It is common for discussions during a telechat to result in such situations. An AD may raise a possible issue during a telechat and only decide as a result of that discussion whether the issue is worth formally writing up and bringing to the attention of the authors/WG). 
A document stays in the "Point Raised - Writeup Needed" state until *ALL* IESG comments that have been raised have been documented.', 1) - } - -tag_review_by_rfc_editor = name(DocTagName, 'rfc-rev', "Review by RFC Editor") -tag_via_rfc_editor = name(DocTagName, 'via-rfc', "Via RFC Editor") -tag_approved_in_minute = name(DocTagName, 'app-min', "Approved in minute") -tag_has_errata = name(DocTagName, 'errata', "Has errata") - -name(DocTagName, "w-expert", "Awaiting Expert Review/Resolution of Issues Raised", order=1) -name(DocTagName, "w-extern", "Awaiting External Review/Resolution of Issues Raised", order=2) -name(DocTagName, "w-merge", "Awaiting Merge with Other Document", order=3) -name(DocTagName, "need-aut", "Author or Editor Needed", order=4) -name(DocTagName, "w-refdoc", "Waiting for Referenced Document", order=5) -name(DocTagName, "w-refing", "Waiting for Referencing Document", order=6) -name(DocTagName, "rev-wglc", "Revised I-D Needed - Issue raised by WGLC", order=7) -name(DocTagName, "rev-ad", "Revised I-D Needed - Issue raised by AD", order=8) -name(DocTagName, "rev-iesg", "Revised I-D Needed - Issue raised by IESG", order=9) -name(DocTagName, "sheph-u", "Doc Shepherd Follow-up Underway", order=10) -name(DocTagName, "other", "Other - see Comment Log", order=11) -name(DocTagName, "need-ed", "Editor Needed", order=1) -name(DocTagName, "w-part", "Waiting for Partner Feedback", order=2) -name(DocTagName, "w-review", "Awaiting Reviews", order=3) -name(DocTagName, "sh-f-up", "Document Shepherd Followup", order=4) -name(DocTagName, "need-sh", "Shepherd Needed") -name(DocTagName, "w-dep", "Waiting for Dependency on Other Document") -name(DocTagName, "iesg-com", "IESG Review Completed") - -stream_state_reminder_type = name(DocReminderTypeName, "stream-s", "Stream state should change") - -#tag_mapping = dict((t.name, t) for t in DocTagName.objects.all()) - - -# helpers -def extract_authors_from_dump(): - authors_re = re.compile(r"docauthors='([^']*);....-..-..'") - name_email_re = re.compile(r"(.*) <([^>]+)>") - email_brackets_re = re.compile(r" <[^>]*>") - comma_re = re.compile(r".*,") - colon_re = re.compile(r".*:") - - email_mapping = { - "barryleiba@computer.org": "barryleiba@gmail.com", - "greg.daley@eng.monash.edu.au": "gdaley@netstarnetworks.com", - "radia.perlman@sun.com": "radia@alum.mit.edu", - "lisa@osafoundation.org": "lisa.dusseault@gmail.com", - "lisa.dusseault@messagingarchitects.com": "lisa.dusseault@gmail.com", - "scott.lawrence@nortel.com": "scottlawrenc@avaya.com", - "charliep@computer.org": "charliep@computer.org, charles.perkins@earthlink.net", - "yaronf@checkpoint.com": "yaronf.ietf@gmail.com", - "mary.barnes@nortel.com": "mary.ietf.barnes@gmail.com", - "scottlawrenc@avaya.com": "xmlscott@gmail.com", - "henk@ripe.net": "henk@uijterwaal.nl", - "jonne.soininen@nsn.com": "jonne.soininen@renesasmobile.com", - "tom.taylor@rogers.com": "tom.taylor.stds@gmail.com", - "rahul@juniper.net": "raggarwa_1@yahoo.com", - "dward@juniper.net": "dward@cisco.com", - "alan.ford@roke.co.uk": "alanford@cisco.com", - } - - res = {} - - if not os.path.exists(ALL_IDS_STATE): - print "WARNING: proceeding without author information in all_ids.state" - return res - - with open(ALL_IDS_STATE, "r") as author_source: - for line in author_source: - if line.startswith("#"): - continue - - draft_name = line.split(" ")[1] - - m = authors_re.search(line) - if not m: - continue - - l = [] - reliable = True - for a in m.group(1).replace("\\x27", "'").replace("\\'", "'").decode("latin-1").split(", "): - n 
= name_email_re.match(a) - if n: - name = n.group(1) - email = n.group(2) - else: - name = a - email = "" - - if "@" not in email or not email: - reliable = False - - name = email_brackets_re.sub("", name) - name = comma_re.sub("", name) - name = colon_re.sub("", name) - name = name.strip() - - if "VCARD" in name or len(name.split()) > 5: - reliable = False - - if not reliable: - break - - email = email_mapping.get(email, email) - - l.append((name, email)) - - if reliable: - res[draft_name] = l - - return res - -author_dump = extract_authors_from_dump() - -def save_docevent(doc, event, comment): - event.time = comment.datetime() - event.by = iesg_login_to_person(comment.created_by) - event.doc = doc - if not event.desc: - event.desc = comment.comment_text # FIXME: consider unquoting here - event.save() - -def sync_tag(d, include, tag): - if include: - d.tags.add(tag) - else: - d.tags.remove(tag) - -buggy_iesg_logins_cache = {} - -system = Person.objects.get(name="(System)") - -def iesg_login_to_person(l): - if not l: - return system - else: - # there's a bunch of old weird comments made by "IESG - # Member", transform these into "System" instead - if l.id == 2: - return system - - # fix logins without the right person - if not l.person: - if l.id not in buggy_iesg_logins_cache: - logins = IESGLogin.objects.filter(first_name=l.first_name, last_name=l.last_name).exclude(id=l.id) - if logins: - buggy_iesg_logins_cache[l.id] = logins[0] - else: - persons = PersonOrOrgInfo.objects.filter(first_name=l.first_name, last_name=l.last_name) - if persons: - l.person = persons[0] - buggy_iesg_logins_cache[l.id] = l - else: - buggy_iesg_logins_cache[l.id] = None - l = buggy_iesg_logins_cache[l.id] - - if not l: - return system - - try: - return old_person_to_person(l.person) - except Person.DoesNotExist: - print "MISSING IESG LOGIN", l.person, l.person.email() - return None - -def iesg_login_is_secretary(l): - # Amy has two users, for some reason, we sometimes get the wrong one - return l.user_level == IESGLogin.SECRETARIAT_LEVEL or (l.first_name == "Amy" and l.last_name == "Vezza") - -old_internetdraft_content_type_id = ContentType.objects.using("legacy").get(app_label="idtracker", model="internetdraft").pk - -# regexps for parsing document comments - -date_re_str = "(?P<year>[0-9][0-9][0-9][0-9])-(?P<month>[0-9][0-9]?)-(?P<day>[0-9][0-9]?)" -def date_in_match(match): - y = int(match.group('year')) - m = int(match.group('month')) - d = int(match.group('day')) - if d == 35: # borked status date - d = 25 - return datetime.date(y, m, d) - -re_telechat_agenda = re.compile(r"(Placed on|Removed from) agenda for telechat( - %s|)" % date_re_str) -re_telechat_changed = re.compile(r"Telechat date (was|has been) changed to (<b>)?%s(</b>)? 
from" % date_re_str) -re_ballot_position = re.compile(r"\[Ballot Position Update\] (New position, (?P.*), has been recorded( for (?P\w+ \w+) |)|Position (|for (?P.*) )has been changed to (?P.*) from .*)(by (?P.*)|)") -re_ballot_issued = re.compile(r"Ballot has been issued") -re_state_changed = re.compile(r"(State (has been changed|changed|Changes) to (?P.*) from (|)(?P.*)( by|)|Sub state has been changed to (?P.*) from (?P.*))") -re_note_changed = re.compile(r"(\[Note\]: .*'.*'|Note field has been cleared)", re.DOTALL) -re_draft_added = re.compile(r"Draft [Aa]dded (by .*)?( in state (?P.*))?") -re_last_call_requested = re.compile(r"Last Call was requested") -re_document_approved = re.compile(r"IESG has approved and state has been changed to") -re_document_disapproved = re.compile(r"(Do Not Publish|DNP) note has been sent to RFC Editor and state has been changed to") -re_resurrection_requested = re.compile(r"(I-D |)Resurrection was requested") -re_completed_resurrect = re.compile(r"(This document has been resurrected|This document has been resurrected per RFC Editor's request|Resurrection was completed)") - -re_status_date_changed = re.compile(r"Status [dD]ate has been changed to ()?" + date_re_str) -re_responsible_ad_changed = re.compile(r"(Responsible AD|Shepherding AD|responsible) has been changed to ()?") -re_intended_status_changed = re.compile(r"Intended [sS]tatus has been changed to ()?") -re_state_change_notice = re.compile(r"State Change Notice email list (have been change|has been changed) ()?") -re_area_acronym_changed = re.compile(r"Area acronymn? has been changed to \w+ from \w+()?") - -re_comment_discuss_by_tag = re.compile(r" by [\w-]+ [\w-]+$") - -def import_from_idinternal(d, idinternal): - d.time = idinternal.event_date - d.ad = iesg_login_to_person(idinternal.job_owner) - d.notify = idinternal.state_change_notice_to or "" - d.note = (idinternal.note or "").replace('
<br>', '\n').strip().replace('\n', '<br>
') - - try: - if idinternal.area_acronym and d.group.type_id == "individ": - d.group = Group.objects.get(acronym=idinternal.area_acronym.area_acronym.acronym) - except (Area.DoesNotExist, AttributeError): - pass - - d.save() - - d.set_state(iesg_state_mapping[idinternal.cur_state.state]) - - # extract events - last_note_change_text = "" - started_iesg_process = "" - - document_comments = DocumentComment.objects.filter(document=idinternal.draft_id).order_by('date', 'time', 'id') - for c in document_comments: - handled = False - - # telechat agenda schedulings - match = re_telechat_agenda.search(c.comment_text) or re_telechat_changed.search(c.comment_text) - if match: - e = TelechatDocEvent() - e.type = "scheduled_for_telechat" - e.telechat_date = date_in_match(match) if "Placed on" in c.comment_text else None - # can't extract this from history so we just take the latest value - e.returning_item = bool(idinternal.returning_item) - save_docevent(d, e, c) - handled = True - - # ballot issued - match = re_ballot_issued.search(c.comment_text) - if match: - e = DocEvent() - e.type = "sent_ballot_announcement" - save_docevent(d, e, c) - handled = True - - ad = iesg_login_to_person(c.created_by) - last_pos = d.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad=ad) - if not last_pos and not iesg_login_is_secretary(c.created_by): - # when you issue a ballot, you also vote yes; add that vote - e = BallotPositionDocEvent() - e.type = "changed_ballot_position" - e.ad = ad - e.desc = u"[Ballot Position Update] New position, Yes, has been recorded for %s" % e.ad.plain_name() - - e.pos = ballot_position_mapping["Yes"] - e.discuss = last_pos.discuss if last_pos else "" - e.discuss_time = last_pos.discuss_time if last_pos else None - e.comment = last_pos.comment if last_pos else "" - e.comment_time = last_pos.comment_time if last_pos else None - save_docevent(d, e, c) - - # ballot positions - match = re_ballot_position.search(c.comment_text) - if match: - position = ballot_position_mapping[match.group('position') or match.group('position2')] - # some of the old positions don't specify who it's for, in - # that case assume it's "by", the person who entered the - # position - ad_name = match.group('for') or match.group('for2') or match.group('by') or (u"%s %s" % (c.created_by.first_name, c.created_by.last_name) if c.created_by else "") - ad_first, ad_last = ad_name.split(' ') - login = IESGLogin.objects.filter(first_name=ad_first, last_name=ad_last).order_by('user_level')[0] - if iesg_login_is_secretary(login): - # now we're in trouble, a secretariat person isn't an - # AD, instead try to find a position object that - # matches and that we haven't taken yet - positions = Position.objects.filter(ballot=idinternal.ballot) - if position.slug == "noobj": - positions = positions.filter(noobj=1) - elif position.slug == "yes": - positions = positions.filter(yes=1) - elif position.slug == "abstain": - positions = positions.filter(models.Q(abstain=1)|models.Q(abstain=2)) - elif position.slug == "recuse": - positions = positions.filter(recuse=1) - elif position.slug == "discuss": - positions = positions.filter(models.Q(discuss=1)|models.Q(discuss=2)) - assert position.slug != "norecord" - - found = False - for p in positions: - if not BallotPositionDocEvent.objects.filter(doc=d, type="changed_ballot_position", pos=position, ad=iesg_login_to_person(p.ad)): - login = p.ad - found = True - break - - if not found: - # in even more trouble, we can try and see if it - # belongs to a nearby discuss - if 
position.slug == "discuss": - index_c = list(document_comments).index(c) - start = c.datetime() - end = c.datetime() + datetime.timedelta(seconds=30 * 60) - for i, x in enumerate(document_comments): - if (x.ballot == DocumentComment.BALLOT_DISCUSS - and (c.datetime() <= x.datetime() <= end - or abs(index_c - i) <= 2) - and not iesg_login_is_secretary(x.created_by)): - login = x.created_by - found = True - - if not found: - print "BALLOT BY SECRETARIAT", login - - - e = BallotPositionDocEvent() - e.type = "changed_ballot_position" - e.ad = iesg_login_to_person(login) - last_pos = d.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad=e.ad) - e.pos = position - e.discuss = last_pos.discuss if last_pos else "" - e.discuss_time = last_pos.discuss_time if last_pos else None - if e.pos_id == "discuss" and not e.discuss_time: - # in a few cases, we don't have the discuss - # text/time, fudge the time so it's not null - e.discuss_time = c.datetime() - e.comment = last_pos.comment if last_pos else "" - e.comment_time = last_pos.comment_time if last_pos else None - save_docevent(d, e, c) - handled = True - - # ballot discusses/comments - if c.ballot in (DocumentComment.BALLOT_DISCUSS, DocumentComment.BALLOT_COMMENT): - skip = False - - e = BallotPositionDocEvent() - e.type = "changed_ballot_position" - e.ad = iesg_login_to_person(c.created_by) - last_pos = d.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad=e.ad) - e.pos = last_pos.pos if last_pos else ballot_position_mapping[None] - c.comment_text = re_comment_discuss_by_tag.sub("", c.comment_text).strip() - if c.ballot == DocumentComment.BALLOT_DISCUSS: - e.discuss = c.comment_text - - if not e.discuss and (not last_pos or not last_pos.discuss): - skip = True # skip some bogus empty entries - - e.discuss_time = c.datetime() - e.comment = last_pos.comment if last_pos else "" - e.comment_time = last_pos.comment_time if last_pos else None - # put header into description - c.comment_text = "[Ballot discuss]\n" + c.comment_text - else: - e.discuss = last_pos.discuss if last_pos else "" - e.discuss_time = last_pos.discuss_time if last_pos else None - if e.pos_id == "discuss" and not e.discuss_time: - # in a few cases, we don't have the discuss - # text/time, fudge the time so it's not null - e.discuss_time = c.datetime() - e.comment = c.comment_text - if not e.comment and (not last_pos or not last_pos.comment): - skip = True # skip some bogus empty entries - - e.comment_time = c.datetime() - # put header into description - c.comment_text = "[Ballot comment]\n" + c.comment_text - - # there are some bogus copies where a secretary has the - # same discuss comment as an AD - if iesg_login_is_secretary(c.created_by) and DocumentComment.objects.filter(ballot=c.ballot, document=c.document).exclude(created_by=c.created_by): - skip = True - - if not skip: - save_docevent(d, e, c) - - handled = True - - # last call requested - match = re_last_call_requested.search(c.comment_text) - if match: - e = DocEvent(type="requested_last_call") - save_docevent(d, e, c) - handled = True - - # state changes - match = re_state_changed.search(c.comment_text) - if match: - e = DocEvent(type="changed_document") - save_docevent(d, e, c) - handled = True - - # note changed - match = re_note_changed.search(c.comment_text) - if match: - # watch out for duplicates of which the old data's got many - if c.comment_text != last_note_change_text: - last_note_change_text = c.comment_text - e = DocEvent(type="changed_document") - save_docevent(d, e, 
c) - handled = True - - # draft added - match = re_draft_added.search(c.comment_text) - if match: - # watch out for extraneous starts, the old data contains - # some phony ones - if not started_iesg_process: - started_iesg_process = c.comment_text - e = DocEvent(type="started_iesg_process") - save_docevent(d, e, c) - handled = True - - # new version - if c.comment_text == "New version available": - e = NewRevisionDocEvent(type="new_revision", rev=c.version) - c.comment_text = "Added new revision" - save_docevent(d, e, c) - handled = True - - # resurrect requested - match = re_resurrection_requested.search(c.comment_text) - if match: - e = DocEvent(type="requested_resurrect") - save_docevent(d, e, c) - handled = True - - # completed resurrect - match = re_completed_resurrect.search(c.comment_text) - if match: - e = DocEvent(type="completed_resurrect") - save_docevent(d, e, c) - handled = True - - # document expiration - if c.comment_text == "Document is expired by system": - e = DocEvent(type="expired_document") - c.comment_text = "Document has expired" - save_docevent(d, e, c) - handled = True - - # approved document - match = re_document_approved.search(c.comment_text) - if match: - e = DocEvent(type="iesg_approved") - save_docevent(d, e, c) - handled = True - - # disapproved document - match = re_document_disapproved.search(c.comment_text) - if match: - e = DocEvent(type="iesg_disapproved") - save_docevent(d, e, c) - handled = True - - - # some changes can be bundled - this is not entirely - # convenient, especially since it makes it hard to give - # each a type, so unbundle them - if not handled: - unhandled_lines = [] - for line in c.comment_text.split("
"): - line = line.replace(" ", " ") - # status date changed - match = re_status_date_changed.search(line) - if match: - e = DocEvent(type="added_comment") - e.desc = line - save_docevent(d, e, c) - handled = True - - # AD/job owner changed - match = re_responsible_ad_changed.search(line) - if match: - e = DocEvent(type="changed_document") - e.desc = line - save_docevent(d, e, c) - handled = True - - # intended standard level changed - match = re_intended_status_changed.search(line) - if match: - e = DocEvent(type="changed_document") - e.desc = line - save_docevent(d, e, c) - handled = True - - # state change notice - match = re_state_change_notice.search(line) - if match: - e = DocEvent(type="changed_document") - e.desc = line - save_docevent(d, e, c) - handled = True - - # area acronym - match = re_area_acronym_changed.search(line) - if match: - e = DocEvent(type="changed_document") - e.desc = line - save_docevent(d, e, c) - handled = True - - # multiline change bundles end with a single "by xyz" that we skip - if not handled and not line.startswith("by "): - unhandled_lines.append(line) - - if handled: - c.comment_text = "
".join(unhandled_lines) - - if c.comment_text: - if "Due date has been changed" not in c.comment_text: - print "DID NOT HANDLE multi-line comment %s '%s'" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80]) - - # all others are added as comments - if not handled: - e = DocEvent(type="added_comment") - save_docevent(d, e, c) - - # stop typical comments from being output - typical_comments = [ - "Document Shepherd Write-up for %s" % d.name, - "Who is the Document Shepherd for this document", - "We understand that this document doesn't require any IANA actions", - "IANA questions", - "IANA has questions", - "IANA comments", - "IANA Comments", - "IANA Evaluation Comment", - "IANA Last Call Comments", - "ublished as RFC", - "A new comment added", - "Due date has been changed", - "Due date has been changed", - "by ", - "AD-review comments", - "IANA Last Call", - "Subject:", - "Merged with", - ] - for t in typical_comments: - if t in c.comment_text: - handled = True - break - - if not handled: - print (u"DID NOT HANDLE comment %s '%s' by %s" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80], c.created_by)).encode("utf-8") - - e = d.latest_event() - if e: - made_up_date = e.time - else: - made_up_date = d.time - made_up_date += datetime.timedelta(seconds=1) - - e = d.latest_event(TelechatDocEvent, type="scheduled_for_telechat") - telechat_date = e.telechat_date if e else None - if not idinternal.agenda: - idinternal.telechat_date = None # normalize - - if idinternal.telechat_date != telechat_date: - e = TelechatDocEvent(type="scheduled_for_telechat", - telechat_date=idinternal.telechat_date, - returning_item=bool(idinternal.returning_item)) - # a common case is that it has been removed from the - # agenda automatically by a script without a notice in the - # comments, in that case the time is simply the day after - # the telechat - e.time = telechat_date + datetime.timedelta(days=1) if telechat_date and not idinternal.telechat_date else made_up_date - e.by = system - args = ("Placed on", idinternal.telechat_date) if idinternal.telechat_date else ("Removed from", telechat_date) - e.doc = d - e.desc = "%s agenda for telechat - %s" % args - e.save() - - try: - # sad fact: some ballots haven't been generated yet - ballot = idinternal.ballot - except BallotInfo.DoesNotExist: - ballot = None - - if ballot: - e = d.docevent_set.filter(type__in=("changed_ballot_position", "sent_ballot_announcement", "requested_last_call")).order_by('-time')[:1] - if e: - position_date = e[0].time + datetime.timedelta(seconds=1) - else: - position_date = made_up_date - - # make sure we got all the positions - existing = BallotPositionDocEvent.objects.filter(doc=d, type="changed_ballot_position").order_by("-time", '-id') - - for p in Position.objects.filter(ballot=ballot): - # there are some bogus ones - if iesg_login_is_secretary(p.ad): - continue - - ad = iesg_login_to_person(p.ad) - if p.noobj > 0: - pos = ballot_position_mapping["No Objection"] - elif p.yes > 0: - pos = ballot_position_mapping["Yes"] - elif p.abstain > 0: - pos = ballot_position_mapping["Abstain"] - elif p.recuse > 0: - pos = ballot_position_mapping["Recuse"] - elif p.discuss > 0: - pos = ballot_position_mapping["Discuss"] - else: - pos = ballot_position_mapping[None] - - found = False - for x in existing: - if x.ad == ad and x.pos == pos: - found = True - break - - if not found: - e = BallotPositionDocEvent() - e.type = "changed_ballot_position" - e.doc = d - e.time = position_date - e.by = system - e.ad = ad - last_pos 
= d.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad=e.ad) - e.pos = pos - e.discuss = last_pos.discuss if last_pos else "" - e.discuss_time = last_pos.discuss_time if last_pos else None - if e.pos_id == "discuss" and not e.discuss_time: - # in a few cases, we don't have the discuss - # text/time, fudge the time so it's not null - e.discuss_time = e.time - e.comment = last_pos.comment if last_pos else "" - e.comment_time = last_pos.comment_time if last_pos else None - if last_pos: - e.desc = u"[Ballot Position Update] Position for %s has been changed to %s from %s" % (ad.plain_name(), pos.name, last_pos.pos.name) - else: - e.desc = u"[Ballot Position Update] New position, %s, has been recorded for %s" % (pos.name, ad.plain_name()) - e.save() - - # make sure we got the ballot issued event - if ballot.ballot_issued and not d.docevent_set.filter(type="sent_ballot_announcement"): - position = d.docevent_set.filter(type=("changed_ballot_position")).order_by('time', 'id')[:1] - if position: - sent_date = position[0].time - else: - sent_date = made_up_date - - e = DocEvent() - e.type = "sent_ballot_announcement" - e.doc = d - e.time = sent_date - e.by = system - e.desc = "Ballot has been issued" - e.save() - - # make sure the comments and discusses are updated - positions = list(BallotPositionDocEvent.objects.filter(doc=d).order_by("-time", '-id')) - for c in IESGComment.objects.filter(ballot=ballot): - ad = iesg_login_to_person(c.ad) - for p in positions: - if p.ad == ad: - if p.comment != c.text: - p.comment = c.text - p.comment_time = c.date if p.time.date() != c.date else p.time - p.save() - break - - for c in IESGDiscuss.objects.filter(ballot=ballot): - ad = iesg_login_to_person(c.ad) - for p in positions: - if p.ad == ad: - if p.discuss != c.text: - p.discuss = c.text - p.discuss_time = c.date if p.time.date() != c.date else p.time - p.save() - break - - # if any of these events have happened, they're closer to - # the real time - e = d.docevent_set.filter(type__in=("requested_last_call", "sent_last_call", "sent_ballot_announcement", "iesg_approved", "iesg_disapproved")).order_by('time')[:1] - if e: - text_date = e[0].time - datetime.timedelta(seconds=1) - else: - text_date = made_up_date - - if idinternal.ballot.approval_text: - e, _ = WriteupDocEvent.objects.get_or_create(type="changed_ballot_approval_text", doc=d, - defaults=dict(by=system)) - e.text = idinternal.ballot.approval_text - e.time = text_date - e.desc = "Ballot approval text was added" - e.save() - - if idinternal.ballot.last_call_text: - e, _ = WriteupDocEvent.objects.get_or_create(type="changed_last_call_text", doc=d, - defaults=dict(by=system)) - e.text = idinternal.ballot.last_call_text - e.time = text_date - e.desc = "Last call text was added" - e.save() - - if idinternal.ballot.ballot_writeup: - e, _ = WriteupDocEvent.objects.get_or_create(type="changed_ballot_writeup_text", doc=d, - defaults=dict(by=system)) - e.text = idinternal.ballot.ballot_writeup - e.time = text_date - e.desc = "Ballot writeup text was added" - e.save() - - ballot_set = idinternal.ballot_set() - if len(ballot_set) > 1: - others = sorted(b.draft.filename for b in ballot_set if b != idinternal) - desc = u"This was part of a ballot set with: %s" % ",".join(others) - DocEvent.objects.get_or_create(type="added_comment", doc=d, desc=desc, - defaults=dict(time=made_up_date, - by=system)) - - # fix tags - sync_tag(d, idinternal.via_rfc_editor, tag_via_rfc_editor) - - n = idinternal.cur_sub_state and 
idinternal.cur_sub_state.sub_state - for k, v in substate_mapping.iteritems(): - sync_tag(d, k == n, v) - # currently we ignore prev_sub_state - - sync_tag(d, idinternal.approved_in_minute, tag_approved_in_minute) - - - -all_drafts = InternetDraft.objects.all().order_by('pk').select_related() -if import_docs_from: - all_drafts = all_drafts.filter(last_modified_date__gte=import_docs_from) | all_drafts.filter(idinternal__event_date__gte=import_docs_from) - -if document_name_to_import: - if document_name_to_import.startswith("rfc"): - all_drafts = all_drafts.filter(rfc_number=document_name_to_import[3:]) - else: - all_drafts = all_drafts.filter(filename=document_name_to_import) - - -for index, o in enumerate(all_drafts.iterator()): - print "importing", o.id_document_tag, o.filename, index, "ballot %s" % o.idinternal.ballot_id if o.idinternal and o.idinternal.ballot_id else "" - - try: - d = Document.objects.get(name=o.filename) - except Document.DoesNotExist: - d = Document(name=o.filename) - - d.time = o.revision_date - d.type = type_draft - d.title = o.title - d.group = Group.objects.get(acronym=o.group.acronym) - - d.states = [] - - d.set_state(state_mapping[o.status.status]) - - # try guess stream to have a default for old submissions - d.stream = None - if o.filename.startswith("draft-iab-"): - d.stream = stream_mapping["IAB"] - elif o.filename.startswith("draft-irtf-"): - d.stream = stream_mapping["IRTF"] - elif o.idinternal and o.idinternal.via_rfc_editor: - d.stream = stream_mapping["INDEPENDENT"] - elif d.name.startswith("draft-ietf-") and (d.group.type_id != "individ" or state_mapping[o.status.status].slug == "rfc" or o.idinternal): - d.stream = stream_mapping["IETF"] - - sid = StreamedID.objects.filter(draft=o) - if sid and sid[0].stream: - d.stream = stream_mapping[sid[0].stream.name] - - try: - s = StateOld.objects.get(stateobjectrelation__content_type=old_internetdraft_content_type_id, - stateobjectrelation__content_id=o.pk) - except StateOld.DoesNotExist: - s = None - - if s: - try: - # there may be a mismatch between the stream type and the - # state because of a bug in the ietfworkflows code so try - # first without type constraint - new_s = State.objects.get(name=s.name) - except State.MultipleObjectsReturned: - new_s = State.objects.get(type="draft-stream-%s" % d.stream_id, name=s.name) - - # fix some bugs in the old data - skip = False - if s.name == "WG Document" and d.group.type_id == "individ": - skip = True - - if d.name.startswith("draft-ietf"): - if d.name not in ("draft-ietf-proto-wgchair-tracker-ext", "draft-ietf-proto-iab-irtf-tracker-ext", "draft-ietf-sipping-nat-scenarios", "draft-ietf-sipping-sip-offeranswer"): - skip = False - - group_acronym = d.name.split("-")[2] - if group_acronym == "pppext": - group_acronym = "trill" - - d.group = Group.objects.get(acronym=group_acronym) - - if not skip: - d.set_state(new_s) - - - # there was a bug in ietfworkflows so the group wasn't set on adopted documents - if s.name in ("Call for Adoption by WG Issued", "Adopted by a WG") and d.group.type_id == "individ" and o.replaced_by and o.replaced_by.group: - d.group = Group.objects.get(acronym=o.replaced_by.group.acronym) - - d.rev = o.revision_display() - d.abstract = o.abstract - d.pages = o.txt_page_count - d.intended_std_level = intended_std_level_mapping[o.intended_status.intended_status] - d.ad = None - d.shepherd = old_person_to_person(o.shepherd) if o.shepherd else None - d.notify = "" - d.external_url = "" - d.note = "" - d.internal_comments = o.comments or "" - 
d.save() - - # make sure our alias is updated - d_alias = alias_doc(d.name, d) - - # RFC alias - if o.rfc_number: - alias_doc("rfc%s" % o.rfc_number, d) - - # authors - d.authors.clear() - - authors_from_dump = author_dump.get(d.name) - if authors_from_dump: - for i, a in enumerate(authors_from_dump): - name, email = a - try: - e = Email.objects.get(address__iexact=email) - except Email.DoesNotExist: - e = Email(address=email) - - ps = Person.objects.filter(alias__name=name) - if ps: - p = ps[0] - else: - _, first, _, last, _ = name_parts(name) - first = first.replace(".", "") - - ps = Person.objects.filter(name__regex=u".*%s.*%s.*" % (first, last)) - if len(ps) == 1: - p = ps[0] - else: - from ietf.utils import unaccent - p = Person.objects.create(name=name, ascii=unaccent.asciify(name)) - Alias.objects.create(name=p.name, person=p) - if p.ascii != p.name: - Alias.objects.create(name=p.ascii, person=p) - - e.person = p - e.save() - - DocumentAuthor.objects.create(document=d, author=e, order=i) - else: - for i, a in enumerate(o.authors.all().select_related("person").order_by('author_order', 'person')): - try: - e = Email.objects.get(address__iexact=a.email() or a.person.email()[1] or u"unknown-email-%s" % person_name(a.person).replace(" ", "-")) - # renumber since old numbers may be a bit borked - DocumentAuthor.objects.create(document=d, author=e, order=i) - except Email.DoesNotExist: - print "SKIPPED author", unicode(a.person).encode('utf-8') - - # clear any already imported events - d.docevent_set.all().delete() - - if o.idinternal: - # import attributes and events - import_from_idinternal(d, o.idinternal) - - # import missing revision changes from DraftVersions - known_revisions = set(e.rev for e in NewRevisionDocEvent.objects.filter(doc=d, type="new_revision")) - draft_versions = list(DraftVersions.objects.filter(filename=d.name)) - # DraftVersions is not entirely accurate, make sure we got the current one - draft_versions.append(DraftVersions(filename=d.name, revision=o.revision_display(), revision_date=o.revision_date)) - draft_versions.sort(key=lambda v: (v.revision, v.revision_date)) - for v in draft_versions: - if v.revision not in known_revisions: - e = NewRevisionDocEvent(type="new_revision") - e.rev = v.revision - # we don't have time information in this source, so - # hack the seconds to include the revision to ensure - # they're ordered correctly - e.time = datetime.datetime.combine(v.revision_date, datetime.time(0, 0, 0)) + datetime.timedelta(seconds=int(v.revision)) - e.by = system - e.doc = d - e.desc = "Added new revision" - e.save() - known_revisions.add(v.revision) - - # check that the revision number is accurate, there are some bugs - # in the old system, presumably because of the tombstone revision - # hack - revs = list(sorted(known_revisions, reverse=True)) - if revs and revs[0] > d.rev: - d.rev = revs[0] - - # ietfworkflows history entries - for h in ObjectHistoryEntry.objects.filter(content_type=old_internetdraft_content_type_id, content_id=o.pk).order_by('date', 'id'): - e = DocEvent(type="changed_document") - e.time = h.date - e.by = old_person_to_person(h.person) - e.doc = d - r = h.get_real_instance() - if r: - if isinstance(r, ObjectWorkflowHistoryEntry): - s = State.objects.filter(type="draft-stream-%s" % d.stream_id, name=r.to_state) - if not s: - s = State.objects.filter(name=r.to_state) - start = "State changed" - if s: - start = "%s changed" % s[0].type.label - - e.desc = u"%s to %s from %s" % (start, r.to_state, r.from_state) - elif isinstance(r, 
ObjectAnnotationTagHistoryEntry): - l = [] - if r.setted: - s = r.setted.split(",") - l.append(u"Annotation tag%s %s set." % (pluralize(s), ", ".join(s))) - if r.unsetted: - s = r.unsetted.split(",") - l.append(u"Annotation tag%s %s cleared." % (pluralize(s), ", ".join(s))) - e.desc = " ".join(l) - elif isinstance(r, ObjectStreamHistoryEntry): - e.type = "changed_stream" - e.desc = u"Stream changed to %s from %s" % (r.to_stream, r.from_stream) - else: - raise Exception("Unknown ObjectHistoryEntry type: %s" % type(r)) - e.save() - - if r and isinstance(r, ObjectWorkflowHistoryEntry): - # may need to add reminder - try: - metadata = StateObjectRelationMetadata.objects.get(relation__state__name=r.to_state, - relation__content_id=o.pk, - relation__content_type=ContentType.objects.get_for_model(o)) - if metadata.estimated_date: - try: - reminder = DocReminder.objects.get(event__doc=d, type=stream_state_reminder_type) - except DocReminder.DoesNotExist: - reminder = DocReminder(type=stream_state_reminder_type) - - reminder.event = e - reminder.due = metadata.estimated_date - reminder.active = metadata.estimated_date > datetime.datetime.now() - reminder.save() - except StateObjectRelationMetadata.DoesNotExist: - pass - - if h.comment and h.comment.strip() and not d.docevent_set.filter(type="added_comment", desc=h.comment.strip(), time=h.date): - e = DocEvent(type="added_comment") - e.time = h.date - e.by = old_person_to_person(h.person) - e.doc = d - e.desc = h.comment.strip() - e.save() - - - # wgchairs protocol writeups - for w in ProtoWriteUp.objects.filter(draft=o).order_by('date'): - e = WriteupDocEvent(type="changed_protocol_writeup") - e.time = w.date - e.by = old_person_to_person(w.person) - e.doc = d - e.desc = e.get_type_display() - e.text = w.writeup - e.save() - - # import events that might be missing, we can't be sure who did - # them or when but if we don't generate them, we'll be missing the - # information completely - - # make sure last decision is recorded - e = d.latest_event(type__in=("iesg_approved", "iesg_disapproved")) - decision_date = e.time.date() if e else None - if o.b_approve_date != decision_date: - disapproved = o.idinternal and o.idinternal.dnp - e = DocEvent(type="iesg_disapproved" if disapproved else "iesg_approved") - e.time = o.b_approve_date - e.by = system - e.doc = d - e.desc = "Do Not Publish note has been sent to RFC Editor" if disapproved else "IESG has approved" - e.save() - - if o.lc_expiration_date: - e = LastCallDocEvent(type="sent_last_call", expires=o.lc_expiration_date) - # let's try to find the actual change - events = d.docevent_set.filter(type="changed_document", desc__contains=" to In Last Call").order_by('-time')[:1] - # event time is more accurate with actual time instead of just - # date, gives better sorting - e.time = events[0].time if events else o.lc_sent_date - e.by = events[0].by if events else system - e.doc = d - e.desc = "Last call sent" - e.save() - - e = d.latest_event(type="expired_document") - if o.expiration_date and not e: - e = DocEvent(type="expired_document") - e.time = o.expiration_date - e.by = system - e.doc = d - e.desc = "Document has expired" - e.save() - - - # import other attributes - - # when to expire - e = d.latest_event(type__in=("completed_resurrect", "new_revision")) - if e: - d.expires = e.time + datetime.timedelta(days=InternetDraft.DAYS_TO_EXPIRE) - else: - d.expires = None - - # tags - sync_tag(d, o.review_by_rfc_editor, tag_review_by_rfc_editor) - - used_tags = 
DocTagName.objects.filter(name__in=list(AnnotationTag.objects.filter(annotationtagobjectrelation__content_type=old_internetdraft_content_type_id, annotationtagobjectrelation__content_id=o.pk).values_list('name', flat=True))).values_list('slug', flat=True) - possible_tags = get_tags_for_stream_id(d.stream_id) - for name in possible_tags: - if name == "need-rev" and o.idinternal and o.idinternal.cur_sub_state and o.idinternal.cur_sub_state.sub_state == "Revised ID Needed": - continue # don't overwrite tag from IESG substate - - sync_tag(d, name in used_tags, name) - - # replacements - if o.replaced_by: - replacement, _ = Document.objects.get_or_create(name=o.replaced_by.filename, defaults=dict(time=datetime.datetime(1970, 1, 1, 0, 0, 0))) - RelatedDocument.objects.get_or_create(source=replacement, target=d_alias, relationship=relationship_replaces) - - # the RFC-related attributes are imported when we handle the RFCs below - - d.save() - -# now process RFCs - -def get_or_create_rfc_document(rfc_number): - name = "rfc%s" % rfc_number - - # try to find a draft that can form the base of the document - draft = None - - ids = InternetDraft.objects.filter(rfc_number=rfc_number)[:1] - if ids: - draft = ids[0] - else: - r = RfcIndex.objects.get(rfc_number=rfc_number) - # rfcindex occasionally includes drafts that were not - # really submitted to IETF (e.g. April 1st) - if r.draft: - ids = InternetDraft.objects.filter(filename=r.draft)[:1] - if ids: - draft = ids[0] - - if rfc_number in (2604, 3025): - # prevent merge for some botched RFCs that are obsoleted by - # another RFC coming from the same draft, in practice this is - # just these two, so we hardcode rather than querying for it - draft = None - - if draft: - name = draft.filename - - d, _ = Document.objects.get_or_create(name=name, type=type_draft) - if not name.startswith('rfc'): - # make sure draft also got an alias - alias_doc(name, d) - - alias = alias_doc("rfc%s" % rfc_number, d) - - return (d, alias) - - -all_rfcs = RfcIndex.objects.all().order_by("rfc_number") - -if all_drafts.count() != InternetDraft.objects.count(): - if document_name_to_import and document_name_to_import.startswith("rfc"): - # we wanted to import an RFC - all_rfcs = all_rfcs.filter(rfc_number=document_name_to_import[3:]) - else: - # if we didn't process all drafts, limit the RFCs to the ones we - # did process - rfc_numbers = set(d.rfc_number for d in all_drafts if d.rfc_number) - if import_docs_from: - all_rfcs = all_rfcs.filter(rfc_number__in=rfc_numbers) | all_rfcs.filter(rfc_published_date__gte=import_docs_from) - else: - all_rfcs = all_rfcs.filter(rfc_number__in=rfc_numbers) - -for index, o in enumerate(all_rfcs.iterator()): - print "importing rfc%s" % o.rfc_number, index - - d, d_alias = get_or_create_rfc_document(o.rfc_number) - d.time = datetime.datetime.now() - d.title = o.title - d.std_level = std_level_mapping[o.current_status] - d.set_state(state_mapping["RFC"]) - - d.stream = stream_mapping[o.stream] - if not d.group and o.wg: - d.group = Group.objects.get(acronym=o.wg) - - # get some values from the rfc table - rfcs = Rfc.objects.filter(rfc_number=o.rfc_number).select_related() - if rfcs: - r = rfcs[0] - l = intended_std_level_mapping[r.intended_status.status] - if l: # skip some bogus None values - d.intended_std_level = l - d.save() - - # a few RFCs have an IDInternal so we may have to import the - # events and attributes - internals = IDInternal.objects.filter(rfc_flag=1, draft=o.rfc_number) - if internals: - if d.name.startswith("rfc"): - # clear 
any already imported events, we don't do it for - # drafts as they've already been cleared above - d.docevent_set.all().delete() - import_from_idinternal(d, internals[0]) - - # publication date - e, _ = DocEvent.objects.get_or_create(doc=d, type="published_rfc", - defaults=dict(by=system)) - e.time = o.rfc_published_date - e.desc = "RFC published" - e.save() - - # import obsoletes/updates - def make_relation(other_rfc, rel_type, reverse): - other_number = int(other_rfc.replace("RFC", "")) - other, other_alias = get_or_create_rfc_document(other_number) - if reverse: - RelatedDocument.objects.get_or_create(source=other, target=d_alias, relationship=rel_type) - else: - RelatedDocument.objects.get_or_create(source=d, target=other_alias, relationship=rel_type) - - def parse_relation_list(s): - if not s: - return [] - res = [] - for x in s.split(","): - if x[:3] in ("NIC", "IEN", "STD", "RTR"): - # try translating this to RFC numbers that we can - # handle sensibly; otherwise we'll have to ignore them - l = ["RFC%s" % y.rfc_number for y in RfcIndex.objects.filter(also=x).order_by('rfc_number')] - if l: - print "translated", x, "to", ", ".join(l) - for y in l: - if y not in res: - res.append(y) - else: - print "SKIPPED relation to", x - else: - res.append(x) - return res - - for x in parse_relation_list(o.obsoletes): - make_relation(x, relationship_obsoletes, False) - for x in parse_relation_list(o.obsoleted_by): - make_relation(x, relationship_obsoletes, True) - for x in parse_relation_list(o.updates): - make_relation(x, relationship_updates, False) - for x in parse_relation_list(o.updated_by): - make_relation(x, relationship_updates, True) - - if o.also: - for a in o.also.lower().split(","): - alias_doc(a, d) - - sync_tag(d, o.has_errata, tag_has_errata) - - # FIXME: import RFC authors? 
diff --git a/redesign/importing/import-groups.py b/redesign/importing/import-groups.py deleted file mode 100755 index ed0fd540b..000000000 --- a/redesign/importing/import-groups.py +++ /dev/null @@ -1,410 +0,0 @@ -#!/usr/bin/python - -import sys, os, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -from django.template.defaultfilters import slugify - -from ietf.group.models import * -from ietf.name.models import * -from ietf.doc.models import State, StateType -from ietf.doc.utils import get_tags_for_stream_id -from ietf.doc.models import Document -from ietf.name.utils import name -from redesign.importing.utils import old_person_to_person, make_revision_event -from ietf.idtracker.models import AreaGroup, IETFWG, Area, AreaGroup, Acronym, AreaWGURL, IRTF, ChairsHistory, Role, AreaDirector -from ietf.liaisons.models import SDOs -from ietf.iesg.models import TelechatDates, Telechat, TelechatDate -from ietf.wgcharter.utils import set_or_create_charter -import workflows.utils - -# imports IETFWG, Area, AreaGroup, Acronym, IRTF, AreaWGURL, SDOs, TelechatDates, dates from Telechat - -# also creates nomcom groups - -# assumptions: persons and states have been imported - -doc_type_charter = name(DocTypeName, "charter", "Charter") - -state_names = dict( - bof=name(GroupStateName, slug="bof", name="BOF"), - proposed=name(GroupStateName, slug="proposed", name="Proposed"), - active=name(GroupStateName, slug="active", name="Active"), - dormant=name(GroupStateName, slug="dormant", name="Dormant"), - conclude=name(GroupStateName, slug="conclude", name="Concluded"), - unknown=name(GroupStateName, slug="unknown", name="Unknown"), - ) - -type_names = dict( - ietf=name(GroupTypeName, slug="ietf", name="IETF"), - area=name(GroupTypeName, slug="area", name="Area"), - ag=name(GroupTypeName, slug="ag", name="AG", desc="Area group"), - wg=name(GroupTypeName, slug="wg", name="WG", desc="Working group"), - rg=name(GroupTypeName, slug="rg", name="RG", desc="Research group"), - team=name(GroupTypeName, slug="team", name="Team"), - individ=name(GroupTypeName, slug="individ", name="Individual"), - sdo=name(GroupTypeName, slug="sdo", name="SDO", desc="Standards organization"), - ) - -group_ballot_names = { - 'No': name(GroupBallotPositionName, 'no', 'No'), - 'Yes': name(GroupBallotPositionName, 'yes', 'Yes'), - 'Abstain': name(GroupBallotPositionName, 'abstain', 'Abstain'), - 'Block': name(GroupBallotPositionName, 'block', 'Block'), - 'No Record': name(GroupBallotPositionName, 'norecord', 'No record'), - } - - -# make sure we got the IETF as high-level parent -ietf_group, _ = Group.objects.get_or_create(acronym="ietf") -ietf_group.name = "IETF" -ietf_group.state = state_names["active"] -ietf_group.type = type_names["ietf"] -ietf_group.save() - -# make sure we got the IESG so we can use it as parent for areas -iesg_group, _ = Group.objects.get_or_create(acronym="iesg") -iesg_group.name = "IESG" -iesg_group.state = state_names["active"] -iesg_group.type = type_names["ietf"] -iesg_group.parent = ietf_group -iesg_group.save() - -# make sure we got the IRTF as parent for RGs -irtf_group, _ = Group.objects.get_or_create(acronym="irtf") -irtf_group.name = "IRTF" -irtf_group.state = state_names["active"] -irtf_group.type = type_names["ietf"] -irtf_group.save() - -# create Secretariat for use with roles 
-secretariat_group, _ = Group.objects.get_or_create(acronym="secretariat") -secretariat_group.name = "IETF Secretariat" -secretariat_group.state = state_names["active"] -secretariat_group.type = type_names["ietf"] -secretariat_group.save() - -# create ISE for use with streams -ise_group, _ = Group.objects.get_or_create(acronym="ise") -ise_group.name = "Independent Submission Editor" -ise_group.state = state_names["active"] -ise_group.type = type_names["ietf"] -ise_group.save() - -# create RSOC for use with roles -rsoc_group, _ = Group.objects.get_or_create(acronym="rsoc") -rsoc_group.name = "RFC Series Oversight Committee" -rsoc_group.state = state_names["active"] -rsoc_group.type = type_names["ietf"] -rsoc_group.save() - -# create IAB for use with liaison statements and streams -iab_group, _ = Group.objects.get_or_create(acronym="iab") -iab_group.name = "Internet Architecture Board" -iab_group.state = state_names["active"] -iab_group.type = type_names["ietf"] -iab_group.save() - -# create IANA for use with roles for authorization -iana_group, _ = Group.objects.get_or_create(acronym="iana") -iana_group.name = "IANA" -iana_group.state = state_names["active"] -iana_group.type = type_names["ietf"] -iana_group.save() - -# create IEPG for use with meetings -iepg_group, _ = Group.objects.get_or_create(acronym="iepg") -iepg_group.name = "IEPG" -iepg_group.state = state_names["active"] -iepg_group.type = type_names["ietf"] -iepg_group.save() - - -system = Person.objects.get(name="(System)") - -for o in Telechat.objects.all().order_by("pk"): - if o.pk <= 3: - print "skipping phony Telechat", o.pk - continue - print "importing Telechat", o.pk, o.telechat_date - TelechatDate.objects.get_or_create(date=o.telechat_date) - -for o in TelechatDates.objects.all(): - print "importing TelechatDates" - for x in range(1, 5): - d = getattr(o, "date%s" % x) - if d: - TelechatDate.objects.get_or_create(date=d) - -# NomCom -for o in ChairsHistory.objects.filter(chair_type=Role.NOMCOM_CHAIR).order_by("start_year"): - print "importing ChairsHistory/Nomcom", o.pk, "nomcom%s" % o.start_year - group, _ = Group.objects.get_or_create(acronym="nomcom%s" % o.start_year) - group.name = "IAB/IESG Nominating Committee %s/%s" % (o.start_year, o.end_year) - if o.chair_type.person == o.person: - s = state_names["active"] - else: - s = state_names["conclude"] - group.state = s - group.type = type_names["ietf"] - group.parent = None - group.save() - - # we need start/end year so fudge events - group.groupevent_set.all().delete() - - e = ChangeStateGroupEvent(group=group, type="changed_state") - e.time = datetime.datetime(o.start_year, 5, 1, 12, 0, 0) - e.by = system - e.desc = "Started group" - e.state = state_names["active"] - e.save() - - e = ChangeStateGroupEvent(group=group, type="changed_state") - e.time = datetime.datetime(o.end_year, 5, 1, 12, 0, 0) - e.by = system - e.desc = "Concluded group" - e.state = state_names["conclude"] - e.save() - -# IRTF -for o in IRTF.objects.all(): - print "importing IRTF", o.pk, o.acronym - - try: - group = Group.objects.get(acronym=o.acronym.lower()) - except Group.DoesNotExist: - group = Group(acronym=o.acronym.lower()) - - group.name = o.name - group.state = state_names["active"] # we assume all to be active - group.type = type_names["rg"] - group.parent = irtf_group - - group.comments = o.charter_text or "" - - group.save() - - # FIXME: missing fields from old: meeting_scheduled - -# SDOs -for o in SDOs.objects.all().order_by("pk"): - # we import SDOs as groups, this makes it easy to take 
advantage - # of the rest of the role/person models for authentication and - # authorization - print "importing SDOs", o.pk, o.sdo_name - try: - group = Group.objects.get(name=o.sdo_name, type=type_names["sdo"]) - except Group.DoesNotExist: - group = Group(name=o.sdo_name, type=type_names["sdo"]) - - group.state_id = "active" - group.acronym = slugify(group.name) - group.save() - -def import_date_event(group, name, state_id, desc): - d = getattr(o, "%s_date" % name) - if d: - e = ChangeStateGroupEvent(group=group, type="changed_state") - e.time = datetime.datetime.combine(d, datetime.time(12, 0, 0)) - e.by = system - e.state = state_names[state_id] - e.desc = desc - e.save() - -# Area -for o in Area.objects.all(): - print "importing Area", o.pk, o.area_acronym.acronym - - try: - group = Group.objects.get(acronym=o.area_acronym.acronym) - except Group.DoesNotExist: - group = Group(acronym=o.area_acronym.acronym) - group.id = o.area_acronym_id # transfer id - - # we could use last_modified_date for group.time, but in the new - # schema, group.time is supposed to change when the roles change - # too and some of the history logic depends on this, so it's going - # to cause us too much trouble - - group.name = o.area_acronym.name - if o.status.status == "Active": - s = state_names["active"] - elif o.status.status == "Concluded": - s = state_names["conclude"] - elif o.status.status == "Unknown": - s = state_names["unknown"] - group.state = s - group.type = type_names["area"] - group.parent = iesg_group - group.comments = o.comments.strip() if o.comments else "" - - group.save() - - for u in o.additional_urls(): - url, _ = GroupURL.objects.get_or_create(group=group, url=u.url) - url.name = u.description.strip() - url.save() - - # import events - group.groupevent_set.all().delete() - - import_date_event(group, "start", "active", "Started group") - import_date_event(group, "concluded", "conclude", "Concluded group") - - # FIXME: missing fields from old: extra_email_addresses - -# IETFWG, AreaGroup -for o in IETFWG.objects.all().order_by("pk"): - print "importing IETFWG", o.pk, o.group_acronym.acronym - - try: - group = Group.objects.get(acronym=o.group_acronym.acronym) - except Group.DoesNotExist: - group = Group(acronym=o.group_acronym.acronym) - group.id = o.group_acronym_id # transfer id - - if o.last_modified_date: - group.time = datetime.datetime.combine(o.last_modified_date, datetime.time(12, 0, 0)) - group.name = o.group_acronym.name - # state - if o.group_type.type == "BOF": - s = state_names["bof"] - if o.status.status == "Concluded": - s = state_names["conclude"] - elif o.group_type.type == "PWG": - s = state_names["proposed"] - if o.status.status == "Concluded": - s = state_names["conclude"] - elif o.status.status == "Active": - s = state_names["active"] - elif o.status.status == "Dormant": - s = state_names["dormant"] - elif o.status.status == "Concluded": - s = state_names["conclude"] - group.state = s - # type - if o.group_type.type == "TEAM": - group.type = type_names["team"] - elif o.group_type.type == "AG": - if o.group_acronym.acronym == "none": - # none means individual - group.type = type_names["individ"] - elif o.group_acronym.acronym == "iab": - group.type = type_names["ietf"] - group.parent = None - elif o.group_acronym.acronym in ("tsvdir", "secdir", "saag", "usac"): - group.type = type_names["team"] - elif o.group_acronym.acronym == "iesg": - pass # we already treated iesg - elif o.group_acronym.acronym in ("apparea", "opsarea", "rtgarea", "usvarea", "genarea", "tsvarea", 
"raiarea", "apptsv"): - group.type = type_names["ag"] - else: - # the remaining groups are - # apples, null, dirdir - # for now, we don't transfer them - if group.id: - group.delete() - print "not transferring", o.group_acronym.acronym, o.group_acronym.name - continue - else: # PWG/BOF/WG - # some BOFs aren't WG-forming but we currently classify all as WGs - group.type = type_names["wg"] - - if o.area: - group.parent = Group.objects.get(acronym=o.area.area.area_acronym.acronym) - elif not group.parent: - print "no area/parent for", group.acronym, group.name, group.type, group.state - - try: - area_director = o.area_director - except AreaDirector.DoesNotExist: - area_director = None - if area_director and not area_director.area_id: - area_director = None # fake TBD guy - - group.ad = old_person_to_person(area_director.person) if area_director else None - group.list_email = o.email_address if o.email_address else "" - group.list_subscribe = (o.email_subscribe or "").replace("//listinfo", "/listinfo").strip() - l = o.clean_email_archive().strip() if o.email_archive else "" - if l in ("none", "not available"): - l = "" - group.list_archive = l - - group.comments = o.comments.strip() if o.comments else "" - - group.save() - - for u in o.additional_urls(): - url, _ = GroupURL.objects.get_or_create(group=group, url=u.url) - url.name = u.description.strip() - url.save() - - for m in o.milestones(): - desc = m.description.strip() - try: - milestone = GroupMilestone.objects.get(group=group, desc=desc) - except GroupMilestone.DoesNotExist: - milestone = GroupMilestone(group=group, desc=desc) - - milestone.expected_due_date = m.expected_due_date - milestone.done = m.done == "Done" - milestone.done_date = m.done_date - milestone.time = datetime.datetime.combine(m.last_modified_date, datetime.time(12, 0, 0)) - milestone.save() - - # import workflow states and transitions - w = workflows.utils.get_workflow_for_object(o) - if w: - try: - w = w.wgworkflow - except WGWorkflow.DoesNotExist: - w = None - if w: - w.unused_states = State.objects.filter(type="draft-stream-ietf").exclude(name__in=[x.name for x in w.selected_states.all()]) - w.unused_tags = DocTagName.objects.filter(slug__in=get_tags_for_stream_id("draft-stream-ietf")).exclude(name__in=[x.name for x in w.selected_tags.all()]) - - # custom transitions - states = dict((s.name, s) for s in State.objects.filter(type="draft-stream-ietf")) - old_states = dict((s.name, s) for s in w.states.filter(name__in=[name for name in states]).select_related('transitions')) - for name in old_states: - s = states[name] - o = old_states[name] - n = [states[t.destination.name] for t in o.transitions.filter(workflow=workflow)] - if set(s.next_states) != set(n): - g, _ = GroupStateTransitions.objects.get_or_create(group=group, state=s) - g.next_states = n - # import events - group.groupevent_set.all().delete() - - import_date_event(group, "proposed", "proposed", "Proposed group") - import_date_event(group, "start", "active", "Started group") - import_date_event(group, "concluded", "conclude", "Concluded group") - # dormant_date is empty on all so don't bother with that - - # import charter - charter = set_or_create_charter(group) - if group.state_id in ("active", "conclude"): - charter.rev = "01" - charter.set_state(State.objects.get(type="charter", slug="approved")) - else: - charter.rev = "00" - charter.set_state(State.objects.get(type="charter", slug="notrev")) - # the best estimate of the charter time is when we changed state - e = 
diff --git a/redesign/importing/import-interim.py b/redesign/importing/import-interim.py
deleted file mode 100755
index 3dbbc571f..000000000
--- a/redesign/importing/import-interim.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/python
-
-import sys, os, re, datetime, pytz
-
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-
-from ietf import settings
-settings.USE_DB_REDESIGN_PROXY_CLASSES = False
-
-from django.core import management
-management.setup_environ(settings)
-
-from django.template.defaultfilters import slugify
-
-import datetime
-
-from ietf.idtracker.models import AreaDirector, IETFWG, Acronym, IRTF, PersonOrOrgInfo
-from ietf.meeting.models import *
-from ietf.person.models import *
-from ietf.doc.models import Document, DocAlias, State, DocEvent
-from redesign.importing.utils import old_person_to_person, dont_save_queries, make_revision_event
-from redesign.interim.models import *
-from ietf.name.models import *
-from ietf.name.utils import name
-
-dont_save_queries()
-
-# assumptions:
-# - persons have been imported
-# - groups have been imported
-# - regular meetings have been imported
-
-database = "ietf_ams"
-
-system_person = Person.objects.get(name="(System)")
-
-agenda_doctype = name(DocTypeName, "agenda", "Agenda")
-minutes_doctype = name(DocTypeName, "minutes", "Minutes")
-slides_doctype = name(DocTypeName, "slides", "Slides")
-
-group_meetings_in_year = {}
-
-for o in InterimMeetings.objects.using(database).order_by("start_date"):
-    print "importing InterimMeeting", o.pk
-
-    group = Group.objects.get(pk=o.group_acronym_id)
-    meeting_key = "%s-%s" % (group.acronym, o.start_date.year)
-    if not meeting_key in group_meetings_in_year:
-        group_meetings_in_year[meeting_key] = 0
-
-    group_meetings_in_year[meeting_key] += 1
-
-    num = "interim-%s-%s-%s" % (o.start_date.year, group.acronym, group_meetings_in_year[meeting_key])
-
-    try:
-        m = Meeting.objects.get(number=num)
-    except:
-        m = Meeting(number=num)
-        m.pk = o.pk
-
-    m.type_id = "interim"
-    m.date = o.start_date
-
-    # we don't have any other fields
-
-    m.save()
-
-    if m.session_set.all():
-        session = m.session_set.all()[0]
-    else:
-        session = Session()
-        session.meeting = m
-
-    session.group = group
-    session.requested_by = system_person
-    session.status_id = "appr"
-    session.modified = datetime.datetime.combine(m.date, datetime.time(0, 0, 0))
-    session.save()
-
-    meeting = m
-    interim_meeting = o
-
-    def import_material_kind(kind, doctype):
-        # import agendas
-        found = kind.objects.filter(meeting_num=m.pk,
-                                    group_acronym_id=interim_meeting.group_acronym_id,
-                                    irtf=1 if session.group.parent.acronym == "irtf" else 0,
-                                    interim=1).using(database)
-
-        for o in found:
-            name = "%s-%s" % (doctype.slug, m.number)
-            if kind == InterimSlides:
-                name += "-%s" % o.slide_num
-
-            name = name.lower()
-
-            try:
-                d = Document.objects.get(type=doctype, docalias__name=name)
-            except Document.DoesNotExist:
-                d = Document(type=doctype, name=name)
-
-            if kind == InterimSlides:
-                d.title = o.slide_name.strip()
-                l = o.file_loc()
-                d.external_url = l[l.find("slides/") + len("slides/"):]
-                d.order = o.order_num or 1
-            else:
-                session_name = session.name if session.name else session.group.acronym.upper()
-                d.title = u"%s for %s at %s" % (doctype.name, session_name, session.meeting)
-                d.external_url = o.filename # save filenames for now as they don't appear to be quite regular
-                d.rev = "00"
-            d.group = session.group
-            d.time = datetime.datetime.combine(meeting.date, datetime.time(0, 0, 0)) # we may have better estimate below
-
-            d.save()
-
-            d.set_state(State.objects.get(type=doctype, slug="active"))
-
-            DocAlias.objects.get_or_create(document=d, name=name)
-
-            session.materials.add(d)
-
-            # try to create a doc event to figure out who uploaded it
-            e = make_revision_event(d, system_person)
-
-            t = d.type_id
-            if d.type_id == "slides":
-                t = "slide, '%s" % d.title
-            activities = InterimActivities.objects.filter(group_acronym_id=interim_meeting.group_acronym_id,
-                                                          meeting_num=interim_meeting.meeting_num,
-                                                          activity__startswith=t,
-                                                          activity__endswith="was uploaded").using(database)[:1]
-
-            if activities:
-                a = activities[0]
-
-                e.time = datetime.datetime.combine(a.act_date, a.act_time)
-                try:
-                    e.by = old_person_to_person(PersonOrOrgInfo.objects.get(pk=a.act_by)) or system_person
-                except PersonOrOrgInfo.DoesNotExist:
-                    pass
-
-                d.time = e.time
-                d.save()
-            else:
-                print "NO UPLOAD ACTIVITY RECORD for", d.name.encode("utf-8"), t.encode("utf-8"), interim_meeting.group_acronym_id, interim_meeting.meeting_num
-
-            e.save()
-
-
-    import_material_kind(InterimAgenda, agenda_doctype)
-    import_material_kind(InterimMinutes, minutes_doctype)
-    import_material_kind(InterimSlides, slides_doctype)
-
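import-interim.py above derives meeting numbers of the form interim-YEAR-ACRONYM-N from a per-group, per-year counter, which only works because the rows are processed in start-date order. The counting scheme in isolation (group name is hypothetical):

    meetings_in_year = {}

    def interim_number(acronym, year):
        # counter must be keyed on (group, year), and input must be date-ordered,
        # or the N suffix will not reflect the order the meetings happened in
        key = (acronym, year)
        meetings_in_year[key] = meetings_in_year.get(key, 0) + 1
        return "interim-%s-%s-%s" % (year, acronym, meetings_in_year[key])

    print(interim_number("foobar", 2010))  # interim-2010-foobar-1
    print(interim_number("foobar", 2010))  # interim-2010-foobar-2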
u"%s for %s at %s" % (doctype.name, session_name, session.meeting) - d.external_url = o.filename # save filenames for now as they don't appear to be quite regular - d.rev = "00" - d.group = session.group - d.time = datetime.datetime.combine(meeting.date, datetime.time(0, 0, 0)) # we may have better estimate below - - d.save() - - d.set_state(State.objects.get(type=doctype, slug="active")) - - DocAlias.objects.get_or_create(document=d, name=name) - - session.materials.add(d) - - # try to create a doc event to figure out who uploaded it - e = make_revision_event(d, system_person) - - t = d.type_id - if d.type_id == "slides": - t = "slide, '%s" % d.title - activities = InterimActivities.objects.filter(group_acronym_id=interim_meeting.group_acronym_id, - meeting_num=interim_meeting.meeting_num, - activity__startswith=t, - activity__endswith="was uploaded").using(database)[:1] - - if activities: - a = activities[0] - - e.time = datetime.datetime.combine(a.act_date, a.act_time) - try: - e.by = old_person_to_person(PersonOrOrgInfo.objects.get(pk=a.act_by)) or system_person - except PersonOrOrgInfo.DoesNotExist: - pass - - d.time = e.time - d.save() - else: - print "NO UPLOAD ACTIVITY RECORD for", d.name.encode("utf-8"), t.encode("utf-8"), interim_meeting.group_acronym_id, interim_meeting.meeting_num - - e.save() - - - import_material_kind(InterimAgenda, agenda_doctype) - import_material_kind(InterimMinutes, minutes_doctype) - import_material_kind(InterimSlides, slides_doctype) - diff --git a/redesign/importing/import-ipr.py b/redesign/importing/import-ipr.py deleted file mode 100755 index 72e6d7e90..000000000 --- a/redesign/importing/import-ipr.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False -settings.IMPORTING_IPR = True - -from django.core import management -management.setup_environ(settings) - -from ietf.ipr.models import IprDraftOld, IprRfcOld, IprDocAlias, IprDetail -from ietf.doc.models import DocAlias - -# imports IprDraft and IprRfc, converting them to IprDocAlias links to Document - -# assumptions: documents have been imported - -# some links are borked, only import those that reference an existing IprDetail -ipr_ids = IprDetail.objects.all() - -for o in IprDraftOld.objects.filter(ipr__in=ipr_ids).select_related("document").order_by("id").iterator(): - try: - alias = DocAlias.objects.get(name=o.document.filename) - except DocAlias.DoesNotExist: - print "COULDN'T FIND DOCUMENT", o.document.filename - continue - - try: - IprDocAlias.objects.get(ipr=o.ipr_id, doc_alias=alias) - except IprDocAlias.DoesNotExist: - link = IprDocAlias() - link.ipr_id = o.ipr_id - link.doc_alias = alias - link.rev = o.revision or "" - link.save() - - print "importing IprDraft", o.pk, "linking", o.ipr_id, o.document.filename - -for o in IprRfcOld.objects.filter(ipr__in=ipr_ids).select_related("document").order_by("id").iterator(): - try: - alias = DocAlias.objects.get(name="rfc%s" % o.document.rfc_number) - except DocAlias.DoesNotExist: - print "COULDN'T FIND RFC%s", o.document.rfc_number - continue - - try: - IprDocAlias.objects.get(ipr=o.ipr_id, doc_alias=alias) - except IprDocAlias.DoesNotExist: - link = IprDocAlias() - link.ipr_id = o.ipr_id - link.doc_alias = alias - link.rev = "" - link.save() - - print "importing IprRfc", o.pk, "linking", o.ipr_id, o.document.rfc_number diff --git 
diff --git a/redesign/importing/import-liaison.py b/redesign/importing/import-liaison.py
deleted file mode 100755
index d3258adfc..000000000
--- a/redesign/importing/import-liaison.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-
-import sys, os, re, datetime, pytz
-
-basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
-sys.path = [ basedir ] + sys.path
-
-from ietf import settings
-settings.USE_DB_REDESIGN_PROXY_CLASSES = False
-settings.IMPORTING_FROM_OLD_SCHEMA = True
-
-from django.core import management
-management.setup_environ(settings)
-
-from django.template.defaultfilters import slugify
-
-from ietf.idtracker.models import Acronym, EmailAddress
-from ietf.liaisons.models import *
-from ietf.doc.models import Document, DocAlias
-from ietf.person.models import *
-from redesign.importing.utils import old_person_to_person, make_revision_event
-from ietf.name.models import *
-from ietf.name.utils import name
-
-
-# imports LiaisonDetail, OutgoingLiaisonApproval, Uploads
-
-# todo: LiaisonStatementManager, LiaisonManagers, SDOAuthorizedIndividual
-
-# assumptions:
-# - persons have been imported
-# - groups have been imported
-
-purpose_mapping = {
-    1: name(LiaisonStatementPurposeName, "action", "For action", order=1),
-    2: name(LiaisonStatementPurposeName, "comment", "For comment", order=2),
-    3: name(LiaisonStatementPurposeName, "info", "For information", order=3),
-    4: name(LiaisonStatementPurposeName, "response", "In response", order=4),
-    # we drop the "other" category here, it was virtually unused in the old schema
-    }
-
-liaison_attachment_doctype = name(DocTypeName, "liai-att", "Liaison Attachment")
-
-purpose_mapping[None] = purpose_mapping[0] = purpose_mapping[3] # map unknown to "For information"
-purpose_mapping[5] = purpose_mapping[3] # "Other" is mapped to "For information" as default
-
-system_email = Email.objects.get(person__name="(System)")
-system_person = Person.objects.get(name="(System)")
-obviously_bogus_date = datetime.date(1970, 1, 1)
-
-bodies = {
-    'IESG': Group.objects.get(acronym="iesg"),
-    'IETF': Group.objects.get(acronym="ietf"),
-    'IETF IESG': Group.objects.get(acronym="iesg"),
-    'The IETF': Group.objects.get(acronym="ietf"),
-    'IAB/ISOC': Group.objects.get(acronym="iab"),
-    'ISOC/IAB': Group.objects.get(acronym="iab"),
-    'IAB/IESG': Group.objects.get(acronym="iab"),
-    'IAB': Group.objects.get(acronym="iab"),
-    'IETF IAB': Group.objects.get(acronym="iab"),
-    'IETF Transport Directorate': Group.objects.get(acronym="tsvdir"),
-    'Sigtran': Group.objects.get(acronym="sigtran", type="wg"),
-    'IETF RAI WG': Group.objects.get(acronym="rai", type="area"),
-    'IETF RAI': Group.objects.get(acronym="rai", type="area"),
-    'IETF Mobile IP WG': Group.objects.get(acronym="mobileip", type="wg"),
-    "IETF Operations and Management Area": Group.objects.get(acronym="ops", type="area"),
-    "IETF/Operations and Management Area": Group.objects.get(acronym="ops", type="area"),
-    "IETF OAM Area": Group.objects.get(acronym="ops", type="area"),
-    "IETF O&M Area": Group.objects.get(acronym="ops", type="area"),
-    "IETF O&M area": Group.objects.get(acronym="ops", type="area"),
-    "IETF O&M": Group.objects.get(acronym="ops", type="area"),
-    "IETF O&M Area Directors": Group.objects.get(acronym="ops", type="area"),
-    "PWE3 Working Greoup": Group.objects.get(acronym="pwe3", type="wg"),
-    "IETF PWE 3 WG": Group.objects.get(acronym="pwe3", type="wg"),
-    "IETF/Routing Area": Group.objects.get(acronym="rtg", type="area"),
-    "IRTF Internet Area": Group.objects.get(acronym="int", type="area"),
-    "IETF Sub IP Area": Group.objects.get(acronym="sub", type="area"),
-    }
-
-def get_body(name, raw_code):
-    if raw_code:
-        # new tool is storing some group info directly, try decoding it
-        b = None
-        t = raw_code.split("_")
-        if len(t) == 2:
-            if t[0] == "area":
-                b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="area")
-            elif t[0] == "wg":
-                b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="wg")
-            elif t[0] == "sdo":
-                b = lookup_group(name=SDOs.objects.get(pk=t[1]).sdo_name, type="sdo")
-
-        if not b:
-            b = lookup_group(acronym=raw_code)
-
-        return b
-
-    # the from body name is a nice case study in how inconsistencies
-    # build up over time
-    name = (name.replace("(", "").replace(")", "").replace(" Chairs", "")
-            .replace("Working Group", "WG").replace("working group", "WG"))
-    b = bodies.get(name)
-    t = name.split()
-    if not b and name.startswith("IETF"):
-        if len(t) == 1:
-            if "-" in name:
-                t = name.split("-")
-            elif "/" in name:
-                t = name.split("/")
-            b = lookup_group(acronym=t[1].lower(), type="wg")
-        elif len(t) < 3 or t[2].lower() == "wg":
-            b = lookup_group(acronym=t[1].lower(), type="wg")
-        elif t[2].lower() in ("area", "ad"):
-            b = lookup_group(acronym=t[1].lower(), type="area")
-            if not b:
-                b = lookup_group(name=u"%s %s" % (t[1], t[2]), type="area")
-
-    if not b and name.endswith(" WG"):
-        b = lookup_group(acronym=t[-2].lower(), type="wg")
-
-    if not b:
-        b = lookup_group(name=name, type="sdo")
-
-    return b
-
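get_body() above is a fallback chain: decode the raw group code if present, then try the hand-maintained table of known variants and misspellings, then normalize and pattern-match the free-text name. A compressed sketch of the name-matching part only (table entries and return values are illustrative, not the full mapping):

    KNOWN = {"IETF IESG": "iesg", "The IETF": "ietf"}

    def guess_body(name):
        if name in KNOWN:
            return KNOWN[name]
        # normalize the usual noise before pattern-matching
        cleaned = (name.replace("(", "").replace(")", "")
                       .replace("Working Group", "WG").replace(" Chairs", ""))
        if cleaned in KNOWN:
            return KNOWN[cleaned]
        t = cleaned.split()
        if cleaned.startswith("IETF") and len(t) >= 2:
            return t[1].lower()  # "IETF foobar WG" -> acronym guess "foobar"
        return None  # caller falls back to an SDO lookup by full name

    print(guess_body("IETF foobar WG"))  # foobar

Note that the lookup table deliberately contains misspelled keys such as "PWE3 Working Greoup": they match the dirty legacy data verbatim, so they must not be "corrected".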
Group.objects.get(acronym="int", type="area"), - "IETF Sub IP Area": Group.objects.get(acronym="sub", type="area"), - } - -def get_body(name, raw_code): - if raw_code: - # new tool is storing some group info directly, try decoding it - b = None - t = raw_code.split("_") - if len(t) == 2: - if t[0] == "area": - b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="area") - elif t[0] == "wg": - b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="wg") - elif t[0] == "sdo": - b = lookup_group(name=SDOs.objects.get(pk=t[1]).sdo_name, type="sdo") - - if not b: - b = lookup_group(acronym=raw_code) - - return b - - # the from body name is a nice case study in how inconsistencies - # build up over time - name = (name.replace("(", "").replace(")", "").replace(" Chairs", "") - .replace("Working Group", "WG").replace("working group", "WG")) - b = bodies.get(name) - t = name.split() - if not b and name.startswith("IETF"): - if len(t) == 1: - if "-" in name: - t = name.split("-") - elif "/" in name: - t = name.split("/") - b = lookup_group(acronym=t[1].lower(), type="wg") - elif len(t) < 3 or t[2].lower() == "wg": - b = lookup_group(acronym=t[1].lower(), type="wg") - elif t[2].lower() in ("area", "ad"): - b = lookup_group(acronym=t[1].lower(), type="area") - if not b: - b = lookup_group(name=u"%s %s" % (t[1], t[2]), type="area") - - if not b and name.endswith(" WG"): - b = lookup_group(acronym=t[-2].lower(), type="wg") - - if not b: - b = lookup_group(name=name, type="sdo") - - return b - -for o in LiaisonDetail.objects.all().order_by("pk"): - print "importing LiaisonDetail", o.pk - - try: - l = LiaisonStatement.objects.get(pk=o.pk) - except LiaisonStatement.DoesNotExist: - l = LiaisonStatement(pk=o.pk) - - l.title = (o.title or "").strip() - l.purpose = purpose_mapping[o.purpose_id] - if o.purpose_text and not o.purpose and "action" in o.purpose_text.lower(): - o.purpose = purpose_mapping[1] - l.body = (o.body or "").strip() - l.deadline = o.deadline_date - - l.related_to_id = o.related_to_id # should not dangle as we process ids in turn - - def lookup_group(**kwargs): - try: - return Group.objects.get(**kwargs) - except Group.DoesNotExist: - return None - - l.from_name = o.from_body().strip() - l.from_group = get_body(l.from_name, o.from_raw_code) # try to establish link - if not o.person: - l.from_contact = None - else: - try: - l.from_contact = Email.objects.get(address__iexact=o.from_email().address) - except EmailAddress.DoesNotExist: - l.from_contact = old_person_to_person(o.person).email_set.order_by('-active')[0] - - if o.by_secretariat: - l.to_name = o.submitter_name - if o.submitter_email: - l.to_name += " <%s>" % o.submitter_email - else: - l.to_name = o.to_body - l.to_name = l.to_name.strip() - l.to_group = get_body(l.to_name, o.to_raw_code) # try to establish link - l.to_contact = (o.to_poc or "").strip() - - l.reply_to = (o.replyto or "").strip() - - l.response_contact = (o.response_contact or "").strip() - l.technical_contact = (o.technical_contact or "").strip() - l.cc = (o.cc1 or "").strip() - - l.submitted = o.submitted_date - l.modified = o.last_modified_date - if not l.modified and l.submitted: - l.modified = l.submitted - if not o.approval: - # no approval object means it's approved alright - weird, we - # have to fake the approved date then - l.approved = l.modified or l.submitted or datetime.datetime.now() - else: - l.approved = o.approval.approval_date if o.approval.approved else None - - l.action_taken = o.action_taken - - l.save() - - 
l.attachments.all().delete() - for i, u in enumerate(o.uploads_set.order_by("pk")): - attachment = Document() - attachment.title = u.file_title - attachment.type = liaison_attachment_doctype - attachment.name = l.name() + ("-attachment-%s" % (i + 1)) - attachment.time = l.submitted - # we should fixup the filenames, but meanwhile, store it here - attachment.external_url = "file%s%s" % (u.file_id, u.file_extension) - attachment.save() - - DocAlias.objects.get_or_create(document=attachment, name=attachment.name) - - e = make_revision_event(attachment, system_person) - if l.from_contact and l.from_contact.person: - e.by = l.from_contact.person - print e.by - e.save() - - l.attachments.add(attachment) - - diff --git a/redesign/importing/import-meetings.py b/redesign/importing/import-meetings.py deleted file mode 100755 index 09213ae98..000000000 --- a/redesign/importing/import-meetings.py +++ /dev/null @@ -1,481 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime, pytz - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -from django.template.defaultfilters import slugify - -import datetime - -from ietf.idtracker.models import AreaDirector, IETFWG, Acronym, IRTF, PersonOrOrgInfo -from ietf.meeting.models import * -from ietf.proceedings.models import Meeting as MeetingOld, MeetingVenue, MeetingRoom, NonSession, WgMeetingSession, WgAgenda, Minute, Slide, WgProceedingsActivities, NotMeetingGroup -from ietf.person.models import * -from ietf.doc.models import Document, DocAlias, State, DocEvent -from redesign.importing.utils import old_person_to_person, dont_save_queries, make_revision_event -from ietf.name.models import * -from ietf.name.utils import name - -import_meetings_from = None -if len(sys.argv) > 1: - import_meetings_from = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d") - - -dont_save_queries() - -# imports Meeting, MeetingVenue, MeetingRoom, NonSession, -# WgMeetingSession, WgAgenda, Minute, Slide, upload events from -# WgProceedingsActivities, NotMeetingGroup - -# assumptions: -# - persons have been imported -# - groups have been imported - -ietf_meeting = name(MeetingTypeName, "ietf", "IETF") -interim_meeting = name(MeetingTypeName, "interim", "Interim") - -session_status_mapping = { - 1: name(SessionStatusName, "schedw", "Waiting for Scheduling"), - 2: name(SessionStatusName, "apprw", "Waiting for Approval"), - 3: name(SessionStatusName, "appr", "Approved"), - 4: name(SessionStatusName, "sched", "Scheduled"), - 5: name(SessionStatusName, "canceled", "Canceled"), - 6: name(SessionStatusName, "disappr", "Disapproved"), - } - -status_not_meeting = name(SessionStatusName, "notmeet", "Not meeting") -status_deleted = name(SessionStatusName, "deleted", "Deleted") - -session_status_mapping[0] = session_status_mapping[1] # assume broken statuses of 0 are actually cancelled - -other_slot = name(TimeSlotTypeName, "other", "Other") -session_slot = name(TimeSlotTypeName, "session", "Session") -break_slot = name(TimeSlotTypeName, "break", "Break") -registration_slot = name(TimeSlotTypeName, "reg", "Registration") -plenary_slot = name(TimeSlotTypeName, "plenary", "Plenary") - -conflict_constraints = { - 1: name(ConstraintName, "conflict", "Conflicts with"), - 2: name(ConstraintName, "conflic2", "Conflicts with (secondary)"), - 3: name(ConstraintName, "conflic3", "Conflicts with 
(tertiary)"), - } - -agenda_doctype = name(DocTypeName, "agenda", "Agenda") -minutes_doctype = name(DocTypeName, "minutes", "Minutes") -slides_doctype = name(DocTypeName, "slides", "Slides") - -system_person = Person.objects.get(name="(System)") -obviously_bogus_date = datetime.date(1970, 1, 1) - -for o in MeetingOld.objects.all(): - print "importing Meeting", o.pk - - try: - m = Meeting.objects.get(number=o.meeting_num) - except: - m = Meeting(number="%s" % o.meeting_num) - m.pk = o.pk - - m.type = ietf_meeting - m.date = o.start_date - m.city = o.city - - # convert country to code - country_code = None - for k, v in pytz.country_names.iteritems(): - if v == o.country: - country_code = k - break - - if not country_code: - country_fallbacks = { - 'USA': 'US' - } - - country_code = country_fallbacks.get(o.country) - - if country_code: - m.country = country_code - else: - print "unknown country", o.country - - - time_zone_lookup = { - ("IE", "Dublin"): "Europe/Dublin", - ("FR", "Paris"): "Europe/Paris", - ("CA", "Vancouver"): "America/Vancouver", - ("CZ", "Prague"): "Europe/Prague", - ("US", "Chicago"): "America/Chicago", - ("US", "Anaheim"): "America/Los_Angeles", - ("NL", "Maastricht"): "Europe/Amsterdam", - ("CN", "Beijing"): "Asia/Shanghai", - ("JP", "Hiroshima"): "Asia/Tokyo", - ("SE", "Stockholm"): "Europe/Stockholm", - ("US", "San Francisco"): "America/Los_Angeles", - ("US", "Minneapolis"): "America/Menominee", - } - - m.time_zone = time_zone_lookup.get((m.country, m.city), "") - if not m.time_zone: - print "unknown time zone for", m.get_country_display(), m.city - - m.venue_name = "" # no source for that in the old DB? - m.venue_addr = "" # no source for that in the old DB? - try: - venue = o.meetingvenue_set.get() - m.break_area = venue.break_area_name - m.reg_area = venue.reg_area_name - except MeetingVenue.DoesNotExist: - pass - - # missing following semi-used fields from old Meeting: end_date, - # ack, agenda_html/agenda_text, future_meeting - - m.save() - -meeting_cache = {} -def get_meeting(num): - if not num in meeting_cache: - meeting_cache[num] = Meeting.objects.get(number="%s" % num) - return meeting_cache[num] - -for o in MeetingRoom.objects.all(): - print "importing MeetingRoom", o.pk - - try: - r = Room.objects.get(pk=o.pk) - except Room.DoesNotExist: - r = Room(pk=o.pk) - - r.meeting = get_meeting(o.meeting_id) - r.name = o.room_name - r.save() - -def parse_time_desc(o): - t = o.time_desc.replace(' ', '') - - start_time = datetime.time(int(t[0:2]), int(t[2:4])) - end_time = datetime.time(int(t[5:7]), int(t[7:9])) - - d = o.meeting.start_date + datetime.timedelta(days=o.day_id) - - return (datetime.datetime.combine(d, start_time), datetime.datetime.combine(d, end_time)) - -requested_length_mapping = { - None: 0, # assume NULL to mean nothing particular requested - "1": 60 * 60, - "2": 90 * 60, - "3": 120 * 60, - "4": 150 * 60, - } - -non_group_mapping = { - "plenaryw": "ietf", - "plenaryt": "ietf", - "newcomer": "edu", - "editor": "edu", - "wgchair": "edu", - "sectut": "edu", - "protut": "edu", - "iepg": "iepg", - "rfc": "edu", - "wgleader": "edu", - "xml2rfc": "edu", - "rbst": "edu", - "recp": "ietf", - "MIBDOC": "edu", - "IE": "iepg", - "newcomF": "edu", - "WritRFC": "edu", - "Orien": "edu", - "newwork": "edu", - "leadership": "edu", - "ipv6spec": "edu", - "Wel": "ietf", - "IDRTut": "edu", - "ToolsTut": "edu", - "cosp": "tools", - "doclife": "edu", - "dnstut": "edu", - "xmltut": "edu", - "RFCEd": "edu", - "IDRBasics": "edu", - "newcomSWED": "edu", - "MIBTut": "edu", - 
"IDR75": "edu", - "NewcomerJP": "edu", - "MIBT": "edu", - "DNSProg": "edu", - "natTUT": "edu", - "NewcomerCHINA": "edu", - "CreatingID": "edu", - "NewMeetGreet": "ietf", - "appsprepmeeting": "edu", - "NewcomersFrench": "edu", - "NewComMandar": "edu", - "AdminP": "ietf", - } - -def import_materials(wg_meeting_session, session): - def import_material_kind(kind, doctype): - # import agendas - irtf = 0 - if wg_meeting_session.irtf: - irtf = wg_meeting_session.group_acronym_id - found = kind.objects.filter(meeting=wg_meeting_session.meeting_id, - group_acronym_id=wg_meeting_session.group_acronym_id, - irtf=irtf, - interim=0) - - for o in found: - name = "%s-%s-%s" % (doctype.slug, session.meeting.number, session.group.acronym) - if kind == Slide: - name += "-%s" % o.slide_num - - if session.name: - name += "-%s" % slugify(session.name) - - name = name.lower() - - try: - d = Document.objects.get(type=doctype, docalias__name=name) - except Document.DoesNotExist: - d = Document(type=doctype, name=name) - - if kind == Slide: - d.title = o.slide_name.strip() - l = o.file_loc() - d.external_url = l[l.find("slides/") + len("slides/"):] - d.order = o.order_num or 1 - else: - session_name = session.name if session.name else session.group.acronym.upper() - d.title = u"%s for %s at %s" % (doctype.name, session_name, session.meeting) - d.external_url = o.filename # save filenames for now as they don't appear to be quite regular - d.rev = "00" - d.group = session.group - d.time = datetime.datetime.combine(session.meeting.date, datetime.time(0, 0, 0)) # we may have better estimate below - d.save() - - d.set_state(State.objects.get(type=doctype, slug="active")) - - DocAlias.objects.get_or_create(document=d, name=name) - - session.materials.add(d) - - # try to create a doc event to figure out who uploaded it - e = make_revision_event(d, system_person) - - t = d.type_id - if d.type_id == "slides": - t = "slide, '%s" % d.title - activities = WgProceedingsActivities.objects.filter(group_acronym=wg_meeting_session.group_acronym_id, - meeting=wg_meeting_session.meeting_id, - activity__startswith=t, - activity__endswith="was uploaded")[:1] - if activities: - a = activities[0] - - e.time = datetime.datetime.combine(a.act_date, datetime.time(*[int(s) for s in a.act_time.split(":")])) - try: - e.by = old_person_to_person(a.act_by) or system_person - except PersonOrOrgInfo.DoesNotExist: - pass - - d.time = e.time - d.save() - else: - print "NO UPLOAD ACTIVITY RECORD for", d.name.encode("utf-8"), t.encode("utf-8"), wg_meeting_session.group_acronym_id, wg_meeting_session.meeting_id - - e.save() - - import_material_kind(WgAgenda, agenda_doctype) - import_material_kind(Minute, minutes_doctype) - import_material_kind(Slide, slides_doctype) - -obviously_bogus_date = datetime.date(1970, 1, 1) - -all_sessions = WgMeetingSession.objects.all().order_by("pk") -if import_meetings_from: - all_sessions = all_sessions.filter(last_modified_date__gte=import_meetings_from) - -for o in all_sessions.iterator(): - # num_session is unfortunately not quite reliable, seems to be - # right for 1 or 2 but not 3 and it's sometimes null - sessions = o.num_session or 1 - if o.sched_time_id3: - sessions = 3 - - print "importing WgMeetingSession", o.pk, "subsessions", sessions - - for i in range(1, 1 + sessions): - pk = o.pk + (i - 1) * 10000 # move extra session out of the way - try: - s = Session.objects.get(pk=pk) - except: - s = Session(pk=pk) - s.meeting = get_meeting(o.meeting_id) - - def get_timeslot(attr): - meeting_time = getattr(o, attr) - 
if not meeting_time: - return None - room = Room.objects.get(pk=getattr(o, attr.replace("time", "room") + "_id")) - - starts, ends = parse_time_desc(meeting_time) - - slots = TimeSlot.objects.filter(meeting=s.meeting, time=starts, location=room).filter(models.Q(session=s) | models.Q(session=None)) - if slots: - slot = slots[0] - else: - slot = TimeSlot(meeting=s.meeting, time=starts, location=room) - - slot.type = session_slot - slot.name = meeting_time.session_name.session_name if meeting_time.session_name_id else "Unknown" - slot.duration = ends - starts - - return slot - - timeslot = get_timeslot("sched_time_id%s" % i) - if o.irtf: - s.group = Group.objects.get(acronym=IRTF.objects.get(pk=o.group_acronym_id).acronym.lower()) - else: - acronym = Acronym.objects.get(pk=o.group_acronym_id) - if o.group_acronym_id < 0: - # this wasn't actually a WG session, but rather a tutorial - # or similar - a = non_group_mapping.get(acronym.acronym) - if not a: - a = "ietf" - print "UNKNOWN phony group", o.group_acronym_id, acronym.acronym, "falling back to '%s'" % a - s.group = Group.objects.get(acronym=a) - s.name = acronym.name - - if timeslot: - if timeslot.name == "Unknown": - timeslot.name = acronym.name - - if "Plenary" in timeslot.name: - timeslot.type = plenary_slot - else: - timeslot.type = other_slot - else: - s.group = Group.objects.get(acronym=acronym.acronym) - s.attendees = o.number_attendee - s.agenda_note = (o.special_agenda_note or "").strip() - s.requested = o.requested_date or obviously_bogus_date - s.requested_by = old_person_to_person(o.requested_by) if o.requested_by else system_person - s.requested_duration = requested_length_mapping[getattr(o, "length_session%s" % i)] - s.comments = (o.special_req or "").strip() - conflict_other = (o.conflict_other or "").strip() - if conflict_other: - if s.comments: - s.comments += " " - s.comments += u"(other conflicts: %s)" % conflict_other - s.status = session_status_mapping[o.status_id or 5] - - s.scheduled = o.scheduled_date - s.modified = o.last_modified_date or obviously_bogus_date - - s.save() - - if timeslot: - timeslot.session = s - timeslot.modified = s.modified - timeslot.save() - - import_materials(o, s) - - # some sessions have been scheduled over multiple time slots - if i < 3: - timeslot = get_timeslot("combined_time_id%s" % i) - if timeslot: - timeslot.session = s - timeslot.modified = s.modified - timeslot.save() - - - for i in (1, 2, 3): - conflict = (getattr(o, "conflict%s" % i) or "").replace(",", " ").lower() - conflicting_groups = [g for g in conflict.split() if g] - for target in Group.objects.filter(acronym__in=conflicting_groups): - Constraint.objects.get_or_create( - meeting=s.meeting, - source=s.group, - target=target, - name=conflict_constraints[i]) - - - # missing following fields from old: ts_status_id (= third session - # status id, third session required AD approval), - # combined_room_id1/2, combined_time_id1/2 - -for o in NonSession.objects.all().order_by('pk').select_related("meeting").iterator(): - print "importing NonSession", o.pk - - if o.time_desc in ("", "0"): - print "IGNORING non-scheduled NonSession", o.non_session_ref.name - continue - - meeting = get_meeting(o.meeting_id) - - # some non-sessions are scheduled every day, but only if there's a - # session nearby, figure out which days this corresponds to - days = set() - if o.day_id == None: - t = datetime.time(int(o.time_desc[-4:][0:2]), int(o.time_desc[-4:][2:4])) - - for s in TimeSlot.objects.filter(meeting=meeting): - if s.time.time() == t: - 
days.add((s.time.date() - meeting.date).days) - else: - days.add(o.day_id) - - for day in days: - o.day_id = day - starts, ends = parse_time_desc(o) - name = o.non_session_ref.name - - try: - slot = TimeSlot.objects.get(meeting=meeting, time=starts, name=name) - except TimeSlot.DoesNotExist: - slot = TimeSlot(meeting=meeting, time=starts, name=name) - - slot.location = None - if o.non_session_ref_id == 1: - slot.type = registration_slot - else: - slot.type = break_slot - - slot.duration = ends - starts - slot.show_location = o.show_break_location - slot.save() - - -for o in NotMeetingGroup.objects.all().select_related('group_acronym'): - if o.group_acronym_id == None or o.group_acronym == None: - print "SKIPPING NotMeetingGroup with group_acronym_id", o.group_acronym_id - continue # bogus data - - print "importing NotMeetingGroup", o.group_acronym.acronym, o.meeting_id - try: - group = Group.objects.get(acronym=o.group_acronym.acronym) - except Group.DoesNotExist: - print "SKIPPING", o.group_acronym.acronym - continue - meeting = get_meeting(o.meeting_id) - - if not Session.objects.filter(meeting=meeting, group=group): - Session.objects.get_or_create(meeting=meeting, - group=group, - status=status_not_meeting, - defaults=dict(requested_by=system_person, - requested_duration=0)) - diff --git a/redesign/importing/import-persons.py b/redesign/importing/import-persons.py deleted file mode 100755 index c64e469d5..000000000 --- a/redesign/importing/import-persons.py +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -from ietf.idtracker.models import IESGLogin, AreaDirector, IETFWG, PersonOrOrgInfo, IDAuthor -from ietf.ietfauth.models import LegacyWgPassword, LegacyLiaisonUser -from ietf.liaisons.models import LiaisonDetail, LiaisonManagers, SDOAuthorizedIndividual -from ietf.person.models import * -from redesign.importing.utils import * - -# creates system person and email - -# imports AreaDirector persons that are connected to an IETFWG, -# persons from IDAuthor, announcement originators from Announcements, -# requesters from WgMeetingSession, LiaisonDetail persons, -# LiaisonManagers/SDOAuthorizedIndividual persons, -# WgProceedingsActivities persons - -# should probably import -# PersonOrOrgInfo/PostalAddress/EmailAddress/PhoneNumber fully - -import_docs_from = None -if len(sys.argv) > 1: - import_docs_from = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d") - - -# make sure special system user/email is created -print "creating (System) person and email" -try: - system_person = Person.objects.get(name="(System)") -except Person.DoesNotExist: - system_person = Person.objects.create( - id=0, # special value - name="(System)", - ascii="(System)", - address="", - ) - - system_person = Person.objects.get(name="(System)") - -if system_person.id != 0: # work around bug in Django - Person.objects.filter(id=system_person.id).update(id=0) - system_person = Person.objects.get(id=0) - -system_alias = Alias.objects.get_or_create( - person=system_person, - name=system_person.name - ) - -system_email = Email.objects.get_or_create( - address="(System)", - defaults=dict(active=True, person=system_person) - ) - -# LegacyWgPassword -for o in LegacyWgPassword.objects.all(): - print "importing LegacyWgPassword", 
o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - if not email: - continue - - username = o.login_name[:30] - persons = Person.objects.filter(user__username=username) - if persons: - if persons[0] != email.person: - print "SKIPPING", o.login_name, "who is connected to another person " - continue - - user, _ = User.objects.get_or_create(username=username) - email.person.user = user - email.person.save() - -# LegacyLiaisonUser -for o in LegacyLiaisonUser.objects.all(): - print "importing LegacyLiaisonUser", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - if not email: - continue - - username = o.login_name[:30] - persons = Person.objects.filter(user__username=username) - if persons: - if persons[0] != email.person: - print "SKIPPING", o.login_name, "who is connected to another person " - continue - - user, _ = User.objects.get_or_create(username=username) - email.person.user = user - email.person.save() - -# IESGLogin -for o in IESGLogin.objects.all(): - print "importing IESGLogin", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - if not o.person: - persons = PersonOrOrgInfo.objects.filter(first_name=o.first_name, last_name=o.last_name) - if persons: - o.person = persons[0] - else: - print "NO PERSON", o.person_id - continue - - email = get_or_create_email(o, create_fake=False) - if not email: - continue - - user, _ = User.objects.get_or_create(username=o.login_name) - email.person.user = user - email.person.save() - -# AreaDirector from IETFWG persons -for o in AreaDirector.objects.filter(ietfwg__in=IETFWG.objects.all()).exclude(area=None).distinct().order_by("pk").iterator(): - print "importing AreaDirector (from IETFWG) persons", o.pk - - get_or_create_email(o, create_fake=False) - -# IESGHistory persons -for o in PersonOrOrgInfo.objects.filter(iesghistory__id__gte=1).order_by("pk").distinct(): - print "importing IESGHistory person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - -# WgMeetingSession persons -for o in PersonOrOrgInfo.objects.filter(wgmeetingsession__pk__gte=1).distinct().order_by("pk").iterator(): - print "importing WgMeetingSession persons", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - get_or_create_email(o, create_fake=False) - -# Announcement persons -for o in PersonOrOrgInfo.objects.filter(announcement__announcement_id__gte=1).order_by("pk").distinct(): - print "importing Announcement originator", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - -# LiaisonManagers persons -for o in LiaisonManagers.objects.order_by("pk"): - print "importing LiaisonManagers person", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - addresses = o.person.emailaddress_set.filter(priority=o.email_priority).filter(address__contains="@")[:1] - if addresses: - possibly_import_other_priority_email(email, addresses[0]) - -# SDOAuthorizedIndividual persons -for o in PersonOrOrgInfo.objects.filter(sdoauthorizedindividual__pk__gte=1).order_by("pk").distinct(): - print "importing SDOAuthorizedIndividual person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=False) - -# Liaison persons (these are used as 
from contacts) -for o in LiaisonDetail.objects.exclude(person=None).order_by("pk"): - print "importing LiaisonDetail person", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=True) - - # we may also need to import email address used specifically for - # the document - if "@" in email.address: - try: - possibly_import_other_priority_email(email, o.from_email()) - except EmailAddress.DoesNotExist: - pass - -# WgProceedingsActivities persons -for o in PersonOrOrgInfo.objects.filter(wgproceedingsactivities__id__gte=1).order_by("pk").distinct(): - print "importing WgProceedingsActivities person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8') - - email = get_or_create_email(o, create_fake=True) - -# IDAuthor persons -all_authors = IDAuthor.objects.all().order_by('id').select_related('person') -if import_docs_from: - all_authors = all_authors.filter(document__last_modified_date__gte=import_docs_from) - -for o in all_authors.iterator(): - print "importing IDAuthor", o.id, o.person_id, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8') - email = get_or_create_email(o, create_fake=True) - - # we may also need to import email address used specifically for - # the document - - addresses = o.person.emailaddress_set.filter(type='I-D', priority=o.document_id).filter(address__contains="@")[:1] - if addresses: - possibly_import_other_priority_email(email, addresses[0]) diff --git a/redesign/importing/import-reused-tables.py b/redesign/importing/import-reused-tables.py deleted file mode 100755 index a82d8add7..000000000 --- a/redesign/importing/import-reused-tables.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/python - -# boiler plate -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -# script - -from django.db.models import TextField, CharField - -from django.contrib.sites.models import Site -from ietf.redirects.models import Redirect, Suffix, Command -from ietf.iesg.models import TelechatAgendaItem, WGAction -from ietf.ipr.models import IprSelecttype, IprLicensing, IprDetail, IprContact, IprNotification, IprUpdate -from ietf.submit.models import IdSubmissionStatus, IdSubmissionDetail, IdApprovedDetail, TempIdAuthors -from django.contrib.auth.models import User - -known_models = { - 'base': [User], - 'others': [Site, - Redirect, Suffix, Command, - TelechatAgendaItem, WGAction, - IprSelecttype, IprLicensing, IprDetail, IprContact, IprNotification, IprUpdate, - IdSubmissionStatus, IdSubmissionDetail, IdApprovedDetail, - TempIdAuthors] - } - -models_to_copy = known_models[sys.argv[1]] - -def queryset_chunks(q, n): - """Split queryset q up in chunks of max size n.""" - return (q[i:i+n] for i in range(0, q.count(), n)) - -def insert_many_including_pk(objects, using="default", table=None): - """Insert list of Django objects in one SQL query. Objects must be - of the same Django model. 
Note that save is not called and signals - on the model are not raised.""" - if not objects: - return - - import django.db.models - from django.db import connections - con = connections[using] - - model = objects[0].__class__ - fields = [f for f in model._meta.fields] - parameters = [] - for o in objects: - pars = [] - for f in fields: - pars.append(f.get_db_prep_save(f.pre_save(o, True), connection=con)) - parameters.append(pars) - - if not table: - table = model._meta.db_table - column_names = ",".join(con.ops.quote_name(f.column) for f in fields) - placeholders = ",".join(("%s",) * len(fields)) - con.cursor().executemany( - "replace into %s (%s) values (%s)" % (table, column_names, placeholders), - parameters) - -def clean_chunk(model, chunk): - for o in chunk: - if model == IprDetail: - if o.applies_to_all == "": - o.applies_to_all = None - - for f in model._meta.fields: - # change non-nullable nulls on string fields to "" - if type(f) in (CharField, TextField) and not f.null and getattr(o, f.name) == None: - setattr(o, f.name, "") - -for model in models_to_copy: - sys.stdout.write("copying %s " % model._meta.object_name) - sys.stdout.flush() - - irregular_models = [Site] - if model in irregular_models: - table_name = Site._meta.db_table - else: - table_name = "%s_%s" % (model._meta.app_label, model._meta.object_name.lower()) - - for chunk in queryset_chunks(model.objects.using("legacy").all(), 1000): - clean_chunk(model, chunk) - insert_many_including_pk(chunk, using="default", table=table_name) - sys.stdout.write(".") - sys.stdout.flush() - - sys.stdout.write("\n") diff --git a/redesign/importing/import-roles.py b/redesign/importing/import-roles.py deleted file mode 100755 index 036ed03ca..000000000 --- a/redesign/importing/import-roles.py +++ /dev/null @@ -1,318 +0,0 @@ -#!/usr/bin/python - -import sys, os, re, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - -from ietf.person.models import * -from ietf.group.models import * -from ietf.name.models import * -from ietf.name.utils import name -from redesign.importing.utils import get_or_create_email - -from ietf.idtracker.models import IESGLogin, AreaDirector, PersonOrOrgInfo, WGChair, WGEditor, WGSecretary, WGTechAdvisor, ChairsHistory, Role as OldRole, Acronym, IRTFChair -from ietf.liaisons.models import LiaisonManagers, SDOAuthorizedIndividual -from ietf.wgchairs.models import WGDelegate -from ietf.proceedings.models import IESGHistory -from ietf.utils.history import * - -# assumptions: -# - persons have been imported -# - groups have been imported - -# imports roles from IESGLogin, AreaDirector, WGEditor, WGChair, -# IRTFChair, WGSecretary, WGTechAdvisor, NomCom chairs from -# ChairsHistory, IESGHistory, Role, LiaisonManagers, -# SDOAuthorizedIndividual, WGDelegate - -area_director_role = name(RoleName, "ad", "Area Director") -pre_area_director_role = name(RoleName, "pre-ad", "Incoming Area Director") -chair_role = name(RoleName, "chair", "Chair") -editor_role = name(RoleName, "editor", "Editor") -secretary_role = name(RoleName, "secr", "Secretary") -techadvisor_role = name(RoleName, "techadv", "Tech Advisor") -exec_director_role = name(RoleName, "execdir", "Executive Director") -adm_director_role = name(RoleName, "admdir", "Administrative Director") -liaison_manager_role = name(RoleName, "liaiman", "Liaison 
Manager") -authorized_role = name(RoleName, "auth", "Authorized Individual") -delegate_role = name(RoleName, "delegate", "Delegate") - -# import IANA authorized individuals -for o in User.objects.using("legacy").filter(groups__name="IANA"): - print "Importing IANA group member", o - - if o.username == "amanda.barber@icann.org": - o.username = "amanda.baber@icann.org" - - person = PersonOrOrgInfo.objects.filter(iesglogin__login_name=o.username)[0] - - group = Group.objects.get(acronym="iana") - email = get_or_create_email(person, create_fake=False) - - Role.objects.get_or_create(name=authorized_role, group=group, person=email.person, email=email) - -# WGDelegate -for o in WGDelegate.objects.all().order_by("pk"): - print "importing WGDelegate", o.pk, unicode(o.wg).encode("utf-8"), unicode(o.person).encode("utf-8") - - group = Group.objects.get(acronym=o.wg.group_acronym.acronym) - email = get_or_create_email(o, create_fake=False) - - Role.objects.get_or_create(name=delegate_role, group=group, person=email.person, email=email) - -# SDOAuthorizedIndividual -for o in SDOAuthorizedIndividual.objects.all().order_by("pk"): - print "importing SDOAuthorizedIndividual", o.pk, unicode(o.sdo).encode("utf-8"), unicode(o.person).encode("utf-8") - - group = Group.objects.get(name=o.sdo.sdo_name, type="sdo") - email = get_or_create_email(o, create_fake=False) - - Role.objects.get_or_create(name=authorized_role, group=group, person=email.person, email=email) - -# LiaisonManagers -for o in LiaisonManagers.objects.all().order_by("pk"): - print "importing LiaisonManagers", o.pk, unicode(o.sdo).encode("utf-8"), unicode(o.person).encode("utf-8") - - group = Group.objects.get(name=o.sdo.sdo_name, type="sdo") - email = Email.objects.get(address__iexact=o.person.email(priority=o.email_priority)[1]) - - Role.objects.get_or_create(name=liaison_manager_role, group=group, person=email.person, email=email) - -# Role -for o in OldRole.objects.all().order_by('pk'): - acronym = o.role_name.lower() - role = chair_role - - if o.id == OldRole.NOMCOM_CHAIR: - continue # handled elsewhere - - print "importing Role", o.id, o.role_name, unicode(o.person).encode("utf-8") - - email = get_or_create_email(o, create_fake=False) - official_email = email - - if o.role_name.endswith("Executive Director"): - acronym = acronym[:-(len("Executive Director") + 1)] - role = exec_director_role - - if o.id == OldRole.IAD_CHAIR: - acronym = "ietf" - role = adm_director_role - official_email, _ = Email.objects.get_or_create(address="iad@ietf.org") - - if o.id == OldRole.IETF_CHAIR: - official_email, _ = Email.objects.get_or_create(address="chair@ietf.org") - - if o.id == OldRole.IAB_CHAIR: - official_email, _ = Email.objects.get_or_create(address="iab-chair@ietf.org") - - if o.id == OldRole.RSOC_CHAIR: - official_email, _ = Email.objects.get_or_create(address="rsoc-chair@iab.org") - - if o.id == 9: - official_email, _ = Email.objects.get_or_create(address="rfc-ise@rfc-editor.org") - - group = Group.objects.get(acronym=acronym) - - Role.objects.get_or_create(name=role, group=group, person=email.person, email=official_email) - -# WGEditor -for o in WGEditor.objects.all(): - acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym - print "importing WGEditor", acronym, o.person - - email = get_or_create_email(o, create_fake=True) - group = Group.objects.get(acronym=acronym) - - Role.objects.get_or_create(name=editor_role, group=group, person=email.person, email=email) - -# WGSecretary -for o in WGSecretary.objects.all(): - acronym = 
Acronym.objects.get(acronym_id=o.group_acronym_id).acronym - print "importing WGSecretary", acronym, o.person - - email = get_or_create_email(o, create_fake=True) - group = Group.objects.get(acronym=acronym) - - Role.objects.get_or_create(name=secretary_role, group=group, person=email.person, email=email) - -# WGTechAdvisor -for o in WGTechAdvisor.objects.all(): - acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym - print "importing WGTechAdvisor", acronym, o.person - - email = get_or_create_email(o, create_fake=True) - group = Group.objects.get(acronym=acronym) - - Role.objects.get_or_create(name=techadvisor_role, group=group, person=email.person, email=email) - -# WGChair -for o in WGChair.objects.all(): - # there's some garbage in this table, so wear double safety belts - try: - acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym - except Acronym.DoesNotExist: - print "SKIPPING WGChair with unknown acronym id", o.group_acronym_id - continue - - try: - person = o.person - except PersonOrOrgInfo.DoesNotExist: - print "SKIPPING WGChair", acronym, "with invalid person id", o.person_id - continue - - try: - group = Group.objects.get(acronym=acronym) - except Group.DoesNotExist: - print "SKIPPING WGChair", o.person, "with non-existing group", acronym - continue - - if group.acronym == "none": - print "SKIPPING WGChair", o.person, "with bogus group", group.acronym - continue - - print "importing WGChair", acronym, o.person - - email = get_or_create_email(o, create_fake=True) - - Role.objects.get_or_create(name=chair_role, group=group, person=email.person, email=email) - -# IRTFChair -for o in IRTFChair.objects.all(): - acronym = o.irtf.acronym.lower() - if acronym == "irtf": - # we already got the IRTF chair from Role, and the data in here is buggy - continue - - print "importing IRTFChair", acronym, o.person - - email = get_or_create_email(o, create_fake=True) - group = Group.objects.get(acronym=acronym) - - Role.objects.get_or_create(name=chair_role, group=group, person=email.person, email=email) - -# NomCom chairs -official_email, _ = Email.objects.get_or_create(address="nomcom-chair@ietf.org") -nomcom_groups = list(Group.objects.filter(acronym__startswith="nomcom").exclude(acronym="nomcom")) -for o in ChairsHistory.objects.filter(chair_type=OldRole.NOMCOM_CHAIR): - print "importing NOMCOM chair", o - for g in nomcom_groups: - if ("%s/%s" % (o.start_year, o.end_year)) in g.name: - break - - email = get_or_create_email(o, create_fake=False) - - Role.objects.get_or_create(name=chair_role, group=g, person=email.person, email=official_email) - -# IESGLogin -for o in IESGLogin.objects.all(): - print "importing IESGLogin", o.pk, o.first_name.encode("utf-8"), o.last_name.encode("utf-8") - - if not o.person: - persons = PersonOrOrgInfo.objects.filter(first_name=o.first_name, last_name=o.last_name) - if persons: - o.person = persons[0] - else: - print "NO PERSON", o.person_id - continue - - email = get_or_create_email(o, create_fake=False) - # current ADs are imported below - if email and o.user_level == IESGLogin.SECRETARIAT_LEVEL: - if not Role.objects.filter(name=secretary_role, person=email.person): - Role.objects.create(name=secretary_role, group=Group.objects.get(acronym="secretariat"), person=email.person, email=email) - u = email.person.user - if u: - u.is_staff = True - u.is_superuser = True - u.save() - -# AreaDirector -for o in AreaDirector.objects.all(): - if not o.area: - print "NO AREA", o.person, o.area_id - continue - - print "importing 
AreaDirector", o.area, o.person - email = get_or_create_email(o, create_fake=False) - - area = Group.objects.get(acronym=o.area.area_acronym.acronym) - - role_type = area_director_role - - try: - if IESGLogin.objects.get(person=o.person).user_level == 4: - role_type = pre_area_director_role - except IESGLogin.DoesNotExist: - pass - - r = Role.objects.filter(name=role_type, - person=email.person) - if r and r[0].group == "iesg": - r[0].group = area - r[0].name = role_type - r[0].save() - else: - Role.objects.get_or_create(name=role_type, group=area, person=email.person, email=email) - -# IESGHistory -emails_for_time = {} -for o in IESGHistory.objects.all().order_by('meeting__start_date', 'pk'): - print "importing IESGHistory", o.pk, o.area, o.person, o.meeting - email = get_or_create_email(o, create_fake=False) - if not email: - "SKIPPING IESGHistory with unknown email" - continue - - # our job here is to make sure we either have the same AD today or - # got proper GroupHistory and RoleHistory objects in the database; - # there's only incomplete information available in the database so - # the reconstructed history will necessarily not be entirely - # accurate, just good enough to conclude who was AD - area = Group.objects.get(acronym=o.area.area_acronym.acronym, type="area") - meeting_time = datetime.datetime.combine(o.meeting.start_date, datetime.time(0, 0, 0)) - - key = (area, meeting_time) - if not key in emails_for_time: - emails_for_time[key] = [] - - emails_for_time[key].append(email) - - history = find_history_active_at(area, meeting_time) - if (history and history.rolehistory_set.filter(person=email.person) or - not history and area.role_set.filter(person=email.person)): - continue - - if history and history.time == meeting_time: - # add to existing GroupHistory - RoleHistory.objects.create(name=area_director_role, group=history, person=email.person, email=email) - else: - existing = history if history else area - - h = GroupHistory(group=area, - time=meeting_time, - name=existing.name, - acronym=existing.acronym, - state=existing.state, - type=existing.type, - parent=existing.parent, - ad=existing.ad, - list_email=existing.list_email, - list_subscribe=existing.list_subscribe, - list_archive=existing.list_archive, - comments=existing.comments, - ) - h.save() - - # we need to add all emails for this area at this time - # because the new GroupHistory resets the known roles - for e in emails_for_time[key]: - RoleHistory.objects.get_or_create(name=area_director_role, group=h, person=e.person, email=e) - diff --git a/redesign/importing/import-states.py b/redesign/importing/import-states.py deleted file mode 100755 index 87da1e2cd..000000000 --- a/redesign/importing/import-states.py +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/python - -import sys, os, datetime - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path - -from ietf import settings -settings.USE_DB_REDESIGN_PROXY_CLASSES = False - -from django.core import management -management.setup_environ(settings) - - -import workflows.models -from ietf.ietfworkflows.models import StateDescription -from ietf.idrfc.mirror_rfc_editor_queue import get_rfc_state_mapping -from ietf.doc.models import * - -# adds states for documents and import states from workflows.Workflow -# and ietfworkflows.StateDescription - -# state types -draft_type, _ = StateType.objects.get_or_create(slug="draft", label="State") -draft_iesg_type, _ = StateType.objects.get_or_create(slug="draft-iesg", label="IESG 
state") -draft_iana_type, _ = StateType.objects.get_or_create(slug="draft-iana", label="IANA state") -draft_rfc_type, _ = StateType.objects.get_or_create(slug="draft-rfceditor", label="RFC Editor state") -ietf_type, _ = StateType.objects.get_or_create(slug="draft-stream-ietf", label="IETF state") -irtf_type, _ = StateType.objects.get_or_create(slug="draft-stream-irtf", label="IRTF state") -ise_type, _ = StateType.objects.get_or_create(slug="draft-stream-ise", label="ISE state") -iab_type, _ = StateType.objects.get_or_create(slug="draft-stream-iab", label="IAB state") - -slides_type, _ = StateType.objects.get_or_create(slug="slides", label="State") -minutes_type, _ = StateType.objects.get_or_create(slug="minutes", label="State") -agenda_type, _ = StateType.objects.get_or_create(slug="agenda", label="State") -liaison_att_type, _ = StateType.objects.get_or_create(slug="liai-att", label="State") -charter_type, _ = StateType.objects.get_or_create(slug="charter", label="State") - -# draft states -print "importing draft states" -State.objects.get_or_create(type=draft_type, slug="active", name="Active", order=1) -State.objects.get_or_create(type=draft_type, slug="expired", name="Expired", order=2) -State.objects.get_or_create(type=draft_type, slug="rfc", name="RFC", order=3) -State.objects.get_or_create(type=draft_type, slug="repl", name="Replaced", order=4) -State.objects.get_or_create(type=draft_type, slug="auth-rm", name="Withdrawn by Submitter", order=5) -State.objects.get_or_create(type=draft_type, slug="ietf-rm", name="Withdrawn by IETF", order=6) - -# IESG draft states -State.objects.get_or_create(type=draft_iesg_type, slug="pub", name="RFC Published", desc='The ID has been published as an RFC.', order=32) -State.objects.get_or_create(type=draft_iesg_type, slug="dead", name="Dead", desc='Document is "dead" and is no longer being tracked. (E.g., it has been replaced by another document with a different name, it has been withdrawn, etc.)', order=99) -State.objects.get_or_create(type=draft_iesg_type, slug="approved", name="Approved-announcement to be sent", desc='The IESG has approved the document for publication, but the Secretariat has not yet sent out on official approval message.', order=27) -State.objects.get_or_create(type=draft_iesg_type, slug="ann", name="Approved-announcement sent", desc='The IESG has approved the document for publication, and the Secretariat has sent out the official approval message to the RFC editor.', order=30) -State.objects.get_or_create(type=draft_iesg_type, slug="watching", name="AD is watching", desc='An AD is aware of the document and has chosen to place the document in a separate state in order to keep a closer eye on it (for whatever reason). Documents in this state are still not being actively tracked in the sense that no formal request has been made to publish or advance the document. The sole difference between this state and "I-D Exists" is that an AD has chosen to put it in a separate state, to make it easier to keep track of (for the AD\'s own reasons).', order=42) -State.objects.get_or_create(type=draft_iesg_type, slug="iesg-eva", name="IESG Evaluation", desc='The document is now (finally!) being formally reviewed by the entire IESG. Documents are discussed in email or during a bi-weekly IESG telechat. In this phase, each AD reviews the document and airs any issues they may have. Unresolvable issues are documented as "discuss" comments that can be forwarded to the authors/WG. 
State.objects.get_or_create(type=draft_iesg_type, slug="ad-eval", name="AD Evaluation", desc='A specific AD (e.g., the Area Advisor for the WG) has begun reviewing the document to verify that it is ready for advancement. The shepherding AD is responsible for doing any necessary review before starting an IETF Last Call or sending the document directly to the IESG as a whole.', order=11) - State.objects.get_or_create(type=draft_iesg_type, slug="lc-req", name="Last Call Requested", desc='The AD has requested that the Secretariat start an IETF Last Call, but the actual Last Call message has not been sent yet.', order=15) - State.objects.get_or_create(type=draft_iesg_type, slug="lc", name="In Last Call", desc='The document is currently waiting for IETF Last Call to complete. Last Calls for WG documents typically last 2 weeks, those for individual submissions last 4 weeks.', order=16) - State.objects.get_or_create(type=draft_iesg_type, slug="pub-req", name="Publication Requested", desc='A formal request has been made to advance/publish the document, following the procedures in Section 7.5 of RFC 2418. The request could be from a WG chair, from an individual through the RFC Editor, etc. (The Secretariat (iesg-secretary@ietf.org) is copied on these requests to ensure that the request makes it into the Datatracker.) A document in this state has not (yet) been reviewed by an AD nor has any official action been taken on it yet (other than to note that its publication has been requested.)', order=10) - State.objects.get_or_create(type=draft_iesg_type, slug="rfcqueue", name="RFC Ed Queue", desc='The document is in the RFC editor Queue (as confirmed by http://www.rfc-editor.org/queue.html).', order=31) - State.objects.get_or_create(type=draft_iesg_type, slug="defer", name="IESG Evaluation - Defer", desc='During a telechat, one or more ADs requested an additional 2 weeks to review the document. A defer is designed to be an exception mechanism, and can only be invoked once, the first time the document comes up for discussion during a telechat.', order=21) - State.objects.get_or_create(type=draft_iesg_type, slug="writeupw", name="Waiting for Writeup", desc='Before a standards-track or BCP document is formally considered by the entire IESG, the AD must write up a protocol action. The protocol action is included in the approval message that the Secretariat sends out when the document is approved for publication as an RFC.', order=18) - State.objects.get_or_create(type=draft_iesg_type, slug="goaheadw", name="Waiting for AD Go-Ahead", desc='As a result of the IETF Last Call, comments may need to be responded to and a revision of the ID may be needed as well. The AD is responsible for verifying that all Last Call comments have been adequately addressed and that the (possibly revised) document is in the ID directory and ready for consideration by the IESG as a whole.', order=19) - State.objects.get_or_create(type=draft_iesg_type, slug="review-e", name="Expert Review", desc='An AD sometimes asks for an external review by an outside party as part of evaluating whether a document is ready for advancement. MIBs, for example, are reviewed by the "MIB doctors". Other types of reviews may also be requested (e.g., security, operations impact, etc.). Documents stay in this state until the review is complete and possibly until the issues raised in the review are addressed.
See the "note" field for specific details on the nature of the review.', order=12) -State.objects.get_or_create(type=draft_iesg_type, slug="nopubadw", name="DNP-waiting for AD note", desc='Do Not Publish: The IESG recommends against publishing the document, but the writeup explaining its reasoning has not yet been produced. DNPs apply primarily to individual submissions received through the RFC editor. See the "note" field for more details on who has the action item.', order=33) -State.objects.get_or_create(type=draft_iesg_type, slug="nopubanw", name="DNP-announcement to be sent", desc='The IESG recommends against publishing the document, the writeup explaining its reasoning has been produced, but the Secretariat has not yet sent out the official "do not publish" recommendation message.', order=34) - -for s in State.objects.filter(type=draft_iesg_type): - n = { - "pub-req": ("ad-eval", "watching", "dead"), - "ad-eval": ("watching", "lc-req", "review-e", "iesg-eva"), - "review-e": ("ad-eval", ), - "lc-req": ("lc", ), - "lc": ("writeupw", "goaheadw"), - "writeupw": ("goaheadw", ), - "goaheadw": ("iesg-eva", ), - "iesg-eva": ("nopubadw", "defer", "approved"), - "defer": ("iesg-eva", ), - "approved": ("ann", ), - "ann": ("rfcqueue", ), - "rfcqueue": ("pub", ), - "pub": ("dead", ), - "nopubadw": ("nopubanw", ), - "nopubanw": ("dead", ), - "watching": ("pub-req", ), - "dead": ("pub-req", ), - } - - s.next_states = State.objects.filter(type=draft_iesg_type, slug__in=n[s.slug]) - -# import RFC Editor queue states -print "importing RFC Editor states" -get_rfc_state_mapping() - -# WG states, we can get them from the state descriptions -wg_doc_state_slug = { - "Call For Adoption By WG Issued": 'c-adopt', - "Adopted by a WG": 'adopt-wg', - "Adopted for WG Info Only": 'info', - "WG Document": 'wg-doc', - "Parked WG Document": 'parked', - "Dead WG Document": 'dead', - "In WG Last Call": 'wg-lc', - "Waiting for WG Chair Go-Ahead": 'chair-w', - "WG Consensus: Waiting for Write-Up": 'writeupw', - "Submitted to IESG for Publication": 'sub-pub', - } - -for o in StateDescription.objects.all().order_by('order'): - print "importing StateDescription", o.state.name - s, _ = State.objects.get_or_create(type=ietf_type, slug=wg_doc_state_slug[o.state.name], name=o.state.name) - s.desc = o.definition.replace(" ", " ").replace("\n ", "\n").replace("\n\n", "DUMMY").replace("\n", "").replace("DUMMY", "\n\n") # get rid of linebreaks, but keep paragraphs - s.order = o.order - s.save() - -# IAB -print "importing IAB stream states" -State.objects.get_or_create(type=iab_type, slug="candidat", name="Candidate IAB Document", desc="A document being considered for the IAB stream.", order=1) -State.objects.get_or_create(type=iab_type, slug="active", name="Active IAB Document", desc="This document has been adopted by the IAB and is being actively developed.", order=2) -State.objects.get_or_create(type=iab_type, slug="parked", name="Parked IAB Document", desc="This document has lost its author or editor, is waiting for another document to be written, or cannot currently be worked on by the IAB for some other reason. 
Annotations probably explain why this document is parked.", order=3) -State.objects.get_or_create(type=iab_type, slug="review-i", name="IAB Review", desc="This document is awaiting the IAB itself to come to internal consensus.", order=4) -State.objects.get_or_create(type=iab_type, slug="review-c", name="Community Review", desc="This document has completed internal consensus within the IAB and is now under community review.", order=5) -State.objects.get_or_create(type=iab_type, slug="approved", name="Approved by IAB, To Be Sent to RFC Editor", desc="The consideration of this document is complete, but it has not yet been sent to the RFC Editor for publication (although that is going to happen soon).", order=6) -State.objects.get_or_create(type=iab_type, slug="diff-org", name="Sent to a Different Organization for Publication", desc="The IAB does not expect to publish the document itself, but has passed it on to a different organization that might continue work on the document. The expectation is that the other organization will eventually publish the document.", order=7) -State.objects.get_or_create(type=iab_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The IAB processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the IAB.", order=8) -State.objects.get_or_create(type=iab_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=9) -State.objects.get_or_create(type=iab_type, slug="dead", name="Dead IAB Document", desc="This document was an active IAB document, but for some reason it is no longer being pursued for the IAB stream. It is possible that the document might be revived later, possibly in another stream.", order=10) - -# IRTF -print "importing IRTF stream states" -State.objects.get_or_create(type=irtf_type, slug="candidat", name="Candidate RG Document", desc="This document is under consideration in an RG for becoming an IRTF document. A document in this state does not imply any RG consensus and does not imply any precedence or selection. 
It's simply a way to indicate that somebody has asked for a document to be considered for adoption by an RG.", order=1) -State.objects.get_or_create(type=irtf_type, slug="active", name="Active RG Document", desc="This document has been adopted by the RG and is being actively developed.", order=2) -State.objects.get_or_create(type=irtf_type, slug="parked", name="Parked RG Document", desc="This document has lost its author or editor, is waiting for another document to be written, or cannot currently be worked on by the RG for some other reason.", order=3) -State.objects.get_or_create(type=irtf_type, slug="rg-lc", name="In RG Last Call", desc="The document is in its final review in the RG.", order=4) -State.objects.get_or_create(type=irtf_type, slug="sheph-w", name="Waiting for Document Shepherd", desc="IRTF documents have document shepherds who help RG documents through the process after the RG has finished with the document.", order=5) -State.objects.get_or_create(type=irtf_type, slug="chair-w", name="Waiting for IRTF Chair", desc="The IRTF Chair is meant to be performing some task such as sending a request for IESG Review.", order=6) -State.objects.get_or_create(type=irtf_type, slug="irsg-w", name="Awaiting IRSG Reviews", desc="The document shepherd has taken the document to the IRSG and solicited reviews from one or more IRSG members.", order=7) -State.objects.get_or_create(type=irtf_type, slug="irsgpoll", name="In IRSG Poll", desc="The IRSG is taking a poll on whether or not the document is ready to be published.", order=8) -State.objects.get_or_create(type=irtf_type, slug="iesg-rev", name="In IESG Review", desc="The IRSG has asked the IESG to do a review of the document, as described in RFC5742.", order=9) -State.objects.get_or_create(type=irtf_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The RG processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the RG.", order=10) -State.objects.get_or_create(type=irtf_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=11) -State.objects.get_or_create(type=irtf_type, slug="iesghold", name="Document on Hold Based On IESG Request", desc="The IESG has requested that the document be held pending further review, as specified in RFC 5742, and the IRTF has agreed to such a hold.", order=12) -State.objects.get_or_create(type=irtf_type, slug="dead", name="Dead IRTF Document", desc="This document was an active IRTF document, but for some reason it is no longer being pursued for the IRTF stream. 
It is possible that the document might be revived later, possibly in another stream.", order=13) - - # ISE - print "importing ISE stream states" - State.objects.get_or_create(type=ise_type, slug="receive", name="Submission Received", desc="The draft has been sent to the ISE with a request for publication.", order=1) - State.objects.get_or_create(type=ise_type, slug="find-rev", name="Finding Reviewers", desc="The ISE is finding initial reviewers for the document.", order=2) - State.objects.get_or_create(type=ise_type, slug="ise-rev", name="In ISE Review", desc="The ISE is actively working on the document.", order=3) - State.objects.get_or_create(type=ise_type, slug="need-res", name="Response to Review Needed", desc="One or more reviews have been sent to the author, and the ISE is awaiting response.", order=4) - State.objects.get_or_create(type=ise_type, slug="iesg-rev", name="In IESG Review", desc="The ISE has asked the IESG to do a review of the document, as described in RFC5742.", order=5) - State.objects.get_or_create(type=ise_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The ISE processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the ISE.", order=6) - State.objects.get_or_create(type=ise_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=7) - State.objects.get_or_create(type=ise_type, slug="dead", name="No Longer In Independent Submission Stream", desc="This document was actively considered in the Independent Submission stream, but the ISE chose not to publish it. It is possible that the document might be revived later. A document in this state may have a comment explaining the reasoning of the ISE (such as if the document was going to move to a different stream).", order=8) - State.objects.get_or_create(type=ise_type, slug="iesghold", name="Document on Hold Based On IESG Request", desc="The IESG has requested that the document be held pending further review, as specified in RFC 5742, and the ISE has agreed to such a hold.", order=9)
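The next_states import in the loop below maps the old workflows transitions onto the new State objects purely by matching state names. The same idea expressed with plain dicts instead of ORM objects -- purely illustrative, with hypothetical names:

def copy_transitions(new_states, old_transitions):
    # new_states: {state name: new state object}
    # old_transitions: {state name: [destination state names]}
    copied = {}
    for name, state in new_states.items():
        dests = old_transitions.get(name)
        if dests is None:
            continue  # no matching state in the old workflow; skip, as the importer warns
        copied[name] = [new_states[d] for d in dests if d in new_states]
    return copied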
- - # now import the next_states; we only go for the default ones, the - # WG-specific are handled in the group importer - - workflows = [(ietf_type, workflows.models.Workflow.objects.get(name="Default WG Workflow")), - (irtf_type, workflows.models.Workflow.objects.get(name="IRTF Workflow")), - (ise_type, workflows.models.Workflow.objects.get(name="ISE Workflow")), - (iab_type, workflows.models.Workflow.objects.get(name="IAB Workflow")), - ] - - for state_type, workflow in workflows: - states = dict((s.name, s) for s in State.objects.filter(type=state_type)) - old_states = dict((s.name, s) for s in workflow.states.filter(name__in=[name for name in states]).select_related('transitions')) - for name in states: - print "importing workflow transitions", workflow.name, name - s = states[name] - try: - o = old_states[name] - except KeyError: - print "MISSING state", name, "in workflow", workflow.name - continue - s.next_states = [states[t.destination.name] for t in o.transitions.filter(workflow=workflow)] - - - # meeting material states - for t in (slides_type, minutes_type, agenda_type): - print "importing states for", t.slug - State.objects.get_or_create(type=t, slug="active", name="Active", order=1) - State.objects.get_or_create(type=t, slug="deleted", name="Deleted", order=2) - - # charter states - print "importing states for charters" - State.objects.get_or_create(type=charter_type, slug="notrev", name="Not currently under review", desc="The proposed charter is not being considered at this time. A proposed charter will remain in this state until an AD moves it to Informal IESG review.") - State.objects.get_or_create(type=charter_type, slug="infrev", name="Informal IESG review", desc="This is the initial state when an AD proposes a new charter. The normal next state is Internal review if the idea is accepted, or Not currently under review if the idea is abandoned.") - State.objects.get_or_create(type=charter_type, slug="intrev", name="Internal review", desc="The IESG and IAB are reviewing the early draft of the charter; this is the initial IESG and IAB review. The usual next state is External review if the idea is adopted, or Informal IESG review if the IESG decides the idea needs more work, or Not currently under review if the idea is abandoned.") - State.objects.get_or_create(type=charter_type, slug="extrev", name="External review", desc="The IETF community and possibly other standards development organizations (SDOs) are reviewing the proposed charter. The usual next state is IESG review, although it might move to Not currently under review if the idea is abandoned during the external review.") - State.objects.get_or_create(type=charter_type, slug="iesgrev", name="IESG review", desc="The IESG is reviewing the discussion from the external review of the proposed charter.
The usual next state is Approved, or Not currently under review if the idea is abandoned.") -State.objects.get_or_create(type=charter_type, slug="approved", name="Approved", desc="The charter is approved by the IESG.") - diff --git a/redesign/importing/move-tables-to-db.sh b/redesign/importing/move-tables-to-db.sh deleted file mode 100755 index 1aad8dd09..000000000 --- a/redesign/importing/move-tables-to-db.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# -# usage: move-tables-to-db.sh old-db-name new-db-name -# -# Do the grunt work of moving tables from old-db-name to new-db-name, -# the new database is created if it doesn't exist. Note that -# permissions on the old database are not moved (so the old ones are -# kept, and the new database won't have any). - -OLD_DB=$1 -NEW_DB=$2 - -# read access info at start so we don't get asked a gazillion times about them by MySQL - -read -p "MySQL user: " MYSQL_USER -read -s -p "MySQL password for \"$MYSQL_USER\": " MYSQL_PASSWORD - -MYSQL_CMD="mysql -NB -u $MYSQL_USER --password=$MYSQL_PASSWORD" - -echo . - -echo "Extracting table names" - -TABLES=`echo "SHOW TABLES IN $1;" | $MYSQL_CMD | sed -e 's/^/\`/' -e 's/$/\`/'` - -echo "Found `echo \"$TABLES\" | wc -l` tables" - - -echo "Creating database \`$NEW_DB\`" - -echo "CREATE DATABASE \`$NEW_DB\`;" | $MYSQL_CMD - - -echo "Moving tables from \`$OLD_DB\` to \`$NEW_DB\`" - -for TABLE in $TABLES; do - echo "RENAME TABLE \`$OLD_DB\`.$TABLE TO \`$NEW_DB\`.$TABLE;" | $MYSQL_CMD -done - -echo "Done" diff --git a/redesign/importing/utils.py b/redesign/importing/utils.py deleted file mode 100644 index 9a9f95a5f..000000000 --- a/redesign/importing/utils.py +++ /dev/null @@ -1,182 +0,0 @@ -import datetime - -from ietf.utils import unaccent -from ietf.person.models import Person, Email, Alias -from ietf.doc.models import NewRevisionDocEvent -from ietf.idtracker.models import EmailAddress - -def clean_email_address(addr): - addr = addr.replace("!", "@").replace("(at)", "@") # some obvious @ replacements - # whack surrounding <...> - addr = addr[addr.rfind('<') + 1:] - end = addr.find('>') - if end != -1: - addr = addr[:end] - addr = addr.strip() - if not "@" in addr: - return "" - else: - return addr - -def person_name(person): - def clean_prefix(n): - n = clean(n) - if n in [".", "Mr.", " e.person.name: - e.person.name = name - e.person.save() - - return e - -def possibly_import_other_priority_email(email, old_email): - addr = clean_email_address(old_email.address or "") - if not addr or addr.lower() == email.address.lower(): - return - - try: - e = Email.objects.get(address=addr) - if e.person != email.person: - e.person = email.person - e.save() - except Email.DoesNotExist: - Email.objects.create(address=addr, person=email.person, - time=calc_email_import_time(old_email.priority)) - -def make_revision_event(doc, system_person): - try: - e = NewRevisionDocEvent.objects.get(doc=doc, type="new_revision") - except NewRevisionDocEvent.DoesNotExist: - e = NewRevisionDocEvent(doc=doc, type="new_revision") - e.rev = doc.rev - e.time = doc.time - e.by = system_person - e.desc = "Added new revision" - - return e - - -def dont_save_queries(): - # prevent memory from leaking when settings.DEBUG=True - from django.db import connection - class DontSaveQueries(object): - def append(self, x): - pass - connection.queries = DontSaveQueries() diff --git a/redesign/interim/__init__.py b/redesign/interim/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/redesign/interim/models.py 
b/redesign/interim/models.py deleted file mode 100644 index 231552ce5..000000000 --- a/redesign/interim/models.py +++ /dev/null @@ -1,95 +0,0 @@ -from django.db import models - -class InterimActivities(models.Model): - id = models.IntegerField(primary_key=True) - group_acronym_id = models.IntegerField() - meeting_num = models.IntegerField() - activity = models.TextField() - act_date = models.DateField() - act_time = models.TimeField() - act_by = models.IntegerField() - class Meta: - db_table = u'interim_activities' - -class InterimAgenda(models.Model): - id = models.IntegerField(primary_key=True) - meeting_num = models.IntegerField() - group_acronym_id = models.IntegerField() - filename = models.CharField(max_length=765) - irtf = models.IntegerField() - interim = models.IntegerField() - class Meta: - db_table = u'interim_agenda' - -class InterimInfo(models.Model): - id = models.IntegerField(primary_key=True) - group_acronym_id = models.IntegerField(null=True, blank=True) - meeting_num = models.IntegerField(null=True, blank=True) - meeting_date = models.CharField(max_length=765, blank=True) - message_body = models.TextField(blank=True) - class Meta: - db_table = u'interim_info' - -class InterimMeetings(models.Model): - meeting_num = models.IntegerField(primary_key=True) - start_date = models.DateField(null=True, blank=True) - end_date = models.DateField(null=True, blank=True) - city = models.CharField(max_length=765, blank=True) - state = models.CharField(max_length=765, blank=True) - country = models.CharField(max_length=765, blank=True) - time_zone = models.IntegerField(null=True, blank=True) - ack = models.TextField(blank=True) - agenda_html = models.TextField(blank=True) - agenda_text = models.TextField(blank=True) - future_meeting = models.TextField(blank=True) - overview1 = models.TextField(blank=True) - overview2 = models.TextField(blank=True) - group_acronym_id = models.IntegerField(null=True, blank=True) - class Meta: - db_table = u'interim_meetings' - -class InterimMinutes(models.Model): - id = models.IntegerField(primary_key=True) - meeting_num = models.IntegerField() - group_acronym_id = models.IntegerField() - filename = models.CharField(max_length=765) - irtf = models.IntegerField() - interim = models.IntegerField() - class Meta: - db_table = u'interim_minutes' - -class InterimSlides(models.Model): - id = models.IntegerField(primary_key=True) - meeting_num = models.IntegerField() - group_acronym_id = models.IntegerField(null=True, blank=True) - slide_num = models.IntegerField(null=True, blank=True) - slide_type_id = models.IntegerField() - slide_name = models.CharField(max_length=765) - irtf = models.IntegerField() - interim = models.IntegerField() - order_num = models.IntegerField(null=True, blank=True) - in_q = models.IntegerField(null=True, blank=True) - class Meta: - db_table = u'interim_slides' - - def file_loc(self): - from ietf.idtracker.models import Acronym - dir = self.meeting_num - acronym = Acronym.objects.get(pk=self.group_acronym_id).acronym - if self.slide_type_id==1: - #return "%s/slides/%s-%s/sld1.htm" % (dir,self.acronym(),self.slide_num) - return "%s/slides/%s-%s/%s-%s.htm" % (dir,acronym,self.slide_num,acronym,self.slide_num) - else: - if self.slide_type_id == 2: - ext = ".pdf" - elif self.slide_type_id == 3: - ext = ".txt" - elif self.slide_type_id == 4: - ext = ".ppt" - elif self.slide_type_id == 5: - ext = ".doc" - elif self.slide_type_id == 6: - ext = ".pptx" - else: - ext = "" - return "%s/slides/%s-%s%s" % (dir,acronym,self.slide_num,ext) From
a4f334195e291347124a4570cf3e9b7ded10de9a Mon Sep 17 00:00:00 2001 From: Robert Sparks Date: Thu, 20 Dec 2018 19:13:35 +0000 Subject: [PATCH 2/2] simple test of BowerStorageFinder. Commit ready for merge. - Legacy-Id: 15845 --- ietf/utils/tests.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ietf/utils/tests.py b/ietf/utils/tests.py index 3284f4b6a..5cf06ff28 100644 --- a/ietf/utils/tests.py +++ b/ietf/utils/tests.py @@ -33,6 +33,7 @@ from ietf.group.factories import GroupFactory from ietf.group.models import Group from ietf.person.name import name_parts, unidecode_name from ietf.submit.tests import submission_file +from ietf.utils.bower_storage import BowerStorageFinder from ietf.utils.draft import Draft, getmeta from ietf.utils.mail import send_mail_preformatted, send_mail_text, send_mail_mime, outbox from ietf.utils.management.commands import pyflakes @@ -403,6 +404,13 @@ class AdminTestCase(TestCase): # r = self.client.get(url) # self.assertEqual(r.status_code, 200) +class TestBowerStaticFiles(TestCase): + + def test_bower_storage_finder(self): + bfs = BowerStorageFinder() + files = bfs.find('.') + self.assertNotEqual(files,[]) + class DraftTests(TestCase):
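For context on the new test: BowerStorageFinder is a Django staticfiles finder, and the test only asserts that searching from the storage root yields a non-empty result. A finder like this is normally activated through settings; a sketch, where the dotted path is an assumption based on the import in the test rather than confirmed configuration:

# settings.py (sketch; the BowerStorageFinder path is assumed from the
# "ietf.utils.bower_storage" import above, not verified configuration)
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    "ietf.utils.bower_storage.BowerStorageFinder",
]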