Merged in [15845] from rjsparks@nostrum.com:

simple test of BowerStorageFinder.
 - Legacy-Id: 15874
Note: SVN reference [15845] has been migrated to Git commit a4f334195e
This commit is contained in:
Henrik Levkowetz 2019-01-07 21:34:52 +00:00
commit 8e60e19d01
20 changed files with 8 additions and 3949 deletions

View file

@ -1,11 +0,0 @@
def name(name_class, slug, name, desc="", order=0, **kwargs):
    """Get or create the name_class instance with the given slug.

    Only on creation are ``name``, ``desc``, ``order`` and any extra
    model fields passed as keyword arguments set (and the object
    saved); an existing instance is returned untouched.

    Returns the instance in either case.
    """
    obj, created = name_class.objects.get_or_create(slug=slug)
    if created:
        obj.name = name
        obj.desc = desc
        obj.order = order
        # .items() instead of the Python-2-only .iteritems(), so the
        # helper also runs under Python 3 (identical behavior on 2.x).
        for k, v in kwargs.items():
            setattr(obj, k, v)
        obj.save()
    return obj

View file

@ -33,6 +33,7 @@ from ietf.group.factories import GroupFactory
from ietf.group.models import Group
from ietf.person.name import name_parts, unidecode_name
from ietf.submit.tests import submission_file
from ietf.utils.bower_storage import BowerStorageFinder
from ietf.utils.draft import Draft, getmeta
from ietf.utils.mail import send_mail_preformatted, send_mail_text, send_mail_mime, outbox
from ietf.utils.management.commands import pyflakes
@ -403,6 +404,13 @@ class AdminTestCase(TestCase):
# r = self.client.get(url)
# self.assertEqual(r.status_code, 200)
class TestBowerStaticFiles(TestCase):
    """Smoke test for the BowerStorageFinder static-files finder."""

    def test_bower_storage_finder(self):
        # Asking the finder for everything ('.') must return at least
        # one file, otherwise the bower storage is misconfigured.
        finder = BowerStorageFinder()
        found = finder.find('.')
        self.assertNotEqual(found, [])
class DraftTests(TestCase):

View file

View file

@ -1,18 +0,0 @@
#!/bin/bash
# basic dependencies
#
# Driver that runs the full database import from the old schema.
# Abort on the first failing step so later scripts never run against
# partially-imported prerequisite data.
set -e
# Order matters: each script below assumes the data imported by the
# scripts above it already exists.
python import-reused-tables.py base
python import-persons.py
python import-states.py
python import-groups.py
python import-roles.py
python import-reused-tables.py others
python import-meetings.py
python import-announcements.py
python import-docs.py
python import-ipr.py # sets up links to drafts/RFCs so needs them
python import-liaison.py
python import-interim.py # requires ietf_ams database being set up

View file

@ -1,162 +0,0 @@
#!/usr/bin/python
import sys, os, re, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
settings.IMPORTING_FROM_OLD_SCHEMA = True
from django.core import management
management.setup_environ(settings)
from ietf.person.models import *
from ietf.group.models import *
from ietf.name.utils import name
from ietf.message.models import Message, SendQueue
from redesign.importing.utils import old_person_to_person
from ietf.announcements.models import Announcement, PersonOrOrgInfo, AnnouncedTo, AnnouncedFrom, ScheduledAnnouncement
from ietf.idtracker.models import IESGLogin
# assumptions:
# - nomcom groups have been imported
# - persons have been imported (Announcement originators and IESGLogins)
# imports Announcement, ScheduledAnnouncement
# Person used as originator when no real person can be determined.
system = Person.objects.get(name="(System)")

# Announcement -> Message
for o in Announcement.objects.all().select_related('announced_to', 'announced_from').order_by('announcement_id').iterator():
    print "importing Announcement", o.pk
    # Reuse the Message with the same id on re-runs (idempotent import).
    try:
        message = Message.objects.get(id=o.announcement_id)
    except Message.DoesNotExist:
        message = Message(id=o.announcement_id)

    # Combine the separate date column and "HH:MM:SS" time string.
    message.time = datetime.datetime.combine(o.announced_date,
                                             datetime.time(*(int(x) for x in o.announced_time.split(":"))))

    # announced_by can be a dangling foreign key in the old schema.
    try:
        x = o.announced_by
    except PersonOrOrgInfo.DoesNotExist:
        message.by = system
    else:
        # An empty first_name with last_name 'None' is a placeholder row.
        if not o.announced_by.first_name and o.announced_by.last_name == 'None':
            message.by = system
        else:
            message.by = old_person_to_person(o.announced_by)

    message.subject = o.subject.strip()

    # 99 is the old schema's "other" sentinel: the value lives in other_val.
    if o.announced_from_id == 99:
        message.frm = o.other_val or ""
    elif o.announced_from_id == 18 and o.nomcom_chair_id != 0:
        # 18 = NomCom; use the chair's address.
        # NOTE(review): "%s <%s>" has two placeholders but .email() is the
        # only argument - assumes email() returns a 2-tuple; confirm.
        message.frm = u"%s <%s>" % o.nomcom_chair.person.email()
    else:
        # Use the stored from-body, appending the email address unless
        # one is already embedded in the name.
        if '<' in o.announced_from.announced_from:
            message.frm = o.announced_from.announced_from
        else:
            message.frm = u"%s <%s>" % (o.announced_from.announced_from, o.announced_from.email)

    if o.announced_to_id == 99:
        message.to = o.other_val or ""
    else:
        try:
            message.to = u"%s <%s>" % (o.announced_to.announced_to, o.announced_to.email)
        except AnnouncedTo.DoesNotExist:
            message.to = ""

    message.cc = o.cc or ""

    # "extra" holds ^-separated header lines; pick out Bcc: and Reply-To:.
    for l in (o.extra or "").strip().replace("^", "\n").replace("\r", "").split("\n"):
        l = l.strip()
        if l.lower().startswith("bcc:"):
            message.bcc = l[len("bcc:"):].strip()
        elif l.lower().startswith("reply-to:"):
            message.reply_to = l[len("reply-to:"):].strip()

    message.body = o.text
    message.save()
    message.related_groups.clear()

    # Link NomCom announcements to the matching nomcomYY group, located
    # via its chair (excluding the generic "nomcom" group itself).
    if o.nomcom:
        nomcom = Group.objects.filter(role__name="chair",
                                      role__person=old_person_to_person(o.nomcom_chair.person),
                                      acronym__startswith="nomcom").exclude(acronym="nomcom").get()
        message.related_groups.add(nomcom)
# Precompute a scheduled_by login-name -> Person mapping to speed up
# the ScheduledAnnouncement loop below.
scheduled_by_mapping = {}
for by in ScheduledAnnouncement.objects.all().values_list("scheduled_by", flat=True).distinct():
    # Resolve via IESGLogin first, falling back to a first/last-name
    # lookup in PersonOrOrgInfo, and finally to the system person.
    logins = IESGLogin.objects.filter(login_name=by)
    if logins:
        l = logins[0]
        person = l.person
        if not person:
            person = PersonOrOrgInfo.objects.get(first_name=l.first_name, last_name=l.last_name)
        found = old_person_to_person(person)
    else:
        found = system
    print "mapping", by, "to", found
    scheduled_by_mapping[by] = found
# ScheduledAnnouncement -> SendQueue + Message
for o in ScheduledAnnouncement.objects.all().order_by('id').iterator():
    print "importing ScheduledAnnouncement", o.pk
    # Reuse the SendQueue entry with the same id on re-runs.
    try:
        q = SendQueue.objects.get(id=o.id)
    except SendQueue.DoesNotExist:
        q = SendQueue(id=o.id)
        # make sure there's no id overlap with ordinary already-imported announcements
        q.message = Message(id=o.id + 4000)

    time = datetime.datetime.combine(o.scheduled_date,
                                     datetime.time(*(int(x) for x in o.scheduled_time.split(":"))))
    by = scheduled_by_mapping[o.scheduled_by]

    q.message.time = time
    q.message.by = by
    q.message.subject = (o.subject or "").strip()
    q.message.to = (o.to_val or "").strip()
    q.message.frm = (o.from_val or "").strip()
    q.message.cc = (o.cc_val or "").strip()
    q.message.bcc = (o.bcc_val or "").strip()
    q.message.reply_to = (o.replyto or "").strip()
    q.message.body = o.body or ""
    q.message.content_type = o.content_type or ""
    q.message.save()

    q.time = time
    q.by = by

    # send_at: when the announcement was scheduled to go out; tolerate
    # malformed time strings by defaulting to midnight.
    d = None
    if o.to_be_sent_date:
        try:
            t = datetime.time(*(int(x) for x in o.to_be_sent_time.split(":")))
        except ValueError:
            t = datetime.time(0, 0, 0)
        d = datetime.datetime.combine(o.to_be_sent_date, t)
    q.send_at = d

    # sent_at: when it actually went out.
    # NOTE(review): this parses o.scheduled_time rather than an
    # actual-sent time - possibly because the old schema has no such
    # column; confirm before reusing.
    d = None
    if o.actual_sent_date:
        try:
            t = datetime.time(*(int(x) for x in o.scheduled_time.split(":")))
        except ValueError:
            t = datetime.time(0, 0, 0)
        d = datetime.datetime.combine(o.actual_sent_date, t)
    q.sent_at = d

    # Strip a leading <br> artifact left over from the old web UI.
    n = (o.note or "").strip()
    if n.startswith("<br>"):
        n = n[len("<br>"):]
    q.note = n
    q.save()

File diff suppressed because it is too large Load diff

View file

@ -1,410 +0,0 @@
#!/usr/bin/python
import sys, os, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
from django.template.defaultfilters import slugify
from ietf.group.models import *
from ietf.name.models import *
from ietf.doc.models import State, StateType
from ietf.doc.utils import get_tags_for_stream_id
from ietf.doc.models import Document
from ietf.name.utils import name
from redesign.importing.utils import old_person_to_person, make_revision_event
from ietf.idtracker.models import AreaGroup, IETFWG, Area, AreaGroup, Acronym, AreaWGURL, IRTF, ChairsHistory, Role, AreaDirector
from ietf.liaisons.models import SDOs
from ietf.iesg.models import TelechatDates, Telechat, TelechatDate
from ietf.wgcharter.utils import set_or_create_charter
import workflows.utils
# imports IETFWG, Area, AreaGroup, Acronym, IRTF, AreaWGURL, SDOs, TelechatDates, dates from Telechat
# also creates nomcom groups
# assumptions: persons and states have been imported
# Document type used for group charters.
doc_type_charter = name(DocTypeName, "charter", "Charter")

# Group states referenced throughout the import.
state_names = dict(
    bof=name(GroupStateName, slug="bof", name="BOF"),
    proposed=name(GroupStateName, slug="proposed", name="Proposed"),
    active=name(GroupStateName, slug="active", name="Active"),
    dormant=name(GroupStateName, slug="dormant", name="Dormant"),
    conclude=name(GroupStateName, slug="conclude", name="Concluded"),
    unknown=name(GroupStateName, slug="unknown", name="Unknown"),
)

# Group types referenced throughout the import.
type_names = dict(
    ietf=name(GroupTypeName, slug="ietf", name="IETF"),
    area=name(GroupTypeName, slug="area", name="Area"),
    ag=name(GroupTypeName, slug="ag", name="AG", desc="Area group"),
    wg=name(GroupTypeName, slug="wg", name="WG", desc="Working group"),
    rg=name(GroupTypeName, slug="rg", name="RG", desc="Research group"),
    team=name(GroupTypeName, slug="team", name="Team"),
    individ=name(GroupTypeName, slug="individ", name="Individual"),
    sdo=name(GroupTypeName, slug="sdo", name="SDO", desc="Standards organization"),
)

# Ballot positions available on group (charter) ballots.
group_ballot_names = {
    'No': name(GroupBallotPositionName, 'no', 'No'),
    'Yes': name(GroupBallotPositionName, 'yes', 'Yes'),
    'Abstain': name(GroupBallotPositionName, 'abstain', 'Abstain'),
    'Block': name(GroupBallotPositionName, 'block', 'Block'),
    'No Record': name(GroupBallotPositionName, 'norecord', 'No record'),
}
def _bootstrap_group(acronym, group_name, parent=None):
    """Get or create a top-level bookkeeping group and (re)set its
    name, state (active), type (ietf) and, when given, its parent.

    Factors out the eight identical get_or_create/name/state/type/save
    stanzas this script previously repeated verbatim.
    """
    g, _ = Group.objects.get_or_create(acronym=acronym)
    g.name = group_name
    g.state = state_names["active"]
    g.type = type_names["ietf"]
    if parent is not None:
        g.parent = parent
    g.save()
    return g

# make sure we got the IETF as high-level parent
ietf_group = _bootstrap_group("ietf", "IETF")
# make sure we got the IESG so we can use it as parent for areas
iesg_group = _bootstrap_group("iesg", "IESG", parent=ietf_group)
# make sure we got the IRTF as parent for RGs
irtf_group = _bootstrap_group("irtf", "IRTF")
# create Secretariat for use with roles
secretariat_group = _bootstrap_group("secretariat", "IETF Secretariat")
# create ISE for use with streams
ise_group = _bootstrap_group("ise", "Independent Submission Editor")
# create RSOC for use with roles
rsoc_group = _bootstrap_group("rsoc", "RFC Series Oversight Committee")
# create IAB for use with liaison statements and streams
iab_group = _bootstrap_group("iab", "Internet Architecture Board")
# create IANA for use with roles for authorization
iana_group = _bootstrap_group("iana", "IANA")
# create IEPG for use with meetings
iepg_group = _bootstrap_group("iepg", "IEPG")
# Person used as actor for generated history events.
system = Person.objects.get(name="(System)")

# Import telechat dates from the old Telechat table.
for o in Telechat.objects.all().order_by("pk"):
    # The first three rows are placeholders, not real telechats.
    if o.pk <= 3:
        print "skipping phony Telechat", o.pk
        continue
    print "importing Telechat", o.pk, o.telechat_date
    TelechatDate.objects.get_or_create(date=o.telechat_date)

# TelechatDates stores up to four upcoming dates in columns date1..date4;
# flatten them into individual TelechatDate rows.
for o in TelechatDates.objects.all():
    print "importing TelechatDates"
    for x in range(1, 5):
        d = getattr(o, "date%s" % x)
        if d:
            TelechatDate.objects.get_or_create(date=d)
# NomCom: create one group per nominating-committee year from the
# chair history.
for o in ChairsHistory.objects.filter(chair_type=Role.NOMCOM_CHAIR).order_by("start_year"):
    print "importing ChairsHistory/Nomcom", o.pk, "nomcom%s" % o.start_year
    group, _ = Group.objects.get_or_create(acronym="nomcom%s" % o.start_year)
    group.name = "IAB/IESG Nominating Committee %s/%s" % (o.start_year, o.end_year)
    # Only the committee of the current chair is still active.
    if o.chair_type.person == o.person:
        s = state_names["active"]
    else:
        s = state_names["conclude"]
    group.state = s
    group.type = type_names["ietf"]
    group.parent = None
    group.save()

    # we need start/end year so fudge events
    group.groupevent_set.all().delete()
    e = ChangeStateGroupEvent(group=group, type="changed_state")
    e.time = datetime.datetime(o.start_year, 5, 1, 12, 0, 0)
    e.by = system
    e.desc = "Started group"
    e.state = state_names["active"]
    e.save()
    e = ChangeStateGroupEvent(group=group, type="changed_state")
    e.time = datetime.datetime(o.end_year, 5, 1, 12, 0, 0)
    e.by = system
    e.desc = "Concluded group"
    e.state = state_names["conclude"]
    e.save()
# IRTF research groups, parented under the irtf group created above.
for o in IRTF.objects.all():
    print "importing IRTF", o.pk, o.acronym
    try:
        group = Group.objects.get(acronym=o.acronym.lower())
    except Group.DoesNotExist:
        group = Group(acronym=o.acronym.lower())
    group.name = o.name
    group.state = state_names["active"] # we assume all to be active
    group.type = type_names["rg"]
    group.parent = irtf_group
    group.comments = o.charter_text or ""
    group.save()
    # FIXME: missing fields from old: meeting_scheduled
# SDOs
for o in SDOs.objects.all().order_by("pk"):
    # we import SDOs as groups, this makes it easy to take advantage
    # of the rest of the role/person models for authentication and
    # authorization
    print "importing SDOs", o.pk, o.sdo_name
    # SDOs have no acronym of their own, so look up by (name, type).
    try:
        group = Group.objects.get(name=o.sdo_name, type=type_names["sdo"])
    except Group.DoesNotExist:
        group = Group(name=o.sdo_name, type=type_names["sdo"])
    group.state_id = "active"
    # Derive an acronym from the name since the old schema has none.
    group.acronym = slugify(group.name)
    group.save()
def import_date_event(group, name, state_id, desc):
    """Record a changed_state GroupEvent for *group* from the old
    "<name>_date" column of the row currently bound to the module-level
    loop variable ``o``.

    NOTE(review): ``o`` is a free variable resolved at call time (late
    binding), so this helper must only be called from inside the Area
    and IETFWG loops below.
    """
    d = getattr(o, "%s_date" % name)
    if d:
        e = ChangeStateGroupEvent(group=group, type="changed_state")
        # Old schema stores dates only; fix the time to noon.
        e.time = datetime.datetime.combine(d, datetime.time(12, 0, 0))
        e.by = system
        e.state = state_names[state_id]
        e.desc = desc
        e.save()
# Area
for o in Area.objects.all():
    print "importing Area", o.pk, o.area_acronym.acronym
    try:
        group = Group.objects.get(acronym=o.area_acronym.acronym)
    except Group.DoesNotExist:
        group = Group(acronym=o.area_acronym.acronym)
        group.id = o.area_acronym_id # transfer id
    # we could use last_modified_date for group.time, but in the new
    # schema, group.time is supposed to change when the roles change
    # too and some of the history logic depends on this, so it's going
    # to cause us too much trouble
    group.name = o.area_acronym.name
    # NOTE(review): if status is none of the three values below, ``s``
    # is unbound (NameError) or stale from a previous iteration -
    # assumes the old data only ever contains these values; confirm.
    if o.status.status == "Active":
        s = state_names["active"]
    elif o.status.status == "Concluded":
        s = state_names["conclude"]
    elif o.status.status == "Unknown":
        s = state_names["unknown"]
    group.state = s
    group.type = type_names["area"]
    group.parent = iesg_group
    group.comments = o.comments.strip() if o.comments else ""
    group.save()

    for u in o.additional_urls():
        url, _ = GroupURL.objects.get_or_create(group=group, url=u.url)
        url.name = u.description.strip()
        url.save()

    # import events
    group.groupevent_set.all().delete()
    import_date_event(group, "start", "active", "Started group")
    import_date_event(group, "concluded", "conclude", "Concluded group")
    # FIXME: missing fields from old: extra_email_addresses
# IETFWG, AreaGroup
for o in IETFWG.objects.all().order_by("pk"):
    print "importing IETFWG", o.pk, o.group_acronym.acronym
    try:
        group = Group.objects.get(acronym=o.group_acronym.acronym)
    except Group.DoesNotExist:
        group = Group(acronym=o.group_acronym.acronym)
        group.id = o.group_acronym_id # transfer id

    if o.last_modified_date:
        group.time = datetime.datetime.combine(o.last_modified_date, datetime.time(12, 0, 0))
    group.name = o.group_acronym.name

    # state: BOFs and proposed WGs keep their pre-WG state unless the
    # old row says they are concluded.
    if o.group_type.type == "BOF":
        s = state_names["bof"]
        if o.status.status == "Concluded":
            s = state_names["conclude"]
    elif o.group_type.type == "PWG":
        s = state_names["proposed"]
        if o.status.status == "Concluded":
            s = state_names["conclude"]
    elif o.status.status == "Active":
        s = state_names["active"]
    elif o.status.status == "Dormant":
        s = state_names["dormant"]
    elif o.status.status == "Concluded":
        s = state_names["conclude"]
    group.state = s

    # type: the old "AG" type is a mixed bag, mapped by acronym.
    if o.group_type.type == "TEAM":
        group.type = type_names["team"]
    elif o.group_type.type == "AG":
        if o.group_acronym.acronym == "none":
            # none means individual
            group.type = type_names["individ"]
        elif o.group_acronym.acronym == "iab":
            group.type = type_names["ietf"]
            group.parent = None
        elif o.group_acronym.acronym in ("tsvdir", "secdir", "saag", "usac"):
            group.type = type_names["team"]
        elif o.group_acronym.acronym == "iesg":
            pass # we already treated iesg
        elif o.group_acronym.acronym in ("apparea", "opsarea", "rtgarea", "usvarea", "genarea", "tsvarea", "raiarea", "apptsv"):
            group.type = type_names["ag"]
        else:
            # the remaining groups are
            # apples, null, dirdir
            # for now, we don't transfer them
            if group.id:
                group.delete()
            print "not transferring", o.group_acronym.acronym, o.group_acronym.name
            continue
    else: # PWG/BOF/WG
        # some BOFs aren't WG-forming but we currently classify all as WGs
        group.type = type_names["wg"]

    if o.area:
        group.parent = Group.objects.get(acronym=o.area.area.area_acronym.acronym)
    elif not group.parent:
        print "no area/parent for", group.acronym, group.name, group.type, group.state

    # Responsible AD; a director row without an area_id is the fake
    # "TBD" placeholder and is treated as no AD.
    try:
        area_director = o.area_director
    except AreaDirector.DoesNotExist:
        area_director = None
    if area_director and not area_director.area_id:
        area_director = None # fake TBD guy
    group.ad = old_person_to_person(area_director.person) if area_director else None

    # Mailing-list info; normalize old doubled mailman URL paths.
    group.list_email = o.email_address if o.email_address else ""
    group.list_subscribe = (o.email_subscribe or "").replace("//listinfo", "/listinfo").strip()
    l = o.clean_email_archive().strip() if o.email_archive else ""
    if l in ("none", "not available"):
        l = ""
    group.list_archive = l
    group.comments = o.comments.strip() if o.comments else ""
    group.save()

    for u in o.additional_urls():
        url, _ = GroupURL.objects.get_or_create(group=group, url=u.url)
        url.name = u.description.strip()
        url.save()

    # Milestones, keyed on (group, description).
    for m in o.milestones():
        desc = m.description.strip()
        try:
            milestone = GroupMilestone.objects.get(group=group, desc=desc)
        except GroupMilestone.DoesNotExist:
            milestone = GroupMilestone(group=group, desc=desc)
        milestone.expected_due_date = m.expected_due_date
        milestone.done = m.done == "Done"
        milestone.done_date = m.done_date
        milestone.time = datetime.datetime.combine(m.last_modified_date, datetime.time(12, 0, 0))
        milestone.save()

    # import workflow states and transitions
    w = workflows.utils.get_workflow_for_object(o)
    if w:
        try:
            w = w.wgworkflow
        except WGWorkflow.DoesNotExist:
            w = None
    if w:
        w.unused_states = State.objects.filter(type="draft-stream-ietf").exclude(name__in=[x.name for x in w.selected_states.all()])
        w.unused_tags = DocTagName.objects.filter(slug__in=get_tags_for_stream_id("draft-stream-ietf")).exclude(name__in=[x.name for x in w.selected_tags.all()])
        # custom transitions
        states = dict((s.name, s) for s in State.objects.filter(type="draft-stream-ietf"))
        old_states = dict((s.name, s) for s in w.states.filter(name__in=[name for name in states]).select_related('transitions'))
        # NOTE(review): this inner loop rebinds the outer loop variable
        # ``o`` (the IETFWG row) and ``s``; code later in this iteration
        # would read a clobbered ``o`` - looks like a latent bug, confirm.
        for name in old_states:
            s = states[name]
            o = old_states[name]
            # NOTE(review): ``workflow`` is not defined anywhere in this
            # script; this line raises NameError if reached (probably
            # meant the workflow object ``w``) - confirm.
            n = [states[t.destination.name] for t in o.transitions.filter(workflow=workflow)]
            if set(s.next_states) != set(n):
                g, _ = GroupStateTransitions.objects.get_or_create(group=group, state=s)
                g.next_states = n

    # import events
    group.groupevent_set.all().delete()
    import_date_event(group, "proposed", "proposed", "Proposed group")
    import_date_event(group, "start", "active", "Started group")
    import_date_event(group, "concluded", "conclude", "Concluded group")
    # dormant_date is empty on all so don't bother with that

    # Import charter: active/concluded groups get an approved rev 01
    # charter, everything else an unapproved rev 00.
    charter = set_or_create_charter(group)
    if group.state_id in ("active", "conclude"):
        charter.rev = "01"
        charter.set_state(State.objects.get(type="charter", slug="approved"))
    else:
        charter.rev = "00"
        charter.set_state(State.objects.get(type="charter", slug="notrev"))
    # the best estimate of the charter time is when we changed state
    e = group.groupevent_set.order_by("-time")[:1]
    charter.time = e[0].time if e else group.time
    charter.save()
    e = make_revision_event(charter, system)
    e.save()
# FIXME: missing fields from old: meeting_scheduled, email_keyword, meeting_scheduled_old

View file

@ -1,155 +0,0 @@
#!/usr/bin/python
import sys, os, re, datetime, pytz
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
from django.template.defaultfilters import slugify
import datetime
from ietf.idtracker.models import AreaDirector, IETFWG, Acronym, IRTF, PersonOrOrgInfo
from ietf.meeting.models import *
from ietf.person.models import *
from ietf.doc.models import Document, DocAlias, State, DocEvent
from redesign.importing.utils import old_person_to_person, dont_save_queries, make_revision_event
from redesign.interim.models import *
from ietf.name.models import *
from ietf.name.utils import name
dont_save_queries()
# assumptions:
# - persons have been imported
# - groups have been imported
# - regular meetings have been imported
database = "ietf_ams"
system_person = Person.objects.get(name="(System)")
agenda_doctype = name(DocTypeName, "agenda", "Agenda")
minutes_doctype = name(DocTypeName, "minutes", "Minutes")
slides_doctype = name(DocTypeName, "slides", "Slides")
group_meetings_in_year = {}
for o in InterimMeetings.objects.using(database).order_by("start_date"):
print "importing InterimMeeting", o.pk
group = Group.objects.get(pk=o.group_acronym_id)
meeting_key = "%s-%s" % (group.acronym, o.start_date.year)
if not group.acronym in group_meetings_in_year:
group_meetings_in_year[meeting_key] = 0
group_meetings_in_year[meeting_key] += 1
num = "interim-%s-%s-%s" % (o.start_date.year, group.acronym, group_meetings_in_year[meeting_key])
try:
m = Meeting.objects.get(number=num)
except:
m = Meeting(number=num)
m.pk = o.pk
m.type_id = "interim"
m.date = o.start_date
# we don't have any other fields
m.save()
if m.session_set.all():
session = m.session_set.all()[0]
else:
session = Session()
session.meeting = m
session.group = group
session.requested_by = system_person
session.status_id = "appr"
session.modified = datetime.datetime.combine(m.date, datetime.time(0, 0, 0))
session.save()
meeting = m
interim_meeting = o
def import_material_kind(kind, doctype):
# import agendas
found = kind.objects.filter(meeting_num=m.pk,
group_acronym_id=interim_meeting.group_acronym_id,
irtf=1 if session.group.parent.acronym == "irtf" else 0,
interim=1).using(database)
for o in found:
name = "%s-%s" % (doctype.slug, m.number)
if kind == InterimSlides:
name += "-%s" % o.slide_num
name = name.lower()
try:
d = Document.objects.get(type=doctype, docalias__name=name)
except Document.DoesNotExist:
d = Document(type=doctype, name=name)
if kind == InterimSlides:
d.title = o.slide_name.strip()
l = o.file_loc()
d.external_url = l[l.find("slides/") + len("slides/"):]
d.order = o.order_num or 1
else:
session_name = session.name if session.name else session.group.acronym.upper()
d.title = u"%s for %s at %s" % (doctype.name, session_name, session.meeting)
d.external_url = o.filename # save filenames for now as they don't appear to be quite regular
d.rev = "00"
d.group = session.group
d.time = datetime.datetime.combine(meeting.date, datetime.time(0, 0, 0)) # we may have better estimate below
d.save()
d.set_state(State.objects.get(type=doctype, slug="active"))
DocAlias.objects.get_or_create(document=d, name=name)
session.materials.add(d)
# try to create a doc event to figure out who uploaded it
e = make_revision_event(d, system_person)
t = d.type_id
if d.type_id == "slides":
t = "slide, '%s" % d.title
activities = InterimActivities.objects.filter(group_acronym_id=interim_meeting.group_acronym_id,
meeting_num=interim_meeting.meeting_num,
activity__startswith=t,
activity__endswith="was uploaded").using(database)[:1]
if activities:
a = activities[0]
e.time = datetime.datetime.combine(a.act_date, a.act_time)
try:
e.by = old_person_to_person(PersonOrOrgInfo.objects.get(pk=a.act_by)) or system_person
except PersonOrOrgInfo.DoesNotExist:
pass
d.time = e.time
d.save()
else:
print "NO UPLOAD ACTIVITY RECORD for", d.name.encode("utf-8"), t.encode("utf-8"), interim_meeting.group_acronym_id, interim_meeting.meeting_num
e.save()
import_material_kind(InterimAgenda, agenda_doctype)
import_material_kind(InterimMinutes, minutes_doctype)
import_material_kind(InterimSlides, slides_doctype)

View file

@ -1,59 +0,0 @@
#!/usr/bin/python
import sys, os, re, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
settings.IMPORTING_IPR = True
from django.core import management
management.setup_environ(settings)
from ietf.ipr.models import IprDraftOld, IprRfcOld, IprDocAlias, IprDetail
from ietf.doc.models import DocAlias
# imports IprDraft and IprRfc, converting them to IprDocAlias links to Document
# assumptions: documents have been imported
# some links are borked, only import those that reference an existing IprDetail
# some links are borked, only import those that reference an existing IprDetail
ipr_ids = IprDetail.objects.all()

# IprDraft -> IprDocAlias: link IPR disclosures to draft documents.
for o in IprDraftOld.objects.filter(ipr__in=ipr_ids).select_related("document").order_by("id").iterator():
    try:
        alias = DocAlias.objects.get(name=o.document.filename)
    except DocAlias.DoesNotExist:
        print "COULDN'T FIND DOCUMENT", o.document.filename
        continue
    # Create the link only if it doesn't exist yet (idempotent import).
    try:
        IprDocAlias.objects.get(ipr=o.ipr_id, doc_alias=alias)
    except IprDocAlias.DoesNotExist:
        link = IprDocAlias()
        link.ipr_id = o.ipr_id
        link.doc_alias = alias
        link.rev = o.revision or ""
        link.save()
    print "importing IprDraft", o.pk, "linking", o.ipr_id, o.document.filename
for o in IprRfcOld.objects.filter(ipr__in=ipr_ids).select_related("document").order_by("id").iterator():
try:
alias = DocAlias.objects.get(name="rfc%s" % o.document.rfc_number)
except DocAlias.DoesNotExist:
print "COULDN'T FIND RFC%s", o.document.rfc_number
continue
try:
IprDocAlias.objects.get(ipr=o.ipr_id, doc_alias=alias)
except IprDocAlias.DoesNotExist:
link = IprDocAlias()
link.ipr_id = o.ipr_id
link.doc_alias = alias
link.rev = ""
link.save()
print "importing IprRfc", o.pk, "linking", o.ipr_id, o.document.rfc_number

View file

@ -1,211 +0,0 @@
#!/usr/bin/python
import sys, os, re, datetime, pytz
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
settings.IMPORTING_FROM_OLD_SCHEMA = True
from django.core import management
management.setup_environ(settings)
from django.template.defaultfilters import slugify
from ietf.idtracker.models import Acronym, EmailAddress
from ietf.liaisons.models import *
from ietf.doc.models import Document, DocAlias
from ietf.person.models import *
from redesign.importing.utils import old_person_to_person, make_revision_event
from ietf.name.models import *
from ietf.name.utils import name
# imports LiaisonDetail, OutgoingLiaisonApproval, Uploads
# todo: LiaisonStatementManager, LiaisonManagers, SDOAuthorizedIndividual
# assumptions:
# - persons have been imported
# - groups have been imported
# Map old integer purpose ids to LiaisonStatementPurposeName instances.
purpose_mapping = {
    1: name(LiaisonStatementPurposeName, "action", "For action", order=1),
    2: name(LiaisonStatementPurposeName, "comment", "For comment", order=2),
    3: name(LiaisonStatementPurposeName, "info", "For information", order=3),
    4: name(LiaisonStatementPurposeName, "response", "In response", order=4),
    # we drop the "other" category here, it was virtually unused in the old schema
}

# Document type used for liaison attachments.
liaison_attachment_doctype = name(DocTypeName, "liai-att", "Liaison Attachment")

purpose_mapping[None] = purpose_mapping[0] = purpose_mapping[3] # map unknown to "For information"
purpose_mapping[5] = purpose_mapping[3] # "Other" is mapped to "For information" as default

system_email = Email.objects.get(person__name="(System)")
system_person = Person.objects.get(name="(System)")
# Placeholder for missing dates in the old schema.
obviously_bogus_date = datetime.date(1970, 1, 1)
# Hand-maintained mapping from free-text liaison body names, exactly as
# found in the old data, to the corresponding imported Group.
# NOTE(review): misspellings like "Greoup" appear to be intentional,
# matching the stored data verbatim - do not "fix" the keys.
bodies = {
    'IESG': Group.objects.get(acronym="iesg"),
    'IETF': Group.objects.get(acronym="ietf"),
    'IETF IESG': Group.objects.get(acronym="iesg"),
    'The IETF': Group.objects.get(acronym="ietf"),
    'IAB/ISOC': Group.objects.get(acronym="iab"),
    'ISOC/IAB': Group.objects.get(acronym="iab"),
    'IAB/IESG': Group.objects.get(acronym="iab"),
    'IAB': Group.objects.get(acronym="iab"),
    'IETF IAB': Group.objects.get(acronym="iab"),
    'IETF Transport Directorate': Group.objects.get(acronym="tsvdir"),
    'Sigtran': Group.objects.get(acronym="sigtran", type="wg"),
    'IETF RAI WG': Group.objects.get(acronym="rai", type="area"),
    'IETF RAI': Group.objects.get(acronym="rai", type="area"),
    'IETF Mobile IP WG': Group.objects.get(acronym="mobileip", type="wg"),
    "IETF Operations and Management Area": Group.objects.get(acronym="ops", type="area"),
    "IETF/Operations and Management Area": Group.objects.get(acronym="ops", type="area"),
    "IETF OAM Area": Group.objects.get(acronym="ops", type="area"),
    "IETF O&M Area": Group.objects.get(acronym="ops", type="area"),
    "IETF O&M area": Group.objects.get(acronym="ops", type="area"),
    "IETF O&M": Group.objects.get(acronym="ops", type="area"),
    "IETF O&M Area Directors": Group.objects.get(acronym="ops", type="area"),
    "PWE3 Working Greoup": Group.objects.get(acronym="pwe3", type="wg"),
    "IETF PWE 3 WG": Group.objects.get(acronym="pwe3", type="wg"),
    "IETF/Routing Area": Group.objects.get(acronym="rtg", type="area"),
    "IRTF Internet Area": Group.objects.get(acronym="int", type="area"),
    "IETF Sub IP Area": Group.objects.get(acronym="sub", type="area"),
}
def get_body(name, raw_code):
    """Resolve a liaison body to a Group, or None if nothing matches.

    raw_code, when present, is a "<type>_<pk>" code stored by the newer
    tool and is decoded directly; otherwise the free-text *name* is
    looked up via the ``bodies`` table and a series of heuristics for
    the many historical spellings.  Relies on ``lookup_group``, which
    is defined further down in this script (resolved at call time).
    """
    if raw_code:
        # new tool is storing some group info directly, try decoding it
        b = None
        t = raw_code.split("_")
        if len(t) == 2:
            if t[0] == "area":
                b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="area")
            elif t[0] == "wg":
                b = lookup_group(acronym=Acronym.objects.get(pk=t[1]).acronym, type="wg")
            elif t[0] == "sdo":
                b = lookup_group(name=SDOs.objects.get(pk=t[1]).sdo_name, type="sdo")
        if not b:
            b = lookup_group(acronym=raw_code)
        return b

    # the from body name is a nice case study in how inconsistencies
    # build up over time
    name = (name.replace("(", "").replace(")", "").replace(" Chairs", "")
            .replace("Working Group", "WG").replace("working group", "WG"))
    b = bodies.get(name)
    t = name.split()
    if not b and name.startswith("IETF"):
        if len(t) == 1:
            # Single token like "IETF-foo" or "IETF/foo": split on the
            # embedded separator instead.
            if "-" in name:
                t = name.split("-")
            elif "/" in name:
                t = name.split("/")
            b = lookup_group(acronym=t[1].lower(), type="wg")
        elif len(t) < 3 or t[2].lower() == "wg":
            b = lookup_group(acronym=t[1].lower(), type="wg")
        elif t[2].lower() in ("area", "ad"):
            b = lookup_group(acronym=t[1].lower(), type="area")
            if not b:
                # Two-word area names like "Sub IP".
                b = lookup_group(name=u"%s %s" % (t[1], t[2]), type="area")
    if not b and name.endswith(" WG"):
        b = lookup_group(acronym=t[-2].lower(), type="wg")
    if not b:
        b = lookup_group(name=name, type="sdo")
    return b
# Copy each old LiaisonDetail row into the redesigned LiaisonStatement
# model, preserving primary keys so related_to_id references stay valid.
for o in LiaisonDetail.objects.all().order_by("pk"):
print "importing LiaisonDetail", o.pk
try:
l = LiaisonStatement.objects.get(pk=o.pk)
except LiaisonStatement.DoesNotExist:
l = LiaisonStatement(pk=o.pk)
l.title = (o.title or "").strip()
l.purpose = purpose_mapping[o.purpose_id]
# some rows only carry free-form purpose text; treat mentions of
# "action" as the action-required purpose
if o.purpose_text and not o.purpose and "action" in o.purpose_text.lower():
o.purpose = purpose_mapping[1]
l.body = (o.body or "").strip()
l.deadline = o.deadline_date
l.related_to_id = o.related_to_id # should not dangle as we process ids in turn
def lookup_group(**kwargs):
# return the matching Group or None instead of raising
try:
return Group.objects.get(**kwargs)
except Group.DoesNotExist:
return None
l.from_name = o.from_body().strip()
l.from_group = get_body(l.from_name, o.from_raw_code) # try to establish link
if not o.person:
l.from_contact = None
else:
try:
l.from_contact = Email.objects.get(address__iexact=o.from_email().address)
except EmailAddress.DoesNotExist:
# fall back to the person's most recently active address
l.from_contact = old_person_to_person(o.person).email_set.order_by('-active')[0]
if o.by_secretariat:
# statements entered by the secretariat record the submitter instead
l.to_name = o.submitter_name
if o.submitter_email:
l.to_name += " <%s>" % o.submitter_email
else:
l.to_name = o.to_body
l.to_name = l.to_name.strip()
l.to_group = get_body(l.to_name, o.to_raw_code) # try to establish link
l.to_contact = (o.to_poc or "").strip()
l.reply_to = (o.replyto or "").strip()
l.response_contact = (o.response_contact or "").strip()
l.technical_contact = (o.technical_contact or "").strip()
l.cc = (o.cc1 or "").strip()
l.submitted = o.submitted_date
l.modified = o.last_modified_date
if not l.modified and l.submitted:
l.modified = l.submitted
if not o.approval:
# no approval object means it's approved alright - weird, we
# have to fake the approved date then
l.approved = l.modified or l.submitted or datetime.datetime.now()
else:
l.approved = o.approval.approval_date if o.approval.approved else None
l.action_taken = o.action_taken
l.save()
# re-import attachments from scratch on every run
l.attachments.all().delete()
for i, u in enumerate(o.uploads_set.order_by("pk")):
attachment = Document()
attachment.title = u.file_title
attachment.type = liaison_attachment_doctype
attachment.name = l.name() + ("-attachment-%s" % (i + 1))
attachment.time = l.submitted
# we should fixup the filenames, but meanwhile, store it here
attachment.external_url = "file%s%s" % (u.file_id, u.file_extension)
attachment.save()
DocAlias.objects.get_or_create(document=attachment, name=attachment.name)
# attribute the upload to the from contact when we know who it was
e = make_revision_event(attachment, system_person)
if l.from_contact and l.from_contact.person:
e.by = l.from_contact.person
print e.by
e.save()
l.attachments.add(attachment)

View file

@ -1,481 +0,0 @@
#!/usr/bin/python
# One-off import script: copies meeting data from the old schema into
# the redesigned schema. Takes an optional YYYY-MM-DD argument to only
# import sessions modified on or after that date.
import sys, os, re, datetime, pytz
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
from django.template.defaultfilters import slugify
import datetime
from ietf.idtracker.models import AreaDirector, IETFWG, Acronym, IRTF, PersonOrOrgInfo
from ietf.meeting.models import *
from ietf.proceedings.models import Meeting as MeetingOld, MeetingVenue, MeetingRoom, NonSession, WgMeetingSession, WgAgenda, Minute, Slide, WgProceedingsActivities, NotMeetingGroup
from ietf.person.models import *
from ietf.doc.models import Document, DocAlias, State, DocEvent
from redesign.importing.utils import old_person_to_person, dont_save_queries, make_revision_event
from ietf.name.models import *
from ietf.name.utils import name
import_meetings_from = None
if len(sys.argv) > 1:
import_meetings_from = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d")
dont_save_queries()
# imports Meeting, MeetingVenue, MeetingRoom, NonSession,
# WgMeetingSession, WgAgenda, Minute, Slide, upload events from
# WgProceedingsActivities, NotMeetingGroup
# assumptions:
# - persons have been imported
# - groups have been imported
# create/look up the name objects the import relies on
ietf_meeting = name(MeetingTypeName, "ietf", "IETF")
interim_meeting = name(MeetingTypeName, "interim", "Interim")
# old numeric status ids -> new SessionStatusName objects
session_status_mapping = {
1: name(SessionStatusName, "schedw", "Waiting for Scheduling"),
2: name(SessionStatusName, "apprw", "Waiting for Approval"),
3: name(SessionStatusName, "appr", "Approved"),
4: name(SessionStatusName, "sched", "Scheduled"),
5: name(SessionStatusName, "canceled", "Canceled"),
6: name(SessionStatusName, "disappr", "Disapproved"),
}
status_not_meeting = name(SessionStatusName, "notmeet", "Not meeting")
status_deleted = name(SessionStatusName, "deleted", "Deleted")
session_status_mapping[0] = session_status_mapping[1] # assume broken statuses of 0 are actually cancelled
other_slot = name(TimeSlotTypeName, "other", "Other")
session_slot = name(TimeSlotTypeName, "session", "Session")
break_slot = name(TimeSlotTypeName, "break", "Break")
registration_slot = name(TimeSlotTypeName, "reg", "Registration")
plenary_slot = name(TimeSlotTypeName, "plenary", "Plenary")
# old numeric conflict priorities -> new ConstraintName objects
conflict_constraints = {
1: name(ConstraintName, "conflict", "Conflicts with"),
2: name(ConstraintName, "conflic2", "Conflicts with (secondary)"),
3: name(ConstraintName, "conflic3", "Conflicts with (tertiary)"),
}
agenda_doctype = name(DocTypeName, "agenda", "Agenda")
minutes_doctype = name(DocTypeName, "minutes", "Minutes")
slides_doctype = name(DocTypeName, "slides", "Slides")
system_person = Person.objects.get(name="(System)")
obviously_bogus_date = datetime.date(1970, 1, 1)
# Copy each old Meeting row, preserving primary keys; country names are
# converted to ISO country codes and time zones filled from a fixed table.
for o in MeetingOld.objects.all():
print "importing Meeting", o.pk
try:
m = Meeting.objects.get(number=o.meeting_num)
except:
m = Meeting(number="%s" % o.meeting_num)
m.pk = o.pk
m.type = ietf_meeting
m.date = o.start_date
m.city = o.city
# convert country to code
country_code = None
for k, v in pytz.country_names.iteritems():
if v == o.country:
country_code = k
break
if not country_code:
# old data uses a few names pytz does not know about
country_fallbacks = {
'USA': 'US'
}
country_code = country_fallbacks.get(o.country)
if country_code:
m.country = country_code
else:
print "unknown country", o.country
# hand-maintained (country, city) -> tz database name table
time_zone_lookup = {
("IE", "Dublin"): "Europe/Dublin",
("FR", "Paris"): "Europe/Paris",
("CA", "Vancouver"): "America/Vancouver",
("CZ", "Prague"): "Europe/Prague",
("US", "Chicago"): "America/Chicago",
("US", "Anaheim"): "America/Los_Angeles",
("NL", "Maastricht"): "Europe/Amsterdam",
("CN", "Beijing"): "Asia/Shanghai",
("JP", "Hiroshima"): "Asia/Tokyo",
("SE", "Stockholm"): "Europe/Stockholm",
("US", "San Francisco"): "America/Los_Angeles",
("US", "Minneapolis"): "America/Menominee",
}
m.time_zone = time_zone_lookup.get((m.country, m.city), "")
if not m.time_zone:
print "unknown time zone for", m.get_country_display(), m.city
m.venue_name = "" # no source for that in the old DB?
m.venue_addr = "" # no source for that in the old DB?
try:
venue = o.meetingvenue_set.get()
m.break_area = venue.break_area_name
m.reg_area = venue.reg_area_name
except MeetingVenue.DoesNotExist:
pass
# missing following semi-used fields from old Meeting: end_date,
# ack, agenda_html/agenda_text, future_meeting
m.save()
# Cache of Meeting objects keyed by meeting number so repeated lookups
# during the import do not hit the database every time.
meeting_cache = {}

def get_meeting(num):
    """Return the Meeting with the given number, caching lookups."""
    if num not in meeting_cache:
        meeting_cache[num] = Meeting.objects.get(number="%s" % num)
    return meeting_cache[num]
# Copy each old MeetingRoom row into the new Room model, keeping pks.
for o in MeetingRoom.objects.all():
print "importing MeetingRoom", o.pk
try:
r = Room.objects.get(pk=o.pk)
except Room.DoesNotExist:
r = Room(pk=o.pk)
r.meeting = get_meeting(o.meeting_id)
r.name = o.room_name
r.save()
def parse_time_desc(o):
    """Parse the old-schema time description into concrete datetimes.

    o.time_desc is an "HHMM-HHMM" string (possibly containing spaces)
    and o.day_id is the day offset from o.meeting.start_date. Returns a
    (start, end) tuple of datetime.datetime on the resolved day.
    """
    t = o.time_desc.replace(' ', '')
    start_time = datetime.time(int(t[0:2]), int(t[2:4]))
    end_time = datetime.time(int(t[5:7]), int(t[7:9]))
    d = o.meeting.start_date + datetime.timedelta(days=o.day_id)
    return (datetime.datetime.combine(d, start_time), datetime.datetime.combine(d, end_time))
# requested session length codes from the old schema -> seconds
requested_length_mapping = {
None: 0, # assume NULL to mean nothing particular requested
"1": 60 * 60,
"2": 90 * 60,
"3": 120 * 60,
"4": 150 * 60,
}
# phony "group" acronyms used in the old schema for tutorials, plenaries
# and similar non-WG sessions -> real group acronym in the new schema
non_group_mapping = {
"plenaryw": "ietf",
"plenaryt": "ietf",
"newcomer": "edu",
"editor": "edu",
"wgchair": "edu",
"sectut": "edu",
"protut": "edu",
"iepg": "iepg",
"rfc": "edu",
"wgleader": "edu",
"xml2rfc": "edu",
"rbst": "edu",
"recp": "ietf",
"MIBDOC": "edu",
"IE": "iepg",
"newcomF": "edu",
"WritRFC": "edu",
"Orien": "edu",
"newwork": "edu",
"leadership": "edu",
"ipv6spec": "edu",
"Wel": "ietf",
"IDRTut": "edu",
"ToolsTut": "edu",
"cosp": "tools",
"doclife": "edu",
"dnstut": "edu",
"xmltut": "edu",
"RFCEd": "edu",
"IDRBasics": "edu",
"newcomSWED": "edu",
"MIBTut": "edu",
"IDR75": "edu",
"NewcomerJP": "edu",
"MIBT": "edu",
"DNSProg": "edu",
"natTUT": "edu",
"NewcomerCHINA": "edu",
"CreatingID": "edu",
"NewMeetGreet": "ietf",
"appsprepmeeting": "edu",
"NewcomersFrench": "edu",
"NewComMandar": "edu",
"AdminP": "ietf",
}
def import_materials(wg_meeting_session, session):
def import_material_kind(kind, doctype):
# import agendas
irtf = 0
if wg_meeting_session.irtf:
irtf = wg_meeting_session.group_acronym_id
found = kind.objects.filter(meeting=wg_meeting_session.meeting_id,
group_acronym_id=wg_meeting_session.group_acronym_id,
irtf=irtf,
interim=0)
for o in found:
name = "%s-%s-%s" % (doctype.slug, session.meeting.number, session.group.acronym)
if kind == Slide:
name += "-%s" % o.slide_num
if session.name:
name += "-%s" % slugify(session.name)
name = name.lower()
try:
d = Document.objects.get(type=doctype, docalias__name=name)
except Document.DoesNotExist:
d = Document(type=doctype, name=name)
if kind == Slide:
d.title = o.slide_name.strip()
l = o.file_loc()
d.external_url = l[l.find("slides/") + len("slides/"):]
d.order = o.order_num or 1
else:
session_name = session.name if session.name else session.group.acronym.upper()
d.title = u"%s for %s at %s" % (doctype.name, session_name, session.meeting)
d.external_url = o.filename # save filenames for now as they don't appear to be quite regular
d.rev = "00"
d.group = session.group
d.time = datetime.datetime.combine(session.meeting.date, datetime.time(0, 0, 0)) # we may have better estimate below
d.save()
d.set_state(State.objects.get(type=doctype, slug="active"))
DocAlias.objects.get_or_create(document=d, name=name)
session.materials.add(d)
# try to create a doc event to figure out who uploaded it
e = make_revision_event(d, system_person)
t = d.type_id
if d.type_id == "slides":
t = "slide, '%s" % d.title
activities = WgProceedingsActivities.objects.filter(group_acronym=wg_meeting_session.group_acronym_id,
meeting=wg_meeting_session.meeting_id,
activity__startswith=t,
activity__endswith="was uploaded")[:1]
if activities:
a = activities[0]
e.time = datetime.datetime.combine(a.act_date, datetime.time(*[int(s) for s in a.act_time.split(":")]))
try:
e.by = old_person_to_person(a.act_by) or system_person
except PersonOrOrgInfo.DoesNotExist:
pass
d.time = e.time
d.save()
else:
print "NO UPLOAD ACTIVITY RECORD for", d.name.encode("utf-8"), t.encode("utf-8"), wg_meeting_session.group_acronym_id, wg_meeting_session.meeting_id
e.save()
import_material_kind(WgAgenda, agenda_doctype)
import_material_kind(Minute, minutes_doctype)
import_material_kind(Slide, slides_doctype)
obviously_bogus_date = datetime.date(1970, 1, 1)
# Import each old WgMeetingSession, splitting rows that represent
# multiple subsessions into separate Session objects (pk offset by
# 10000 per extra subsession), attaching time slots and materials.
all_sessions = WgMeetingSession.objects.all().order_by("pk")
if import_meetings_from:
all_sessions = all_sessions.filter(last_modified_date__gte=import_meetings_from)
for o in all_sessions.iterator():
# num_session is unfortunately not quite reliable, seems to be
# right for 1 or 2 but not 3 and it's sometimes null
sessions = o.num_session or 1
if o.sched_time_id3:
sessions = 3
print "importing WgMeetingSession", o.pk, "subsessions", sessions
for i in range(1, 1 + sessions):
pk = o.pk + (i - 1) * 10000 # move extra session out of the way
try:
s = Session.objects.get(pk=pk)
except:
s = Session(pk=pk)
s.meeting = get_meeting(o.meeting_id)
def get_timeslot(attr):
# build (or reuse) the TimeSlot for the old sched_time/room columns
meeting_time = getattr(o, attr)
if not meeting_time:
return None
room = Room.objects.get(pk=getattr(o, attr.replace("time", "room") + "_id"))
starts, ends = parse_time_desc(meeting_time)
slots = TimeSlot.objects.filter(meeting=s.meeting, time=starts, location=room).filter(models.Q(session=s) | models.Q(session=None))
if slots:
slot = slots[0]
else:
slot = TimeSlot(meeting=s.meeting, time=starts, location=room)
slot.type = session_slot
slot.name = meeting_time.session_name.session_name if meeting_time.session_name_id else "Unknown"
slot.duration = ends - starts
return slot
timeslot = get_timeslot("sched_time_id%s" % i)
if o.irtf:
s.group = Group.objects.get(acronym=IRTF.objects.get(pk=o.group_acronym_id).acronym.lower())
else:
acronym = Acronym.objects.get(pk=o.group_acronym_id)
if o.group_acronym_id < 0:
# this wasn't actually a WG session, but rather a tutorial
# or similar
a = non_group_mapping.get(acronym.acronym)
if not a:
a = "ietf"
print "UNKNOWN phony group", o.group_acronym_id, acronym.acronym, "falling back to '%s'" % a
s.group = Group.objects.get(acronym=a)
s.name = acronym.name
if timeslot:
if timeslot.name == "Unknown":
timeslot.name = acronym.name
if "Plenary" in timeslot.name:
timeslot.type = plenary_slot
else:
timeslot.type = other_slot
else:
s.group = Group.objects.get(acronym=acronym.acronym)
s.attendees = o.number_attendee
s.agenda_note = (o.special_agenda_note or "").strip()
s.requested = o.requested_date or obviously_bogus_date
s.requested_by = old_person_to_person(o.requested_by) if o.requested_by else system_person
s.requested_duration = requested_length_mapping[getattr(o, "length_session%s" % i)]
s.comments = (o.special_req or "").strip()
conflict_other = (o.conflict_other or "").strip()
if conflict_other:
if s.comments:
s.comments += " "
s.comments += u"(other conflicts: %s)" % conflict_other
s.status = session_status_mapping[o.status_id or 5]
s.scheduled = o.scheduled_date
s.modified = o.last_modified_date or obviously_bogus_date
s.save()
if timeslot:
timeslot.session = s
timeslot.modified = s.modified
timeslot.save()
import_materials(o, s)
# some sessions have been scheduled over multiple time slots
if i < 3:
timeslot = get_timeslot("combined_time_id%s" % i)
if timeslot:
timeslot.session = s
timeslot.modified = s.modified
timeslot.save()
# turn the old conflict columns into Constraint objects
for i in (1, 2, 3):
conflict = (getattr(o, "conflict%s" % i) or "").replace(",", " ").lower()
conflicting_groups = [g for g in conflict.split() if g]
for target in Group.objects.filter(acronym__in=conflicting_groups):
Constraint.objects.get_or_create(
meeting=s.meeting,
source=s.group,
target=target,
name=conflict_constraints[i])
# missing following fields from old: ts_status_id (= third session
# status id, third session required AD approval),
# combined_room_id1/2, combined_time_id1/2
# Import breaks/registration ("non-sessions") as TimeSlots without a session.
for o in NonSession.objects.all().order_by('pk').select_related("meeting").iterator():
print "importing NonSession", o.pk
if o.time_desc in ("", "0"):
print "IGNORING non-scheduled NonSession", o.non_session_ref.name
continue
meeting = get_meeting(o.meeting_id)
# some non-sessions are scheduled every day, but only if there's a
# session nearby, figure out which days this corresponds to
days = set()
if o.day_id == None:
t = datetime.time(int(o.time_desc[-4:][0:2]), int(o.time_desc[-4:][2:4]))
for s in TimeSlot.objects.filter(meeting=meeting):
if s.time.time() == t:
days.add((s.time.date() - meeting.date).days)
else:
days.add(o.day_id)
for day in days:
o.day_id = day
starts, ends = parse_time_desc(o)
name = o.non_session_ref.name
try:
slot = TimeSlot.objects.get(meeting=meeting, time=starts, name=name)
except TimeSlot.DoesNotExist:
slot = TimeSlot(meeting=meeting, time=starts, name=name)
slot.location = None
# non_session_ref 1 is registration, everything else is a break
if o.non_session_ref_id == 1:
slot.type = registration_slot
else:
slot.type = break_slot
slot.duration = ends - starts
slot.show_location = o.show_break_location
slot.save()
# Record groups explicitly marked as not meeting as "notmeet" Sessions.
for o in NotMeetingGroup.objects.all().select_related('group_acronym'):
if o.group_acronym_id == None or o.group_acronym == None:
print "SKIPPING NotMeetingGroup with group_acronym_id", o.group_acronym_id
continue # bogus data
print "importing NotMeetingGroup", o.group_acronym.acronym, o.meeting_id
try:
group = Group.objects.get(acronym=o.group_acronym.acronym)
except Group.DoesNotExist:
print "SKIPPING", o.group_acronym.acronym
continue
meeting = get_meeting(o.meeting_id)
# only add the marker if the group has no real session at this meeting
if not Session.objects.filter(meeting=meeting, group=group):
Session.objects.get_or_create(meeting=meeting,
group=group,
status=status_not_meeting,
defaults=dict(requested_by=system_person,
requested_duration=0))

View file

@ -1,195 +0,0 @@
#!/usr/bin/python
# One-off import script: creates the special (System) person/email and
# imports persons referenced by the various legacy tables. Takes an
# optional YYYY-MM-DD argument to limit the IDAuthor import.
import sys, os, re, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
from ietf.idtracker.models import IESGLogin, AreaDirector, IETFWG, PersonOrOrgInfo, IDAuthor
from ietf.ietfauth.models import LegacyWgPassword, LegacyLiaisonUser
from ietf.liaisons.models import LiaisonDetail, LiaisonManagers, SDOAuthorizedIndividual
from ietf.person.models import *
from redesign.importing.utils import *
# creates system person and email
# imports AreaDirector persons that are connected to an IETFWG,
# persons from IDAuthor, announcement originators from Announcements,
# requesters from WgMeetingSession, LiaisonDetail persons,
# LiaisonManagers/SDOAuthorizedIndividual persons,
# WgProceedingsActivities persons
# should probably import
# PersonOrOrgInfo/PostalAddress/EmailAddress/PhoneNumber fully
import_docs_from = None
if len(sys.argv) > 1:
import_docs_from = datetime.datetime.strptime(sys.argv[1], "%Y-%m-%d")
# make sure special system user/email is created
print "creating (System) person and email"
try:
system_person = Person.objects.get(name="(System)")
except Person.DoesNotExist:
system_person = Person.objects.create(
id=0, # special value
name="(System)",
ascii="(System)",
address="",
)
system_person = Person.objects.get(name="(System)")
if system_person.id != 0: # work around bug in Django
Person.objects.filter(id=system_person.id).update(id=0)
system_person = Person.objects.get(id=0)
system_alias = Alias.objects.get_or_create(
person=system_person,
name=system_person.name
)
system_email = Email.objects.get_or_create(
address="(System)",
defaults=dict(active=True, person=system_person)
)
# LegacyWgPassword
# Attach Django User accounts to persons for the two legacy login tables;
# skips logins whose username is already bound to a different person.
for o in LegacyWgPassword.objects.all():
print "importing LegacyWgPassword", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
if not email:
continue
username = o.login_name[:30]
persons = Person.objects.filter(user__username=username)
if persons:
if persons[0] != email.person:
print "SKIPPING", o.login_name, "who is connected to another person "
continue
user, _ = User.objects.get_or_create(username=username)
email.person.user = user
email.person.save()
# LegacyLiaisonUser
for o in LegacyLiaisonUser.objects.all():
print "importing LegacyLiaisonUser", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
if not email:
continue
username = o.login_name[:30]
persons = Person.objects.filter(user__username=username)
if persons:
if persons[0] != email.person:
print "SKIPPING", o.login_name, "who is connected to another person "
continue
user, _ = User.objects.get_or_create(username=username)
email.person.user = user
email.person.save()
# IESGLogin
# Import persons referenced from the remaining legacy tables; fake
# email addresses are only created where the old data lacks real ones.
for o in IESGLogin.objects.all():
print "importing IESGLogin", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
if not o.person:
# some logins lack a person link; try matching by name
persons = PersonOrOrgInfo.objects.filter(first_name=o.first_name, last_name=o.last_name)
if persons:
o.person = persons[0]
else:
print "NO PERSON", o.person_id
continue
email = get_or_create_email(o, create_fake=False)
if not email:
continue
user, _ = User.objects.get_or_create(username=o.login_name)
email.person.user = user
email.person.save()
# AreaDirector from IETFWG persons
for o in AreaDirector.objects.filter(ietfwg__in=IETFWG.objects.all()).exclude(area=None).distinct().order_by("pk").iterator():
print "importing AreaDirector (from IETFWG) persons", o.pk
get_or_create_email(o, create_fake=False)
# IESGHistory persons
for o in PersonOrOrgInfo.objects.filter(iesghistory__id__gte=1).order_by("pk").distinct():
print "importing IESGHistory person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
# WgMeetingSession persons
for o in PersonOrOrgInfo.objects.filter(wgmeetingsession__pk__gte=1).distinct().order_by("pk").iterator():
print "importing WgMeetingSession persons", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
get_or_create_email(o, create_fake=False)
# Announcement persons
for o in PersonOrOrgInfo.objects.filter(announcement__announcement_id__gte=1).order_by("pk").distinct():
print "importing Announcement originator", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
# LiaisonManagers persons
for o in LiaisonManagers.objects.order_by("pk"):
print "importing LiaisonManagers person", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
addresses = o.person.emailaddress_set.filter(priority=o.email_priority).filter(address__contains="@")[:1]
if addresses:
possibly_import_other_priority_email(email, addresses[0])
# SDOAuthorizedIndividual persons
for o in PersonOrOrgInfo.objects.filter(sdoauthorizedindividual__pk__gte=1).order_by("pk").distinct():
print "importing SDOAuthorizedIndividual person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=False)
# Liaison persons (these are used as from contacts)
for o in LiaisonDetail.objects.exclude(person=None).order_by("pk"):
print "importing LiaisonDetail person", o.pk, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=True)
# we may also need to import email address used specifically for
# the document
if "@" in email.address:
try:
possibly_import_other_priority_email(email, o.from_email())
except EmailAddress.DoesNotExist:
pass
# WgProceedingsActivities persons
for o in PersonOrOrgInfo.objects.filter(wgproceedingsactivities__id__gte=1).order_by("pk").distinct():
print "importing WgProceedingsActivities person", o.pk, o.first_name.encode('utf-8'), o.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=True)
# IDAuthor persons
all_authors = IDAuthor.objects.all().order_by('id').select_related('person')
if import_docs_from:
all_authors = all_authors.filter(document__last_modified_date__gte=import_docs_from)
for o in all_authors.iterator():
print "importing IDAuthor", o.id, o.person_id, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
email = get_or_create_email(o, create_fake=True)
# we may also need to import email address used specifically for
# the document
addresses = o.person.emailaddress_set.filter(type='I-D', priority=o.document_id).filter(address__contains="@")[:1]
if addresses:
possibly_import_other_priority_email(email, addresses[0])

View file

@ -1,97 +0,0 @@
#!/usr/bin/python
# One-off import script: bulk-copies tables that are reused unchanged in
# the redesigned schema from the "legacy" database to the default one.
# Takes one argument: which group of models to copy ('base' or 'others').
# boiler plate
import sys, os, re, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
# script
from django.db.models import TextField, CharField
from django.contrib.sites.models import Site
from ietf.redirects.models import Redirect, Suffix, Command
from ietf.iesg.models import TelechatAgendaItem, WGAction
from ietf.ipr.models import IprSelecttype, IprLicensing, IprDetail, IprContact, IprNotification, IprUpdate
from ietf.submit.models import IdSubmissionStatus, IdSubmissionDetail, IdApprovedDetail, TempIdAuthors
from django.contrib.auth.models import User
# 'base' must be copied before everything else; 'others' depends on it
known_models = {
'base': [User],
'others': [Site,
Redirect, Suffix, Command,
TelechatAgendaItem, WGAction,
IprSelecttype, IprLicensing, IprDetail, IprContact, IprNotification, IprUpdate,
IdSubmissionStatus, IdSubmissionDetail, IdApprovedDetail,
TempIdAuthors]
}
models_to_copy = known_models[sys.argv[1]]
def queryset_chunks(q, n):
    """Split queryset q up in chunks of max size n.

    Returns a generator of slices of q; q.count() is evaluated once up
    front, so the last chunk may be shorter than n.
    """
    return (q[i:i+n] for i in range(0, q.count(), n))
def insert_many_including_pk(objects, using="default", table=None):
    """Insert list of Django objects in one SQL query. Objects must be
    of the same Django model. Note that save is not called and signals
    on the model are not raised."""
    if not objects:
        return
    import django.db.models
    from django.db import connections
    con = connections[using]
    model = objects[0].__class__
    fields = [f for f in model._meta.fields]
    # pre-compute the per-row parameter lists, including primary keys
    parameters = []
    for o in objects:
        pars = []
        for f in fields:
            pars.append(f.get_db_prep_save(f.pre_save(o, True), connection=con))
        parameters.append(pars)
    if not table:
        table = model._meta.db_table
    column_names = ",".join(con.ops.quote_name(f.column) for f in fields)
    placeholders = ",".join(("%s",) * len(fields))
    # "replace into" is MySQL-specific; it makes re-runs of the import
    # idempotent by overwriting rows with the same primary key
    con.cursor().executemany(
        "replace into %s (%s) values (%s)" % (table, column_names, placeholders),
        parameters)
def clean_chunk(model, chunk):
    """Normalize legacy rows in chunk in place so they satisfy the new
    schema's NOT NULL constraints before being bulk-inserted."""
    for o in chunk:
        if model == IprDetail:
            # empty string is not a valid value for this column in the
            # new schema; store NULL instead
            if o.applies_to_all == "":
                o.applies_to_all = None
        for f in model._meta.fields:
            # change non-nullable nulls on string fields to ""
            if type(f) in (CharField, TextField) and not f.null and getattr(o, f.name) is None:
                setattr(o, f.name, "")
# Copy each selected model in chunks of 1000 rows from the legacy
# database into the default one, printing a dot per chunk as progress.
for model in models_to_copy:
sys.stdout.write("copying %s " % model._meta.object_name)
sys.stdout.flush()
# some models don't follow the app_model table naming convention
irregular_models = [Site]
if model in irregular_models:
table_name = Site._meta.db_table
else:
table_name = "%s_%s" % (model._meta.app_label, model._meta.object_name.lower())
for chunk in queryset_chunks(model.objects.using("legacy").all(), 1000):
clean_chunk(model, chunk)
insert_many_including_pk(chunk, using="default", table=table_name)
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")

View file

@ -1,318 +0,0 @@
#!/usr/bin/python
# One-off import script: imports group roles (chairs, ADs, secretaries,
# liaison managers, delegates, ...) from the legacy tables.
import sys, os, re, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
from ietf.person.models import *
from ietf.group.models import *
from ietf.name.models import *
from ietf.name.utils import name
from redesign.importing.utils import get_or_create_email
from ietf.idtracker.models import IESGLogin, AreaDirector, PersonOrOrgInfo, WGChair, WGEditor, WGSecretary, WGTechAdvisor, ChairsHistory, Role as OldRole, Acronym, IRTFChair
from ietf.liaisons.models import LiaisonManagers, SDOAuthorizedIndividual
from ietf.wgchairs.models import WGDelegate
from ietf.proceedings.models import IESGHistory
from ietf.utils.history import *
# assumptions:
# - persons have been imported
# - groups have been imported
# imports roles from IESGLogin, AreaDirector, WGEditor, WGChair,
# IRTFChair, WGSecretary, WGTechAdvisor, NomCom chairs from
# ChairsHistory, IESGHistory, Role, LiaisonManagers,
# SDOAuthorizedIndividual, WGDelegate
# create/look up the RoleName objects used below
area_director_role = name(RoleName, "ad", "Area Director")
pre_area_director_role = name(RoleName, "pre-ad", "Incoming Area Director")
chair_role = name(RoleName, "chair", "Chair")
editor_role = name(RoleName, "editor", "Editor")
secretary_role = name(RoleName, "secr", "Secretary")
techadvisor_role = name(RoleName, "techadv", "Tech Advisor")
exec_director_role = name(RoleName, "execdir", "Executive Director")
adm_director_role = name(RoleName, "admdir", "Administrative Director")
liaison_manager_role = name(RoleName, "liaiman", "Liaison Manager")
authorized_role = name(RoleName, "auth", "Authorized Individual")
delegate_role = name(RoleName, "delegate", "Delegate")
# import IANA authorized individuals
for o in User.objects.using("legacy").filter(groups__name="IANA"):
print "Importing IANA group member", o
# fix a known typo in the legacy login data
if o.username == "amanda.barber@icann.org":
o.username = "amanda.baber@icann.org"
person = PersonOrOrgInfo.objects.filter(iesglogin__login_name=o.username)[0]
group = Group.objects.get(acronym="iana")
email = get_or_create_email(person, create_fake=False)
Role.objects.get_or_create(name=authorized_role, group=group, person=email.person, email=email)
# WGDelegate
for o in WGDelegate.objects.all().order_by("pk"):
print "importing WGDelegate", o.pk, unicode(o.wg).encode("utf-8"), unicode(o.person).encode("utf-8")
group = Group.objects.get(acronym=o.wg.group_acronym.acronym)
email = get_or_create_email(o, create_fake=False)
Role.objects.get_or_create(name=delegate_role, group=group, person=email.person, email=email)
# SDOAuthorizedIndividual
for o in SDOAuthorizedIndividual.objects.all().order_by("pk"):
print "importing SDOAuthorizedIndividual", o.pk, unicode(o.sdo).encode("utf-8"), unicode(o.person).encode("utf-8")
group = Group.objects.get(name=o.sdo.sdo_name, type="sdo")
email = get_or_create_email(o, create_fake=False)
Role.objects.get_or_create(name=authorized_role, group=group, person=email.person, email=email)
# LiaisonManagers
for o in LiaisonManagers.objects.all().order_by("pk"):
print "importing LiaisonManagers", o.pk, unicode(o.sdo).encode("utf-8"), unicode(o.person).encode("utf-8")
group = Group.objects.get(name=o.sdo.sdo_name, type="sdo")
email = Email.objects.get(address__iexact=o.person.email(priority=o.email_priority)[1])
Role.objects.get_or_create(name=liaison_manager_role, group=group, person=email.person, email=email)
# Role
# The old Role table mixes chairs and directors; a few well-known role
# ids get special official role addresses below.
for o in OldRole.objects.all().order_by('pk'):
acronym = o.role_name.lower()
role = chair_role
if o.id == OldRole.NOMCOM_CHAIR:
continue # handled elsewhere
print "importing Role", o.id, o.role_name, unicode(o.person).encode("utf-8")
email = get_or_create_email(o, create_fake=False)
official_email = email
if o.role_name.endswith("Executive Director"):
acronym = acronym[:-(len("Executive Director") + 1)]
role = exec_director_role
if o.id == OldRole.IAD_CHAIR:
acronym = "ietf"
role = adm_director_role
official_email, _ = Email.objects.get_or_create(address="iad@ietf.org")
if o.id == OldRole.IETF_CHAIR:
official_email, _ = Email.objects.get_or_create(address="chair@ietf.org")
if o.id == OldRole.IAB_CHAIR:
official_email, _ = Email.objects.get_or_create(address="iab-chair@ietf.org")
if o.id == OldRole.RSOC_CHAIR:
official_email, _ = Email.objects.get_or_create(address="rsoc-chair@iab.org")
if o.id == 9:
official_email, _ = Email.objects.get_or_create(address="rfc-ise@rfc-editor.org")
group = Group.objects.get(acronym=acronym)
Role.objects.get_or_create(name=role, group=group, person=email.person, email=official_email)
# WGEditor
for o in WGEditor.objects.all():
acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym
print "importing WGEditor", acronym, o.person
email = get_or_create_email(o, create_fake=True)
group = Group.objects.get(acronym=acronym)
Role.objects.get_or_create(name=editor_role, group=group, person=email.person, email=email)
# WGSecretary
for o in WGSecretary.objects.all():
acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym
print "importing WGSecretary", acronym, o.person
email = get_or_create_email(o, create_fake=True)
group = Group.objects.get(acronym=acronym)
Role.objects.get_or_create(name=secretary_role, group=group, person=email.person, email=email)
# WGTechAdvisor
for o in WGTechAdvisor.objects.all():
acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym
print "importing WGTechAdvisor", acronym, o.person
email = get_or_create_email(o, create_fake=True)
group = Group.objects.get(acronym=acronym)
Role.objects.get_or_create(name=techadvisor_role, group=group, person=email.person, email=email)
# WGChair
for o in WGChair.objects.all():
# there's some garbage in this table, so wear double safety belts
try:
acronym = Acronym.objects.get(acronym_id=o.group_acronym_id).acronym
except Acronym.DoesNotExist:
print "SKIPPING WGChair with unknown acronym id", o.group_acronym_id
continue
try:
person = o.person
except PersonOrOrgInfo.DoesNotExist:
print "SKIPPING WGChair", acronym, "with invalid person id", o.person_id
continue
try:
group = Group.objects.get(acronym=acronym)
except Group.DoesNotExist:
print "SKIPPING WGChair", o.person, "with non-existing group", acronym
continue
if group.acronym == "none":
print "SKIPPING WGChair", o.person, "with bogus group", group.acronym
continue
print "importing WGChair", acronym, o.person
email = get_or_create_email(o, create_fake=True)
Role.objects.get_or_create(name=chair_role, group=group, person=email.person, email=email)
# IRTFChair: import research group chairs.  The IRTF chair itself is
# deliberately skipped -- it was already imported from Role, and the
# legacy row for it is known to be wrong.
for o in IRTFChair.objects.all():
    acronym = o.irtf.acronym.lower()
    if acronym == "irtf":
        # we already got the IRTF chair from Role, and the data in here is buggy
        continue
    print "importing IRTFChair", acronym, o.person
    email = get_or_create_email(o, create_fake=True)
    group = Group.objects.get(acronym=acronym)
    Role.objects.get_or_create(name=chair_role, group=group, person=email.person, email=email)
# NomCom chairs: match each historic chair to the right "nomcomYY"
# group by looking for the "start/end" year string in the group name.
# The role's address is the shared nomcom-chair@ietf.org alias, not the
# person's own address.
official_email, _ = Email.objects.get_or_create(address="nomcom-chair@ietf.org")
nomcom_groups = list(Group.objects.filter(acronym__startswith="nomcom").exclude(acronym="nomcom"))
for o in ChairsHistory.objects.filter(chair_type=OldRole.NOMCOM_CHAIR):
    print "importing NOMCOM chair", o
    for g in nomcom_groups:
        if ("%s/%s" % (o.start_year, o.end_year)) in g.name:
            break
    # NOTE(review): relies on the loop variable g leaking out of the
    # inner for; if no group name matches, g silently stays at the last
    # nomcom group -- verify the legacy data always has a match.
    email = get_or_create_email(o, create_fake=False)
    Role.objects.get_or_create(name=chair_role, group=g, person=email.person, email=official_email)
# IESGLogin: import login accounts.  Rows without a person link get a
# best-effort match by first/last name.  Secretariat-level accounts get
# a secretariat Role plus Django staff/superuser bits on their user.
for o in IESGLogin.objects.all():
    print "importing IESGLogin", o.pk, o.first_name.encode("utf-8"), o.last_name.encode("utf-8")
    if not o.person:
        persons = PersonOrOrgInfo.objects.filter(first_name=o.first_name, last_name=o.last_name)
        if persons:
            o.person = persons[0]
        else:
            print "NO PERSON", o.person_id
            continue
    email = get_or_create_email(o, create_fake=False)
    # current ADs are imported below
    if email and o.user_level == IESGLogin.SECRETARIAT_LEVEL:
        if not Role.objects.filter(name=secretary_role, person=email.person):
            Role.objects.create(name=secretary_role, group=Group.objects.get(acronym="secretariat"), person=email.person, email=email)
        u = email.person.user
        if u:
            u.is_staff = True
            u.is_superuser = True
            u.save()
# AreaDirector: import area directors.  user_level 4 in IESGLogin marks
# an incoming (pre-) AD, which gets the pre-AD role instead.  An
# existing role parked on the "iesg" group is re-pointed at the actual
# area rather than duplicated.
for o in AreaDirector.objects.all():
    if not o.area:
        print "NO AREA", o.person, o.area_id
        continue
    print "importing AreaDirector", o.area, o.person
    email = get_or_create_email(o, create_fake=False)
    area = Group.objects.get(acronym=o.area.area_acronym.acronym)
    role_type = area_director_role
    try:
        if IESGLogin.objects.get(person=o.person).user_level == 4:
            role_type = pre_area_director_role
    except IESGLogin.DoesNotExist:
        pass
    r = Role.objects.filter(name=role_type,
                            person=email.person)
    # NOTE(review): r[0].group == "iesg" compares a Group instance with a
    # string -- presumably r[0].group.acronym was intended; confirm
    # whether this branch ever fires.
    if r and r[0].group == "iesg":
        r[0].group = area
        r[0].name = role_type
        r[0].save()
    else:
        Role.objects.get_or_create(name=role_type, group=area, person=email.person, email=email)
# IESGHistory: reconstruct per-meeting AD history.  For each historic
# (area, meeting) pair, either the same person is still AD today, or a
# GroupHistory/RoleHistory snapshot is created at the meeting date.
emails_for_time = {}
for o in IESGHistory.objects.all().order_by('meeting__start_date', 'pk'):
    print "importing IESGHistory", o.pk, o.area, o.person, o.meeting
    email = get_or_create_email(o, create_fake=False)
    if not email:
        # NOTE(review): bare string, a no-op -- a "print" statement was
        # presumably intended here.
        "SKIPPING IESGHistory with unknown email"
        continue
    # our job here is to make sure we either have the same AD today or
    # got proper GroupHistory and RoleHistory objects in the database;
    # there's only incomplete information available in the database so
    # the reconstructed history will necessarily not be entirely
    # accurate, just good enough to conclude who was AD
    area = Group.objects.get(acronym=o.area.area_acronym.acronym, type="area")
    meeting_time = datetime.datetime.combine(o.meeting.start_date, datetime.time(0, 0, 0))
    key = (area, meeting_time)
    if not key in emails_for_time:
        emails_for_time[key] = []
    emails_for_time[key].append(email)
    history = find_history_active_at(area, meeting_time)
    # already recorded (either in a history snapshot or as current AD)?
    if (history and history.rolehistory_set.filter(person=email.person) or
        not history and area.role_set.filter(person=email.person)):
        continue
    if history and history.time == meeting_time:
        # add to existing GroupHistory
        RoleHistory.objects.create(name=area_director_role, group=history, person=email.person, email=email)
    else:
        # snapshot the group's current (or most recent known) attributes
        existing = history if history else area
        h = GroupHistory(group=area,
                         time=meeting_time,
                         name=existing.name,
                         acronym=existing.acronym,
                         state=existing.state,
                         type=existing.type,
                         parent=existing.parent,
                         ad=existing.ad,
                         list_email=existing.list_email,
                         list_subscribe=existing.list_subscribe,
                         list_archive=existing.list_archive,
                         comments=existing.comments,
                         )
        h.save()
        # we need to add all emails for this area at this time
        # because the new GroupHistory resets the known roles
        for e in emails_for_time[key]:
            RoleHistory.objects.get_or_create(name=area_director_role, group=h, person=e.person, email=e)

View file

@ -1,193 +0,0 @@
#!/usr/bin/python
# One-off migration script: creates the StateType rows and their State
# rows for the redesigned document model.  Must run with the proxy
# classes disabled so the real redesign tables are written.
import sys, os, datetime
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
sys.path = [ basedir ] + sys.path
from ietf import settings
settings.USE_DB_REDESIGN_PROXY_CLASSES = False
from django.core import management
management.setup_environ(settings)
import workflows.models
from ietf.ietfworkflows.models import StateDescription
from ietf.idrfc.mirror_rfc_editor_queue import get_rfc_state_mapping
from ietf.doc.models import *
# adds states for documents and import states from workflows.Workflow
# and ietfworkflows.StateDescription
# state types: one StateType per kind of state machine a document can
# be in (idempotent thanks to get_or_create).
draft_type, _ = StateType.objects.get_or_create(slug="draft", label="State")
draft_iesg_type, _ = StateType.objects.get_or_create(slug="draft-iesg", label="IESG state")
draft_iana_type, _ = StateType.objects.get_or_create(slug="draft-iana", label="IANA state")
draft_rfc_type, _ = StateType.objects.get_or_create(slug="draft-rfceditor", label="RFC Editor state")
ietf_type, _ = StateType.objects.get_or_create(slug="draft-stream-ietf", label="IETF state")
irtf_type, _ = StateType.objects.get_or_create(slug="draft-stream-irtf", label="IRTF state")
ise_type, _ = StateType.objects.get_or_create(slug="draft-stream-ise", label="ISE state")
iab_type, _ = StateType.objects.get_or_create(slug="draft-stream-iab", label="IAB state")
slides_type, _ = StateType.objects.get_or_create(slug="slides", label="State")
minutes_type, _ = StateType.objects.get_or_create(slug="minutes", label="State")
agenda_type, _ = StateType.objects.get_or_create(slug="agenda", label="State")
liaison_att_type, _ = StateType.objects.get_or_create(slug="liai-att", label="State")
charter_type, _ = StateType.objects.get_or_create(slug="charter", label="State")
# draft states: the basic lifecycle of an Internet-Draft; "order"
# controls display ordering in the UI.
print "importing draft states"
State.objects.get_or_create(type=draft_type, slug="active", name="Active", order=1)
State.objects.get_or_create(type=draft_type, slug="expired", name="Expired", order=2)
State.objects.get_or_create(type=draft_type, slug="rfc", name="RFC", order=3)
State.objects.get_or_create(type=draft_type, slug="repl", name="Replaced", order=4)
State.objects.get_or_create(type=draft_type, slug="auth-rm", name="Withdrawn by Submitter", order=5)
State.objects.get_or_create(type=draft_type, slug="ietf-rm", name="Withdrawn by IETF", order=6)
# IESG draft states: the IESG tracking state machine; the long desc
# strings are the official state explanations shown to users.
State.objects.get_or_create(type=draft_iesg_type, slug="pub", name="RFC Published", desc='The ID has been published as an RFC.', order=32)
State.objects.get_or_create(type=draft_iesg_type, slug="dead", name="Dead", desc='Document is "dead" and is no longer being tracked. (E.g., it has been replaced by another document with a different name, it has been withdrawn, etc.)', order=99)
State.objects.get_or_create(type=draft_iesg_type, slug="approved", name="Approved-announcement to be sent", desc='The IESG has approved the document for publication, but the Secretariat has not yet sent out on official approval message.', order=27)
State.objects.get_or_create(type=draft_iesg_type, slug="ann", name="Approved-announcement sent", desc='The IESG has approved the document for publication, and the Secretariat has sent out the official approval message to the RFC editor.', order=30)
State.objects.get_or_create(type=draft_iesg_type, slug="watching", name="AD is watching", desc='An AD is aware of the document and has chosen to place the document in a separate state in order to keep a closer eye on it (for whatever reason). Documents in this state are still not being actively tracked in the sense that no formal request has been made to publish or advance the document. The sole difference between this state and "I-D Exists" is that an AD has chosen to put it in a separate state, to make it easier to keep track of (for the AD\'s own reasons).', order=42)
State.objects.get_or_create(type=draft_iesg_type, slug="iesg-eva", name="IESG Evaluation", desc='The document is now (finally!) being formally reviewed by the entire IESG. Documents are discussed in email or during a bi-weekly IESG telechat. In this phase, each AD reviews the document and airs any issues they may have. Unresolvable issues are documented as "discuss" comments that can be forwarded to the authors/WG. See the description of substates for additional details about the current state of the IESG discussion.', order=20)
State.objects.get_or_create(type=draft_iesg_type, slug="ad-eval", name="AD Evaluation", desc='A specific AD (e.g., the Area Advisor for the WG) has begun reviewing the document to verify that it is ready for advancement. The shepherding AD is responsible for doing any necessary review before starting an IETF Last Call or sending the document directly to the IESG as a whole.', order=11)
State.objects.get_or_create(type=draft_iesg_type, slug="lc-req", name="Last Call Requested", desc='The AD has requested that the Secretariat start an IETF Last Call, but the the actual Last Call message has not been sent yet.', order=15)
State.objects.get_or_create(type=draft_iesg_type, slug="lc", name="In Last Call", desc='The document is currently waiting for IETF Last Call to complete. Last Calls for WG documents typically last 2 weeks, those for individual submissions last 4 weeks.', order=16)
State.objects.get_or_create(type=draft_iesg_type, slug="pub-req", name="Publication Requested", desc='A formal request has been made to advance/publish the document, following the procedures in Section 7.5 of RFC 2418. The request could be from a WG chair, from an individual through the RFC Editor, etc. (The Secretariat (iesg-secretary@ietf.org) is copied on these requests to ensure that the request makes it into the Datatracker.) A document in this state has not (yet) been reviewed by an AD nor has any official action been taken on it yet (other than to note that its publication has been requested.', order=10)
State.objects.get_or_create(type=draft_iesg_type, slug="rfcqueue", name="RFC Ed Queue", desc='The document is in the RFC editor Queue (as confirmed by http://www.rfc-editor.org/queue.html).', order=31)
State.objects.get_or_create(type=draft_iesg_type, slug="defer", name="IESG Evaluation - Defer", desc='During a telechat, one or more ADs requested an additional 2 weeks to review the document. A defer is designed to be an exception mechanism, and can only be invoked once, the first time the document comes up for discussion during a telechat.', order=21)
State.objects.get_or_create(type=draft_iesg_type, slug="writeupw", name="Waiting for Writeup", desc='Before a standards-track or BCP document is formally considered by the entire IESG, the AD must write up a protocol action. The protocol action is included in the approval message that the Secretariat sends out when the document is approved for publication as an RFC.', order=18)
State.objects.get_or_create(type=draft_iesg_type, slug="goaheadw", name="Waiting for AD Go-Ahead", desc='As a result of the IETF Last Call, comments may need to be responded to and a revision of the ID may be needed as well. The AD is responsible for verifying that all Last Call comments have been adequately addressed and that the (possibly revised) document is in the ID directory and ready for consideration by the IESG as a whole.', order=19)
State.objects.get_or_create(type=draft_iesg_type, slug="review-e", name="Expert Review", desc='An AD sometimes asks for an external review by an outside party as part of evaluating whether a document is ready for advancement. MIBs, for example, are reviewed by the "MIB doctors". Other types of reviews may also be requested (e.g., security, operations impact, etc.). Documents stay in this state until the review is complete and possibly until the issues raised in the review are addressed. See the "note" field for specific details on the nature of the review.', order=12)
State.objects.get_or_create(type=draft_iesg_type, slug="nopubadw", name="DNP-waiting for AD note", desc='Do Not Publish: The IESG recommends against publishing the document, but the writeup explaining its reasoning has not yet been produced. DNPs apply primarily to individual submissions received through the RFC editor. See the "note" field for more details on who has the action item.', order=33)
State.objects.get_or_create(type=draft_iesg_type, slug="nopubanw", name="DNP-announcement to be sent", desc='The IESG recommends against publishing the document, the writeup explaining its reasoning has been produced, but the Secretariat has not yet sent out the official "do not publish" recommendation message.', order=34)
# Wire up the legal transitions of the IESG state machine.  The
# transition table is constant, so build it once up front -- the
# original rebuilt the whole dict on every loop iteration.
iesg_next_state_slugs = {
    "pub-req": ("ad-eval", "watching", "dead"),
    "ad-eval": ("watching", "lc-req", "review-e", "iesg-eva"),
    "review-e": ("ad-eval", ),
    "lc-req": ("lc", ),
    "lc": ("writeupw", "goaheadw"),
    "writeupw": ("goaheadw", ),
    "goaheadw": ("iesg-eva", ),
    "iesg-eva": ("nopubadw", "defer", "approved"),
    "defer": ("iesg-eva", ),
    "approved": ("ann", ),
    "ann": ("rfcqueue", ),
    "rfcqueue": ("pub", ),
    "pub": ("dead", ),
    "nopubadw": ("nopubanw", ),
    "nopubanw": ("dead", ),
    "watching": ("pub-req", ),
    "dead": ("pub-req", ),
}
for s in State.objects.filter(type=draft_iesg_type):
    # assigning to the m2m sets the allowed next states for this state
    s.next_states = State.objects.filter(type=draft_iesg_type, slug__in=iesg_next_state_slugs[s.slug])
# import RFC Editor queue states
print "importing RFC Editor states"
get_rfc_state_mapping()
# WG states, we can get them from the state descriptions; this table
# maps the legacy state names onto the new slugs.
wg_doc_state_slug = {
    "Call For Adoption By WG Issued": 'c-adopt',
    "Adopted by a WG": 'adopt-wg',
    "Adopted for WG Info Only": 'info',
    "WG Document": 'wg-doc',
    "Parked WG Document": 'parked',
    "Dead WG Document": 'dead',
    "In WG Last Call": 'wg-lc',
    "Waiting for WG Chair Go-Ahead": 'chair-w',
    "WG Consensus: Waiting for Write-Up": 'writeupw',
    "Submitted to IESG for Publication": 'sub-pub',
}
for o in StateDescription.objects.all().order_by('order'):
    print "importing StateDescription", o.state.name
    s, _ = State.objects.get_or_create(type=ietf_type, slug=wg_doc_state_slug[o.state.name], name=o.state.name)
    # normalize the legacy definition text while preserving paragraphs
    s.desc = o.definition.replace("  ", " ").replace("\n ", "\n").replace("\n\n", "DUMMY").replace("\n", "").replace("DUMMY", "\n\n") # get rid of linebreaks, but keep paragraphs
    s.order = o.order
    s.save()
# IAB stream: states for documents in the IAB document stream.
print "importing IAB stream states"
State.objects.get_or_create(type=iab_type, slug="candidat", name="Candidate IAB Document", desc="A document being considered for the IAB stream.", order=1)
State.objects.get_or_create(type=iab_type, slug="active", name="Active IAB Document", desc="This document has been adopted by the IAB and is being actively developed.", order=2)
State.objects.get_or_create(type=iab_type, slug="parked", name="Parked IAB Document", desc="This document has lost its author or editor, is waiting for another document to be written, or cannot currently be worked on by the IAB for some other reason. Annotations probably explain why this document is parked.", order=3)
State.objects.get_or_create(type=iab_type, slug="review-i", name="IAB Review", desc="This document is awaiting the IAB itself to come to internal consensus.", order=4)
State.objects.get_or_create(type=iab_type, slug="review-c", name="Community Review", desc="This document has completed internal consensus within the IAB and is now under community review.", order=5)
State.objects.get_or_create(type=iab_type, slug="approved", name="Approved by IAB, To Be Sent to RFC Editor", desc="The consideration of this document is complete, but it has not yet been sent to the RFC Editor for publication (although that is going to happen soon).", order=6)
State.objects.get_or_create(type=iab_type, slug="diff-org", name="Sent to a Different Organization for Publication", desc="The IAB does not expect to publish the document itself, but has passed it on to a different organization that might continue work on the document. The expectation is that the other organization will eventually publish the document.", order=7)
State.objects.get_or_create(type=iab_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The IAB processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the IAB.", order=8)
State.objects.get_or_create(type=iab_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=9)
State.objects.get_or_create(type=iab_type, slug="dead", name="Dead IAB Document", desc="This document was an active IAB document, but for some reason it is no longer being pursued for the IAB stream. It is possible that the document might be revived later, possibly in another stream.", order=10)
# IRTF stream: states for research-group documents.
print "importing IRTF stream states"
State.objects.get_or_create(type=irtf_type, slug="candidat", name="Candidate RG Document", desc="This document is under consideration in an RG for becoming an IRTF document. A document in this state does not imply any RG consensus and does not imply any precedence or selection. It's simply a way to indicate that somebody has asked for a document to be considered for adoption by an RG.", order=1)
State.objects.get_or_create(type=irtf_type, slug="active", name="Active RG Document", desc="This document has been adopted by the RG and is being actively developed.", order=2)
State.objects.get_or_create(type=irtf_type, slug="parked", name="Parked RG Document", desc="This document has lost its author or editor, is waiting for another document to be written, or cannot currently be worked on by the RG for some other reason.", order=3)
State.objects.get_or_create(type=irtf_type, slug="rg-lc", name="In RG Last Call", desc="The document is in its final review in the RG.", order=4)
State.objects.get_or_create(type=irtf_type, slug="sheph-w", name="Waiting for Document Shepherd", desc="IRTF documents have document shepherds who help RG documents through the process after the RG has finished with the document.", order=5)
State.objects.get_or_create(type=irtf_type, slug="chair-w", name="Waiting for IRTF Chair", desc="The IRTF Chair is meant to be performing some task such as sending a request for IESG Review.", order=6)
State.objects.get_or_create(type=irtf_type, slug="irsg-w", name="Awaiting IRSG Reviews", desc="The document shepherd has taken the document to the IRSG and solicited reviews from one or more IRSG members.", order=7)
State.objects.get_or_create(type=irtf_type, slug="irsgpoll", name="In IRSG Poll", desc="The IRSG is taking a poll on whether or not the document is ready to be published.", order=8)
State.objects.get_or_create(type=irtf_type, slug="iesg-rev", name="In IESG Review", desc="The IRSG has asked the IESG to do a review of the document, as described in RFC5742.", order=9)
State.objects.get_or_create(type=irtf_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The RG processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the RG.", order=10)
State.objects.get_or_create(type=irtf_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=11)
State.objects.get_or_create(type=irtf_type, slug="iesghold", name="Document on Hold Based On IESG Request", desc="The IESG has requested that the document be held pending further review, as specified in RFC 5742, and the IRTF has agreed to such a hold.", order=12)
State.objects.get_or_create(type=irtf_type, slug="dead", name="Dead IRTF Document", desc="This document was an active IRTF document, but for some reason it is no longer being pursued for the IRTF stream. It is possible that the document might be revived later, possibly in another stream.", order=13)
# ISE stream: states for independent submissions.
print "importing ISE stream states"
State.objects.get_or_create(type=ise_type, slug="receive", name="Submission Received", desc="The draft has been sent to the ISE with a request for publication.", order=1)
State.objects.get_or_create(type=ise_type, slug="find-rev", name="Finding Reviewers", desc=" The ISE is finding initial reviewers for the document.", order=2)
State.objects.get_or_create(type=ise_type, slug="ise-rev", name="In ISE Review", desc="The ISE is actively working on the document.", order=3)
State.objects.get_or_create(type=ise_type, slug="need-res", name="Response to Review Needed", desc=" One or more reviews have been sent to the author, and the ISE is awaiting response.", order=4)
State.objects.get_or_create(type=ise_type, slug="iesg-rev", name="In IESG Review", desc="The ISE has asked the IESG to do a review of the document, as described in RFC5742.", order=5)
State.objects.get_or_create(type=ise_type, slug="rfc-edit", name="Sent to the RFC Editor", desc="The ISE processing of this document is complete and it has been sent to the RFC Editor for publication. The document may be in the RFC Editor's queue, or it may have been published as an RFC; this state doesn't distinguish between different states occurring after the document has left the ISE.", order=6)
State.objects.get_or_create(type=ise_type, slug="pub", name="Published RFC", desc="The document has been published as an RFC.", order=7)
State.objects.get_or_create(type=ise_type, slug="dead", name="No Longer In Independent Submission Stream", desc="This document was actively considered in the Independent Submission stream, but the ISE chose not to publish it. It is possible that the document might be revived later. A document in this state may have a comment explaining the reasoning of the ISE (such as if the document was going to move to a different stream).", order=8)
State.objects.get_or_create(type=ise_type, slug="iesghold", name="Document on Hold Based On IESG Request", desc="The IESG has requested that the document be held pending further review, as specified in RFC 5742, and the ISE has agreed to such a hold.", order=9)
# now import the next_states; we only go for the default ones, the
# WG-specific are handled in the group importer
workflows = [(ietf_type, workflows.models.Workflow.objects.get(name="Default WG Workflow")),
(irtf_type, workflows.models.Workflow.objects.get(name="IRTF Workflow")),
(ise_type, workflows.models.Workflow.objects.get(name="ISE Workflow")),
(iab_type, workflows.models.Workflow.objects.get(name="IAB Workflow")),
]
for state_type, workflow in workflows:
states = dict((s.name, s) for s in State.objects.filter(type=state_type))
old_states = dict((s.name, s) for s in workflow.states.filter(name__in=[name for name in states]).select_related('transitions'))
for name in states:
print "importing workflow transitions", workflow.name, name
s = states[name]
try:
o = old_states[name]
except KeyError:
print "MISSING state", name, "in workflow", workflow.name
continue
s.next_states = [states[t.destination.name] for t in o.transitions.filter(workflow=workflow)]
# meeting material states: slides/minutes/agenda share the same simple
# active/deleted lifecycle.
for t in (slides_type, minutes_type, agenda_type):
    print "importing states for", t.slug
    State.objects.get_or_create(type=t, slug="active", name="Active", order=1)
    State.objects.get_or_create(type=t, slug="deleted", name="Deleted", order=2)
# charter states: the chartering/rechartering review pipeline.
print "importing states for charters"
State.objects.get_or_create(type=charter_type, slug="notrev", name="Not currently under review", desc="The proposed charter is not being considered at this time. A proposed charter will remain in this state until an AD moves it to Informal IESG review.")
State.objects.get_or_create(type=charter_type, slug="infrev", name="Informal IESG review", desc="This is the initial state when an AD proposes a new charter. The normal next state is Internal review if the idea is accepted, or Not currently under review if the idea is abandoned.")
State.objects.get_or_create(type=charter_type, slug="intrev", name="Internal review", desc="The IESG and IAB are reviewing the early draft of the charter; this is the initial IESG and IAB review. The usual next state is External review if the idea is adopted, or Informal IESG review if the IESG decides the idea needs more work, or Not currently under review is the idea is abandoned")
State.objects.get_or_create(type=charter_type, slug="extrev", name="External review", desc="The IETF community and possibly other standards development organizations (SDOs) are reviewing the proposed charter. The usual next state is IESG review, although it might move to Not currently under review is the idea is abandoned during the external review.")
State.objects.get_or_create(type=charter_type, slug="iesgrev", name="IESG review", desc="The IESG is reviewing the discussion from the external review of the proposed charter. The usual next state is Approved, or Not currently under review if the idea is abandoned.")
State.objects.get_or_create(type=charter_type, slug="approved", name="Approved", desc="The charter is approved by the IESG.")

View file

@ -1,40 +0,0 @@
#!/bin/bash
#
# usage: move-tables-to-db.sh old-db-name new-db-name
#
# Do the grunt work of moving tables from old-db-name to new-db-name,
# the new database is created if it doesn't exist. Note that
# permissions on the old database are not moved (so the old ones are
# kept, and the new database won't have any).

OLD_DB=$1
NEW_DB=$2

# Fail early with a usage message instead of running MySQL statements
# with empty database names.
if [ -z "$OLD_DB" ] || [ -z "$NEW_DB" ]; then
    echo "usage: $0 old-db-name new-db-name" >&2
    exit 1
fi

# read access info at start so we don't get asked a gazillion times about them by MySQL
read -p "MySQL user: " MYSQL_USER
read -s -p "MySQL password for \"$MYSQL_USER\": " MYSQL_PASSWORD

MYSQL_CMD="mysql -NB -u $MYSQL_USER --password=$MYSQL_PASSWORD"

echo .
echo "Extracting table names"
# fixed: was "SHOW TABLES IN $1" -- use $OLD_DB consistently with the
# rest of the script
TABLES=`echo "SHOW TABLES IN $OLD_DB;" | $MYSQL_CMD | sed -e 's/^/\`/' -e 's/$/\`/'`
echo "Found `echo \"$TABLES\" | wc -l` tables"

echo "Creating database \`$NEW_DB\`"
echo "CREATE DATABASE \`$NEW_DB\`;" | $MYSQL_CMD

echo "Moving tables from \`$OLD_DB\` to \`$NEW_DB\`"
# RENAME TABLE moves the table between databases without copying data
for TABLE in $TABLES; do
    echo "RENAME TABLE \`$OLD_DB\`.$TABLE TO \`$NEW_DB\`.$TABLE;" | $MYSQL_CMD
done

echo "Done"

View file

@ -1,182 +0,0 @@
import datetime
from ietf.utils import unaccent
from ietf.person.models import Person, Email, Alias
from ietf.doc.models import NewRevisionDocEvent
from ietf.idtracker.models import EmailAddress
def clean_email_address(addr):
    """Normalize a legacy address string to a bare email address.

    "!" and "(at)" stand in for "@" in some old records; a surrounding
    "Name <addr>" wrapper is stripped.  Returns "" when no "@" survives
    the cleanup, i.e. when the string does not look like an address.
    """
    # Undo the obvious @-obfuscations first.
    for stand_in in ("!", "(at)"):
        addr = addr.replace(stand_in, "@")
    # Keep only the part after the last "<" and before the first ">".
    start = addr.rfind('<')
    addr = addr[start + 1:]
    end = addr.find('>')
    if end >= 0:
        addr = addr[:end]
    addr = addr.strip()
    return addr if "@" in addr else ""
def person_name(person):
    """Build a display name from a legacy PersonOrOrgInfo-style record.

    Scrubs stray "]" characters and whitespace, drops known-garbage
    prefix/suffix values seen in the legacy data, normalizes common
    prefix/suffix spellings, dots a bare middle initial, and joins the
    non-empty parts with single spaces.
    """
    bogus_prefixes = [".", "Mr.", "<s/", "e", "fas", "lk", "Miss", "Mr", "Mr,", "Mr.", "Mr..", "MRS", "Mrs.", "ms", "Ms,", "Ms.", "Ms. L", "mw", "prefix", "q", "qjfglesjtg", "s", "te mr", "\Mr.", "M.", "M"]
    prefix_fixes = { "Dr": "Dr.", "Lt.Colonel": "Lt. Col.", "Prof": "Prof.", "Prof.Dr.": "Prof. Dr.", "Professort": "Professor" }
    bogus_suffixes = ["q", "a", "suffix", "u", "w", "x", "\\"]
    suffix_fixes = { "Jr": "Jr.", "Ph. D.": "Ph.D.", "Ph.D": "Ph.D.", "PhD": "Ph.D.", "Phd.": "Phd.", "Scd": "Sc.D." }

    def scrub(part):
        # drop stray "]" characters and surrounding whitespace
        if not part:
            return ""
        return part.replace("]", "").strip()

    def scrub_prefix(part):
        part = scrub(part)
        if part in bogus_prefixes:
            return ""  # known-garbage legacy value, skip it
        return prefix_fixes.get(part, part)

    def scrub_suffix(part):
        part = scrub(part)
        if part in bogus_suffixes:
            return ""  # known-garbage legacy value, skip it
        return suffix_fixes.get(part, part)

    middle = scrub(person.middle_initial)
    if len(middle) == 1:
        middle += "."  # a bare initial gets a trailing dot

    parts = [
        scrub_prefix(person.name_prefix),
        scrub(person.first_name),
        middle,
        scrub(person.last_name),
        scrub_suffix(person.name_suffix),
    ]
    return u" ".join(part for part in parts if part)
def old_person_to_person(person):
    # Map a legacy PersonOrOrgInfo record to a redesign Person: first by
    # matching primary key, then by looking the cleaned-up name up among
    # the Person aliases.  Raises Person.DoesNotExist if neither works.
    try:
        return Person.objects.get(id=person.pk)
    except Person.DoesNotExist:
        return Person.objects.get(alias__name=person_name(person))
def old_person_to_email(person):
    # Find the best-guess email address for a legacy person record.
    # Returns an (address, priority) tuple; address is "" when nothing
    # could be found.
    # try connected addresses
    addresses = person.emailaddress_set.filter(address__contains="@").order_by('priority')[:1]
    if addresses:
        addr = clean_email_address(addresses[0].address)
        priority = addresses[0].priority
        return (addr, priority)
    # try to see if there's a person with the same name and an email address
    addresses = EmailAddress.objects.filter(person_or_org__first_name=person.first_name, person_or_org__last_name=person.last_name).filter(address__contains="@").order_by('priority')[:1]
    if addresses:
        addr = clean_email_address(addresses[0].address)
        priority = addresses[0].priority
        return (addr, priority)
    # otherwise try the short list of hand-maintained known addresses
    hardcoded_emails = {
        "Dinara Suleymanova": "dinaras@ietf.org",
        "Dow Street": "dow.street@linquest.com",
        "Xiaoya Yang": "xiaoya.yang@itu.int",
    }
    addr = hardcoded_emails.get(u"%s %s" % (person.first_name, person.last_name), "")
    priority = 1
    return (addr, priority)
def calc_email_import_time(priority):
    """Turn a legacy address priority into a synthetic timestamp.

    We may import some old email addresses that are now inactive; to
    ensure everything is not completely borked, high-priority (< 100)
    addresses must end up later (in reverse of priority), while I-D
    addresses (priority >= 100) follow the normal ordering, since a
    higher I-D id usually means later.
    """
    # Base date just after the epoch; all imported timestamps cluster
    # around it, well before any real data.
    base = datetime.datetime(1970, 1, 2, 0, 0, 0)
    seconds = -priority if priority < 100 else priority - 36000
    return base + datetime.timedelta(seconds=seconds)
def get_or_create_email(o, create_fake):
    # Return an Email for the legacy object o (either a person record or
    # an object with a .person attribute), creating Email/Person/Alias
    # rows as needed.  When no address can be found: with
    # create_fake=True a placeholder "unknown-email-<name>" address is
    # used, otherwise None is returned.
    person = o.person if hasattr(o, "person") else o
    name = person_name(person)
    email, priority = old_person_to_email(person)
    if not email:
        if create_fake:
            email = u"unknown-email-%s" % name.replace(" ", "-")
            print ("USING FAKE EMAIL %s for %s %s" % (email, person.pk, name)).encode('utf-8')
        else:
            print ("NO EMAIL FOR %s %s %s %s" % (o.__class__, o.pk, person.pk, name)).encode('utf-8')
            return None
    e, _ = Email.objects.select_related("person").get_or_create(address=email)
    if not e.person:
        # first time we see this address: attach it to an existing
        # Person with a matching alias, or create a brand new Person
        asciified = unaccent.asciify(name)
        aliases = Alias.objects.filter(name__in=(name, asciified)).select_related('person')
        if aliases:
            p = aliases[0].person
        else:
            p = Person(id=person.pk, name=name, ascii=asciified)
            from ietf.idtracker.models import PostalAddress
            addresses = person.postaladdress_set.filter(address_priority=1)
            if addresses:
                p.affiliation = (addresses[0].affiliated_company or "").strip()
            # should probably import p.address here
            p.save()
            Alias.objects.create(name=p.name, person=p)
            if p.ascii != p.name:
                Alias.objects.create(name=p.ascii, person=p)
        e.person = p
        e.time = calc_email_import_time(priority)
        e.save()
    else:
        if e.person.name != name:
            if not Alias.objects.filter(name=name):
                Alias.objects.create(name=name, person=e.person)
            # take longest name rather than the first we encounter
            # BUG FIX: the original wrote "len(name) > e.person.name",
            # comparing an int to a str -- always False under Python 2 --
            # so the longer name was never kept.
            if len(name) > len(e.person.name):
                e.person.name = name
                e.person.save()
    return e
def possibly_import_other_priority_email(email, old_email):
    # Import an additional legacy address for the same person, unless it
    # is empty or merely a different casing of the primary address.  If
    # the address already exists but belongs to somebody else, it is
    # re-pointed at email.person.
    addr = clean_email_address(old_email.address or "")
    if not addr or addr.lower() == email.address.lower():
        return
    try:
        e = Email.objects.get(address=addr)
        if e.person != email.person:
            e.person = email.person
            e.save()
    except Email.DoesNotExist:
        Email.objects.create(address=addr, person=email.person,
                             time=calc_email_import_time(old_email.priority))
def make_revision_event(doc, system_person):
    # Get (or build, unsaved) the "new_revision" event for doc, stamped
    # with the document's own rev/time and attributed to system_person.
    # The caller is responsible for saving the returned event.
    try:
        e = NewRevisionDocEvent.objects.get(doc=doc, type="new_revision")
    except NewRevisionDocEvent.DoesNotExist:
        e = NewRevisionDocEvent(doc=doc, type="new_revision")
    e.rev = doc.rev
    e.time = doc.time
    e.by = system_person
    e.desc = "Added new revision"
    return e
def dont_save_queries():
    """Disable Django's per-connection query log.

    With settings.DEBUG=True Django appends every executed query to
    connection.queries, which leaks memory in a long-running import
    script; swap in an object whose append() discards everything.
    """
    from django.db import connection

    class _DiscardingLog(object):
        # mimics just enough of a list for Django's query logging
        def append(self, entry):
            pass

    connection.queries = _DiscardingLog()

View file

@ -1,95 +0,0 @@
from django.db import models
class InterimActivities(models.Model):
    """Maps the legacy `interim_activities` table (activity-log entries for
    interim meetings) so its rows can be read by the import scripts."""
    id = models.IntegerField(primary_key=True)
    group_acronym_id = models.IntegerField()  # presumably an Acronym pk -- TODO confirm against importer
    meeting_num = models.IntegerField()
    activity = models.TextField()  # free-text description of what happened
    act_date = models.DateField()
    act_time = models.TimeField()
    act_by = models.IntegerField()  # presumably a person id -- TODO confirm
    class Meta:
        db_table = u'interim_activities'
class InterimAgenda(models.Model):
    """Maps the legacy `interim_agenda` table: one row per agenda file
    uploaded for an interim meeting."""
    id = models.IntegerField(primary_key=True)
    meeting_num = models.IntegerField()
    group_acronym_id = models.IntegerField()  # presumably an Acronym pk -- TODO confirm
    filename = models.CharField(max_length=765)
    irtf = models.IntegerField()     # integer flag; exact semantics not visible here
    interim = models.IntegerField()  # integer flag; exact semantics not visible here
    class Meta:
        db_table = u'interim_agenda'
class InterimInfo(models.Model):
    """Maps the legacy `interim_info` table: announcement info for an
    interim meeting.  All columns are nullable/blank in the old schema."""
    id = models.IntegerField(primary_key=True)
    group_acronym_id = models.IntegerField(null=True, blank=True)
    meeting_num = models.IntegerField(null=True, blank=True)
    meeting_date = models.CharField(max_length=765, blank=True)  # stored as text, not a DateField, in the legacy table
    message_body = models.TextField(blank=True)
    class Meta:
        db_table = u'interim_info'
class InterimMeetings(models.Model):
    """Maps the legacy `interim_meetings` table: one row per interim
    meeting, including location and assorted page-content blobs."""
    meeting_num = models.IntegerField(primary_key=True)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    city = models.CharField(max_length=765, blank=True)
    state = models.CharField(max_length=765, blank=True)
    country = models.CharField(max_length=765, blank=True)
    time_zone = models.IntegerField(null=True, blank=True)  # integer code, not a tz name -- TODO confirm mapping
    ack = models.TextField(blank=True)          # acknowledgements text
    agenda_html = models.TextField(blank=True)
    agenda_text = models.TextField(blank=True)
    future_meeting = models.TextField(blank=True)
    overview1 = models.TextField(blank=True)
    overview2 = models.TextField(blank=True)
    group_acronym_id = models.IntegerField(null=True, blank=True)
    class Meta:
        db_table = u'interim_meetings'
class InterimMinutes(models.Model):
    """Maps the legacy `interim_minutes` table: one row per minutes file
    uploaded for an interim meeting.  Same column layout as InterimAgenda."""
    id = models.IntegerField(primary_key=True)
    meeting_num = models.IntegerField()
    group_acronym_id = models.IntegerField()  # presumably an Acronym pk -- TODO confirm
    filename = models.CharField(max_length=765)
    irtf = models.IntegerField()     # integer flag; exact semantics not visible here
    interim = models.IntegerField()  # integer flag; exact semantics not visible here
    class Meta:
        db_table = u'interim_minutes'
class InterimSlides(models.Model):
    """Maps the legacy `interim_slides` table: one slide set presented at
    an interim meeting, with enough metadata to locate the file on disk."""
    id = models.IntegerField(primary_key=True)
    meeting_num = models.IntegerField()
    group_acronym_id = models.IntegerField(null=True, blank=True)  # Acronym pk (see file_loc)
    slide_num = models.IntegerField(null=True, blank=True)
    slide_type_id = models.IntegerField()  # 1=HTML, 2=pdf, 3=txt, 4=ppt, 5=doc, 6=pptx
    slide_name = models.CharField(max_length=765)
    irtf = models.IntegerField()     # integer flag; exact semantics not visible here
    interim = models.IntegerField()  # integer flag; exact semantics not visible here
    order_num = models.IntegerField(null=True, blank=True)
    in_q = models.IntegerField(null=True, blank=True)

    class Meta:
        db_table = u'interim_slides'

    def file_loc(self):
        """Return the file path for this slide set, relative to the
        slides root: "<meeting_num>/slides/<acronym>-<num>..." with the
        extension (or HTML subdirectory) chosen by slide_type_id.
        """
        from ietf.idtracker.models import Acronym
        meeting_dir = self.meeting_num
        acronym = Acronym.objects.get(pk=self.group_acronym_id).acronym
        if self.slide_type_id == 1:
            # HTML slide sets live in their own directory.
            # Bug fix: this used self.acronym, which is not a field on this
            # model (AttributeError at runtime); the locally computed acronym
            # is what the non-HTML branch below already uses.
            return "%s/slides/%s-%s/%s-%s.htm" % (
                meeting_dir, acronym, self.slide_num, acronym, self.slide_num)
        # All other known types are single files distinguished by extension;
        # unknown type ids fall back to no extension, as before.
        ext = {
            2: ".pdf",
            3: ".txt",
            4: ".ppt",
            5: ".doc",
            6: ".pptx",
        }.get(self.slide_type_id, "")
        return "%s/slides/%s-%s%s" % (meeting_dir, acronym, self.slide_num, ext)