Import remaining RfcIndex attributes, import IDAuthor, bug fixes
 - Legacy-Id: 2738

This commit is contained in:
parent 34d9f5f890
commit 754469cd3d
@@ -87,7 +87,7 @@ class IdWrapper:
     def __init__(self, draft):
         self.id = self
-        if isinstance(draft, IDInternal):
+        if isinstance(draft, IDInternal) and not settings.USE_DB_REDESIGN_PROXY_CLASSES:
             self._idinternal = draft
             self._draft = self._idinternal.draft
         else:
@@ -261,10 +261,16 @@ class RfcWrapper:
         self.rfc = self
 
         if not self._idinternal:
-            try:
-                self._idinternal = IDInternal.objects.get(rfc_flag=1, draft=self._rfcindex.rfc_number)
-            except IDInternal.DoesNotExist:
-                pass
+            if settings.USE_DB_REDESIGN_PROXY_CLASSES:
+                pub = rfcindex.latest_event(type="published_rfc")
+                started = rfcindex.latest_event(type="started_iesg_process")
+                if pub and started and pub.time < started.time:
+                    self._idinternal = rfcindex
+            else:
+                try:
+                    self._idinternal = IDInternal.objects.get(rfc_flag=1, draft=self._rfcindex.rfc_number)
+                except IDInternal.DoesNotExist:
+                    pass
 
         if self._idinternal:
             self.ietf_process = IetfProcessData(self._idinternal)
@@ -275,7 +281,10 @@ class RfcWrapper:
         self.maturity_level = self._rfcindex.current_status
         if not self.maturity_level:
             self.maturity_level = "Unknown"
 
+        if settings.USE_DB_REDESIGN_PROXY_CLASSES and rfcindex.filename.startswith('rfc'):
+            return # we've already done the lookup while importing so skip the rest
+
         ids = InternetDraft.objects.filter(rfc_number=self.rfc_number)
         if len(ids) >= 1:
             self.draft_name = ids[0].filename
@@ -658,7 +667,6 @@ class BallotWrapper:
                 position="No Record",
                 )
             positions.append(d)
 
         self._positions = positions
-
     def old_init(self):
@@ -199,7 +199,7 @@ def _get_history(doc, versions):
                 results.insert(0, v)
     if doc.is_id_wrapper and doc.draft_status == "Expired" and doc._draft.expiration_date:
         results.append({'is_text':True, 'date':doc._draft.expiration_date, 'text':'Draft expired'})
-    if doc.is_rfc_wrapper:
+    if not settings.USE_DB_REDESIGN_PROXY_CLASSES and doc.is_rfc_wrapper:
         if doc.draft_name:
             text = 'RFC Published (see <a href="/doc/%s/">%s</a> for earlier history)' % (doc.draft_name,doc.draft_name)
         else:
@@ -8,7 +8,6 @@ if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
         list_display=('acronym', 'name')
     admin.site.register(Acronym, AcronymAdmin)
-
-class AreaAdmin(admin.ModelAdmin):
-    list_display=('area_acronym', 'status')
-admin.site.register(Area, AreaAdmin)
-
+if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
+    class AreaAdmin(admin.ModelAdmin):
+        list_display=('area_acronym', 'status')
+    admin.site.register(Area, AreaAdmin)
@@ -25,9 +24,10 @@ class AreaWGURLAdmin(admin.ModelAdmin):
     pass
 admin.site.register(AreaWGURL, AreaWGURLAdmin)
 
-class BallotInfoAdmin(admin.ModelAdmin):
-    pass
-admin.site.register(BallotInfo, BallotInfoAdmin)
+if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
+    class BallotInfoAdmin(admin.ModelAdmin):
+        pass
+    admin.site.register(BallotInfo, BallotInfoAdmin)
 
 class ChairsHistoryAdmin(admin.ModelAdmin):
     list_display=('person', 'chair_type', 'start_year', 'end_year')
@@ -52,12 +52,13 @@ class IDIntendedStatusAdmin(admin.ModelAdmin):
     pass
 admin.site.register(IDIntendedStatus, IDIntendedStatusAdmin)
 
-class IDInternalAdmin(admin.ModelAdmin):
-    ordering=['draft']
-    list_display=['draft', 'token_email', 'note']
-    search_fields=['draft__filename']
-    raw_id_fields=['draft','ballot']
-admin.site.register(IDInternal, IDInternalAdmin)
+if not settings.USE_DB_REDESIGN_PROXY_CLASSES:
+    class IDInternalAdmin(admin.ModelAdmin):
+        ordering=['draft']
+        list_display=['draft', 'token_email', 'note']
+        search_fields=['draft__filename']
+        raw_id_fields=['draft','ballot']
+    admin.site.register(IDInternal, IDInternalAdmin)
 
 class IDNextStateAdmin(admin.ModelAdmin):
     pass
@@ -264,19 +264,12 @@ class PersonOrOrgInfo(models.Model):
         if self.first_name == '' and self.last_name == '':
             return u"(Person #%s)" % self.person_or_org_tag
         return u"%s %s" % ( self.first_name or u"<nofirst>", self.last_name or u"<nolast>")
-    def email(self, priority=1, type=None):
-        name = str(self)
+    def email(self, priority=1):
+        name = unicode(self)
         email = ''
-        types = type and [ type ] or [ "INET", "Prim", None ]
-        for type in types:
-            try:
-                if type:
-                    email = self.emailaddress_set.get(priority=priority, type=type).address
-                else:
-                    email = self.emailaddress_set.get(priority=priority).address
-                break
-            except (EmailAddress.DoesNotExist, AssertionError):
-                pass
+        addresses = self.emailaddress_set.filter(address__contains="@").order_by('priority')[:1]
+        if addresses:
+            email = addresses[0].address.replace('<', '').replace('>', '')
         return (name, email)
     # Added by Sunny Lee to display person's affiliation - 5/26/2007
     def affiliation(self, priority=1):
@@ -1088,7 +1081,12 @@ class DocumentWrapper(object):
         self.document = document
 
 if settings.USE_DB_REDESIGN_PROXY_CLASSES:
-    from redesign.doc.proxy import InternetDraft
+    InternetDraftOld = InternetDraft
+    IDInternalOld = IDInternal
+    BallotInfoOld = BallotInfo
+    AreaOld = Area
+    AcronymOld = Acronym
+    from redesign.doc.proxy import InternetDraft, IDInternal, BallotInfo
+    from redesign.group.proxy import Area
+    from redesign.group.proxy import Acronym
 
 
@@ -27,9 +27,8 @@ class DocumentInfo(models.Model):
     abstract = models.TextField()
     rev = models.CharField(max_length=16)
     pages = models.IntegerField(blank=True, null=True)
-    intended_std_level = models.ForeignKey(IntendedStatusName, blank=True, null=True)
-    std_level = models.ForeignKey(StdStatusName, blank=True, null=True)
-    authors = models.ManyToManyField(Email, blank=True, null=True)
+    intended_std_level = models.ForeignKey(IntendedStdLevelName, blank=True, null=True)
+    std_level = models.ForeignKey(StdLevelName, blank=True, null=True)
     ad = models.ForeignKey(Email, related_name='ad_%(class)s_set', blank=True, null=True)
     shepherd = models.ForeignKey(Email, related_name='shepherd_%(class)s_set', blank=True, null=True)
     notify = models.CharField(max_length=255, blank=True)
@@ -42,9 +41,10 @@ class DocumentInfo(models.Model):
     def author_list(self):
         return ", ".join(email.address for email in self.authors.all())
     def latest_event(self, *args, **filter_args):
-        """Get latest event with specific requirements, e.g.
-        d.latest_event(type="xyz") returns an Event while
-        d.latest_event(Status, type="xyz") returns a Status event."""
+        """Get latest event of optional Python type and with filter
+        arguments, e.g. d.latest_event(type="xyz") returns an Event
+        while d.latest_event(Status, type="xyz") returns a Status
+        event."""
         model = args[0] if args else Event
         e = model.objects.filter(doc=self).filter(**filter_args).order_by('-time')[:1]
         return e[0] if e else None
@@ -56,9 +56,21 @@ class RelatedDocument(models.Model):
     def __unicode__(self):
         return u"%s %s %s" % (self.document.name, self.relationship.name.lower(), self.doc_alias.name)
 
+class DocumentAuthor(models.Model):
+    document = models.ForeignKey('Document')
+    author = models.ForeignKey(Email)
+    order = models.IntegerField()
+
+    def __unicode__(self):
+        return u"%s %s (%s)" % (self.document.name, self.email.get_name(), self.order)
+
+    class Meta:
+        ordering = ["document", "order"]
+
 class Document(DocumentInfo):
     name = models.CharField(max_length=255, primary_key=True) # immutable
     related = models.ManyToManyField('DocAlias', through=RelatedDocument, blank=True, related_name="reversely_related_document_set")
+    authors = models.ManyToManyField(Email, through=DocumentAuthor, blank=True)
     def __unicode__(self):
         return self.name
     def values(self):
@@ -89,7 +101,7 @@ class Document(DocumentInfo):
         snap = DocHistory(**fields)
         snap.save()
         for m in many2many:
-            # FIXME: check that this works with related
+            # FIXME: check that this works with related/authors
             #print "m2m:", m, many2many[m]
             rel = getattr(snap, m)
             for item in many2many[m]:
@@ -107,11 +119,23 @@ class RelatedDocHistory(models.Model):
     doc_alias = models.ForeignKey('DocAlias', related_name="reversely_related_document_history_set") # target
     relationship = models.ForeignKey(DocRelationshipName)
     def __unicode__(self):
-        return u"%s %s %s" % (self.document.name, self.relationship.name.lower(), self.doc_alias.name)
+        return u"%s %s %s" % (self.document.doc.name, self.relationship.name.lower(), self.doc_alias.name)
+
+class DocHistoryAuthor(models.Model):
+    document = models.ForeignKey('DocHistory')
+    author = models.ForeignKey(Email)
+    order = models.IntegerField()
+
+    def __unicode__(self):
+        return u"%s %s (%s)" % (self.document.doc.name, self.email.get_name(), self.order)
+
+    class Meta:
+        ordering = ["document", "order"]
 
 class DocHistory(DocumentInfo):
     doc = models.ForeignKey(Document) # ID of the Document this relates to
     related = models.ManyToManyField('DocAlias', through=RelatedDocHistory, blank=True)
+    authors = models.ManyToManyField(Email, through=DocHistoryAuthor, blank=True)
     def __unicode__(self):
         return unicode(self.doc.name)
 
@@ -169,6 +193,7 @@ EVENT_TYPES = [
     ("published_rfc", "Published RFC"),
 
     # IESG events
+    ("started_iesg_process", "Started IESG process on document"),
     ("sent_ballot_announcement", "Sent ballot announcement"),
     ("changed_ballot_position", "Changed ballot position"),
     ("changed_ballot_approval_text", "Changed ballot approval text"),
@@ -44,17 +44,19 @@ class InternetDraft(Document):
     def revision_date(self):
         e = self.latest_event(type="new_revision")
         return e.time.date() if e else None
+    # helper function
+    def get_file_type_matches_from(self, glob_path):
+        possible_types = [".txt", ".pdf", ".xml", ".ps"]
+        res = []
+        for m in glob.glob(glob_path):
+            for t in possible_types:
+                if m.endswith(t):
+                    res.append(t)
+        return ",".join(res)
     #file_type = models.CharField(max_length=20)
     @property
     def file_type(self):
-        matches = glob.glob(os.path.join(settings.INTERNET_DRAFT_PATH, self.name + "*.*"))
-        possible_types = [".txt", ".pdf", ".xml", ".ps"]
-        res = set()
-        for m in matches:
-            for t in possible_types:
-                if m.endswith(t):
-                    res.add(t)
-        return ",".join(res) or ".txt"
+        return self.get_file_type_matches_from(os.path.join(settings.INTERNET_DRAFT_PATH, self.name + "-" + self.rev + ".*")) or ".txt"
     #txt_page_count = models.IntegerField()
     @property
     def txt_page_count(self):
@@ -160,8 +162,15 @@ class InternetDraft(Document):
     #idinternal = FKAsOneToOne('idinternal', reverse=True, query=models.Q(rfc_flag = 0))
     @property
     def idinternal(self):
+        print self.iesg_state
         return self if self.iesg_state else None
 
+    # reverse relationship
+    @property
+    def authors(self):
+        from person.models import Person
+        return IDAuthor.objects.filter(document=self)
+
     # methods from InternetDraft
     def displayname(self):
         return self.name
@@ -554,7 +563,7 @@ class InternetDraft(Document):
 
     #rfc_number = models.IntegerField(primary_key=True) # already taken care of
     #title = models.CharField(max_length=250) # same name
-    #authors = models.CharField(max_length=250) FIXME
+    #authors = models.CharField(max_length=250) # exists already
     #rfc_published_date = models.DateField()
     @property
     def rfc_published_date(self):
@@ -564,7 +573,7 @@ class InternetDraft(Document):
     #current_status = models.CharField(max_length=50,null=True)
     @property
     def current_status(self):
-        return self.std_level
+        return self.std_level.name
 
     #updates = models.CharField(max_length=200,blank=True,null=True)
     @property
@@ -594,26 +603,54 @@ class InternetDraft(Document):
                                        models.Q(name__startswith="bcp"))
         return aliases[0].name.upper() if aliases else None
 
-    #draft = models.CharField(max_length=200,null=True)
-    @property
-    def draft(self):
-        if not self.name.startswith("rfc"):
-            return self.name
-        else:
-            return None
+    #draft = models.CharField(max_length=200,null=True) # have to ignore this, it's already implemented
 
-    #has_errata = models.BooleanField() FIXME
+    #has_errata = models.BooleanField()
+    @property
+    def has_errata(self):
+        return bool(self.tags.filter(slug="errata"))
 
     #stream = models.CharField(max_length=15,blank=True,null=True)
     @property
     def stream(self):
         return super(InternetDraft, self).stream.name
-    #wg = models.CharField(max_length=15,blank=True,null=True) FIXME
+
+    #wg = models.CharField(max_length=15,blank=True,null=True)
+    @property
+    def wg(self):
+        return self.group.acronym
 
     #file_formats = models.CharField(max_length=20,blank=True,null=True)
     @property
     def file_formats(self):
-        return self.file_type.replace(".", "").replace("txt", "ascii")
+        return self.get_file_type_matches_from(os.path.join(settings.RFC_PATH, "rfc" + str(self.rfc_number) + ".*")).replace(".", "").replace("txt", "ascii")
 
     class Meta:
         proxy = True
 
 IDInternal = InternetDraft
 BallotInfo = InternetDraft
+RfcIndex = InternetDraft
+
+
+class IDAuthor(DocumentAuthor):
+    #document = models.ForeignKey(InternetDraft, db_column='id_document_tag', related_name='authors') # same name
+    #person = models.ForeignKey(PersonOrOrgInfo, db_column='person_or_org_tag')
+    @property
+    def person(self):
+        return self.author.person
+
+    #author_order = models.IntegerField()
+    @property
+    def author_order(self):
+        return self.order
+
+    def email(self):
+        return self.author.address
+
+    def final_author_order(self):
+        return self.order
+
+    class Meta:
+        proxy = True
@@ -14,7 +14,7 @@ management.setup_environ(settings)
 from redesign.doc.models import *
 from redesign.group.models import *
 from redesign.name.models import *
-from ietf.idtracker.models import InternetDraft, IESGLogin, DocumentComment, PersonOrOrgInfo, RfcObsolete
+from ietf.idtracker.models import InternetDraft, IDInternal, IESGLogin, DocumentComment, PersonOrOrgInfo, Rfc, IESGComment, IESGDiscuss, BallotInfo
 from ietf.idrfc.models import RfcIndex, DraftVersions
 
 import sys
@@ -26,15 +26,14 @@ if len(sys.argv) > 1:
 # assumptions:
 #  - groups have been imported
 #  - IESG login emails/roles have been imported
+#  - IDAuthor emails/persons have been imported
 
-# FIXME: what about RFCs
-
 # Regarding history, we currently don't try to create DocumentHistory
 # objects, we just import the comments as events.
 
 # imports InternetDraft, IDInternal, BallotInfo, Position,
-# IESGComment, IESGDiscuss, DocumentComment, RfcObsolete,
-# idrfc.RfcIndex, idrfc.DraftVersions
+# IESGComment, IESGDiscuss, DocumentComment, IDAuthor, idrfc.RfcIndex,
+# idrfc.DraftVersions
 
 def name(name_class, slug, name, desc=""):
     # create if it doesn't exist, set name and desc
@@ -50,25 +49,47 @@ def alias_doc(name, doc):
     return alias
 
 type_draft = name(DocTypeName, "draft", "Draft")
-stream_ietf = name(DocStreamName, "ietf", "IETF")
+
+stream_mapping = {
+    "Legacy": name(DocStreamName, "legacy", "Legacy"),
+    "IETF": name(DocStreamName, "ietf", "IETF"),
+    "INDEPENDENT": name(DocStreamName, "indie", "Independent Submission"),
+    "IAB": name(DocStreamName, "iab", "IAB"),
+    "IRTF": name(DocStreamName, "irtf", "IRTF"),
+    }
 
 relationship_replaces = name(DocRelationshipName, "replaces", "Replaces")
 relationship_updates = name(DocRelationshipName, "updates", "Updates")
 relationship_obsoletes = name(DocRelationshipName, "obs", "Obsoletes")
 
-intended_status_mapping = {
-    "BCP": name(IntendedStatusName, "bcp", "Best Current Practice"),
-    "Draft Standard": name(IntendedStatusName, "ds", name="Draft Standard"),
-    "Experimental": name(IntendedStatusName, "exp", name="Experimental"),
-    "Historic": name(IntendedStatusName, "hist", name="Historic"),
-    "Informational": name(IntendedStatusName, "inf", name="Informational"),
-    "Proposed Standard": name(IntendedStatusName, "ps", name="Proposed Standard"),
-    "Standard": name(IntendedStatusName, "std", name="Standard"),
+intended_std_level_mapping = {
+    "BCP": name(IntendedStdLevelName, "bcp", "Best Current Practice"),
+    "Draft Standard": name(IntendedStdLevelName, "ds", name="Draft Standard"),
+    "Experimental": name(IntendedStdLevelName, "exp", name="Experimental"),
+    "Historic": name(IntendedStdLevelName, "hist", name="Historic"),
+    "Informational": name(IntendedStdLevelName, "inf", name="Informational"),
+    "Proposed Standard": name(IntendedStdLevelName, "ps", name="Proposed Standard"),
+    "Standard": name(IntendedStdLevelName, "std", name="Standard"),
     "None": None,
-    "Request": None, # FIXME: correct? from idrfc_wrapper.py
+    "Request": None,
     }
 
-status_mapping = {
+# add aliases from rfc_intend_status
+intended_std_level_mapping["Proposed"] = intended_std_level_mapping["Proposed Standard"]
+intended_std_level_mapping["Draft"] = intended_std_level_mapping["Draft Standard"]
+
+std_level_mapping = {
+    "Standard": name(StdLevelName, "std", "Standard"),
+    "Draft Standard": name(StdLevelName, "ds", "Draft Standard"),
+    "Proposed Standard": name(StdLevelName, "ps", "Proposed Standard"),
+    "Informational": name(StdLevelName, "inf", "Informational"),
+    "Experimental": name(StdLevelName, "exp", "Experimental"),
+    "Best Current Practice": name(StdLevelName, "bcp", "Best Current Practice"),
+    "Historic": name(StdLevelName, "hist", "Historic"),
+    "Unknown": name(StdLevelName, "unkn", "Unknown"),
+    }
+
+state_mapping = {
     'Active': name(DocStateName, "active", "Active"),
     'Expired': name(DocStateName, "expired", "Expired"),
     'RFC': name(DocStateName, "rfc", "RFC"),
@@ -125,9 +146,9 @@ tag_review_by_rfc_editor = name(DocInfoTagName, 'rfc-rev', "Review by RFC Editor")
 tag_via_rfc_editor = name(DocInfoTagName, 'via-rfc', "Via RFC Editor")
 tag_expired_tombstone = name(DocInfoTagName, 'exp-tomb', "Expired tombstone")
 tag_approved_in_minute = name(DocInfoTagName, 'app-min', "Approved in minute")
+tag_has_errata = name(DocInfoTagName, 'errata', "Has errata")
 
-# helpers for events
-
+# helpers
 def save_event(doc, event, comment):
     event.time = comment.datetime()
     event.by = iesg_login_to_email(comment.created_by)
@@ -136,6 +157,12 @@ def save_event(doc, event, comment):
     event.desc = comment.comment_text # FIXME: consider unquoting here
     event.save()
 
+def sync_tag(d, include, tag):
+    if include:
+        d.tags.add(tag)
+    else:
+        d.tags.remove(tag)
+
 buggy_iesg_logins_cache = {}
 
 # make sure system email exists
@@ -199,6 +226,354 @@ re_intended_status_changed = re.compile(r"Intended [sS]tatus has been changed to
 re_state_change_notice = re.compile(r"State Change Notice email list (have been change|has been changed) (<b>)?")
 re_area_acronym_changed = re.compile(r"Area acronymn? has been changed to \w+ from \w+(<b>)?")
 
+
+def import_from_idinternal(d, idinternal):
+    d.time = idinternal.event_date
+    d.iesg_state = iesg_state_mapping[idinternal.cur_state.state]
+    d.ad = iesg_login_to_email(idinternal.job_owner)
+    d.notify = idinternal.state_change_notice_to or ""
+    d.note = idinternal.note or ""
+    d.save()
+
+    # extract events
+    last_note_change_text = ""
+
+    for c in idinternal.documentcomment_set.order_by('date', 'time', 'id'):
+        handled = False
+
+        # telechat agenda schedulings
+        match = re_telechat_agenda.search(c.comment_text) or re_telechat_changed.search(c.comment_text)
+        if match:
+            e = Telechat()
+            e.type = "scheduled_for_telechat"
+            e.telechat_date = date_in_match(match) if "Placed on" in c.comment_text else None
+            # can't extract this from history so we just take the latest value
+            e.returning_item = bool(idinternal.returning_item)
+            save_event(d, e, c)
+            handled = True
+
+        # ballot issued
+        match = re_ballot_issued.search(c.comment_text)
+        if match:
+            e = Text()
+            e.type = "sent_ballot_announcement"
+            save_event(d, e, c)
+
+            # when you issue a ballot, you also vote yes; add that vote
+            e = BallotPosition()
+            e.type = "changed_ballot_position"
+            e.ad = iesg_login_to_email(c.created_by)
+            e.desc = "[Ballot Position Update] New position, Yes, has been recorded by %s" % e.ad.get_name()
+            last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
+            e.pos = ballot_position_mapping["Yes"]
+            e.discuss = last_pos.ballotposition.discuss if last_pos else ""
+            e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
+            e.comment = last_pos.ballotposition.comment if last_pos else ""
+            e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
+            save_event(d, e, c)
+            handled = True
+
+        # ballot positions
+        match = re_ballot_position.search(c.comment_text)
+        if match:
+            position = match.group('position') or match.group('position2')
+            ad_name = match.group('for') or match.group('for2') or match.group('by') # some of the old positions don't specify who it's for, in that case assume it's "by", the person who entered the position
+            ad_first, ad_last = ad_name.split(' ')
+
+            e = BallotPosition()
+            e.type = "changed_ballot_position"
+            e.ad = iesg_login_to_email(IESGLogin.objects.get(first_name=ad_first, last_name=ad_last))
+            last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
+            e.pos = ballot_position_mapping[position]
+            e.discuss = last_pos.ballotposition.discuss if last_pos else ""
+            e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
+            e.comment = last_pos.ballotposition.comment if last_pos else ""
+            e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
+            save_event(d, e, c)
+            handled = True
+
+        # ballot discusses/comments
+        if c.ballot in (DocumentComment.BALLOT_DISCUSS, DocumentComment.BALLOT_COMMENT):
+            e = BallotPosition()
+            e.type = "changed_ballot_position"
+            e.ad = iesg_login_to_email(c.created_by)
+            last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
+            e.pos = last_pos.ballotposition.pos if last_pos else ballot_position_mapping[None]
+            if c.ballot == DocumentComment.BALLOT_DISCUSS:
+                e.discuss = c.comment_text
+                e.discuss_time = c.datetime()
+                e.comment = last_pos.ballotposition.comment if last_pos else ""
+                e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
+                # put header into description
+                c.comment_text = "[Ballot discuss]\n" + c.comment_text
+            else:
+                e.discuss = last_pos.ballotposition.discuss if last_pos else ""
+                e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
+                e.comment = c.comment_text
+                e.comment_time = c.datetime()
+                # put header into description
+                c.comment_text = "[Ballot comment]\n" + c.comment_text
+            save_event(d, e, c)
+            handled = True
+
+        # last call requested
+        match = re_last_call_requested.search(c.comment_text)
+        if match:
+            e = Event(type="requested_last_call")
+            save_event(d, e, c)
+            handled = True
+
+        # state changes
+        match = re_state_changed.search(c.comment_text)
+        if match:
+            e = Event(type="changed_document")
+            save_event(d, e, c)
+            handled = True
+
+        # note changed
+        match = re_note_changed.search(c.comment_text)
+        if match:
+            # watch out for duplicates of which the old data's got many
+            if c.comment_text != last_note_change_text:
+                last_note_change_text = c.comment_text
+                e = Event(type="changed_document")
+                save_event(d, e, c)
+            handled = True
+
+        # draft added
+        match = re_draft_added.search(c.comment_text)
+        if match:
+            e = Event(type="started_iesg_process")
+            save_event(d, e, c)
+            handled = True
+
+        # new version
+        if c.comment_text == "New version available":
+            e = NewRevision(type="new_revision", rev=c.version)
+            save_event(d, e, c)
+            handled = True
+
+        # resurrect requested
+        match = re_resurrection_requested.search(c.comment_text)
+        if match:
+            e = Event(type="requested_resurrect")
+            save_event(d, e, c)
+            handled = True
+
+        # completed resurrect
+        match = re_completed_resurrect.search(c.comment_text)
+        if match:
+            e = Event(type="completed_resurrect")
+            save_event(d, e, c)
+            handled = True
+
+        # document expiration
+        if c.comment_text == "Document is expired by system":
+            e = Event(type="expired_document")
+            save_event(d, e, c)
+            handled = True
+
+        # approved document
+        match = re_document_approved.search(c.comment_text)
+        if match:
+            e = Event(type="iesg_approved")
+            save_event(d, e, c)
+            handled = True
+
+        # disapproved document
+        match = re_document_disapproved.search(c.comment_text)
+        if match:
+            e = Event(type="iesg_disapproved")
+            save_event(d, e, c)
+            handled = True
+
+
+        # some changes can be bundled - this is not entirely
+        # convenient, especially since it makes it hard to give
+        # each a type, so unbundle them
+        if not handled:
+            unhandled_lines = []
+            for line in c.comment_text.split("<br>"):
+                # status date changed
+                match = re_status_date_changed.search(line)
+                if match:
+                    e = Status(type="changed_status_date", date=date_in_match(match))
+                    e.desc = line
+                    save_event(d, e, c)
+                    handled = True
+
+                # AD/job owner changed
+                match = re_responsible_ad_changed.search(line)
+                if match:
+                    e = Event(type="changed_document")
+                    e.desc = line
+                    save_event(d, e, c)
+                    handled = True
+
+                # intended standard level changed
+                match = re_intended_status_changed.search(line)
+                if match:
+                    e = Event(type="changed_document")
+                    e.desc = line
+                    save_event(d, e, c)
+                    handled = True
+
+                # state change notice
+                match = re_state_change_notice.search(line)
+                if match:
+                    e = Event(type="changed_document")
+                    e.desc = line
+                    save_event(d, e, c)
+                    handled = True
+
+                # area acronym
+                match = re_area_acronym_changed.search(line)
+                if match:
+                    e = Event(type="changed_document")
+                    e.desc = line
+                    save_event(d, e, c)
+                    handled = True
+
+                # multiline change bundles end with a single "by xyz" that we skip
+                if not handled and not line.startswith("by <b>"):
+                    unhandled_lines.append(line)
+
+            if handled:
+                c.comment_text = "<br>".join(unhandled_lines)
+
+                if c.comment_text:
+                    print "COULDN'T HANDLE multi-line comment %s '%s'" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80])
+
+        # all others are added as comments
+        if not handled:
+            e = Event(type="added_comment")
+            save_event(d, e, c)
+
+            # stop typical comments from being output
+            typical_comments = [
+                "Document Shepherd Write-up for %s" % d.name,
+                "Who is the Document Shepherd for this document",
+                "We understand that this document doesn't require any IANA actions",
+                "IANA questions",
+                "IANA has questions",
+                "IANA comments",
+                "IANA Comments",
+                "IANA Evaluation Comments",
+                "Published as RFC",
+                ]
+            for t in typical_comments:
+                if t in c.comment_text:
+                    handled = True
+                    break
+
+            if not handled:
+                print "couldn't handle comment %s '%s'" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80])
+
+    made_up_date = d.latest_event().time + datetime.timedelta(seconds=1)
+
+    e = d.latest_event(Status, type="changed_status_date")
+    status_date = e.date if e else None
+    if idinternal.status_date != status_date:
+        e = Status(type="changed_status_date", date=idinternal.status_date)
+        e.time = made_up_date
+        e.by = system_email
+        e.doc = d
+        e.desc = "Status date has been changed to <b>%s</b> from <b>%s</b>" % (idinternal.status_date, status_date)
+        e.save()
+
+    e = d.latest_event(Telechat, type="scheduled_for_telechat")
+    telechat_date = e.telechat_date if e else None
+    if not idinternal.agenda:
+        idinternal.telechat_date = None # normalize
+
+    if telechat_date != idinternal.telechat_date:
+        e = Telechat(type="scheduled_for_telechat",
+                     telechat_date=idinternal.telechat_date,
+                     returning_item=bool(idinternal.returning_item))
+        # a common case is that it has been removed from the
+        # agenda automatically by a script without a notice in the
+        # comments, in that case the time is simply the day after
+        # the telechat
+        e.time = telechat_date + datetime.timedelta(days=1) if telechat_date and not idinternal.telechat_date else made_up_date
+        e.by = system_email
+        args = ("Placed on", idinternal.telechat_date) if idinternal.telechat_date else ("Removed from", telechat_date)
+        e.doc = d
+        e.desc = "%s agenda for telechat - %s by system" % args
+        e.save()
+
+    try:
+        # sad fact: some ballots haven't been generated yet
+        ballot = idinternal.ballot
+    except BallotInfo.DoesNotExist:
+        ballot = None
+
+    if ballot:
+        # make sure the comments and discusses are updated
+        positions = list(BallotPosition.objects.filter(doc=d).order_by("-time"))
+        for c in IESGComment.objects.filter(ballot=idinternal.ballot):
+            ad = iesg_login_to_email(c.ad)
+            for p in positions:
+                if p.ad == ad:
+                    if p.comment != c.text:
+                        p.comment = c.text
+                        p.comment_time = c.date if p.time.date() != c.date else p.time
+                        p.save()
+                    break
+
+        for c in IESGDiscuss.objects.filter(ballot=idinternal.ballot):
+            ad = iesg_login_to_email(c.ad)
+            for p in positions:
+                if p.ad == ad:
+                    if p.discuss != c.text:
+                        p.discuss = c.text
+                        p.discuss_time = c.date if p.time.date() != c.date else p.time
+                        p.save()
+                    break
+
+        # if any of these events have happened, they're closer to
+        # the real time
+        e = d.event_set.filter(type__in=("requested_last_call", "sent_last_call", "sent_ballot_announcement", "iesg_approved", "iesg_disapproved")).order_by('time')[:1]
+        if e:
+            text_date = e[0].time - datetime.timedelta(seconds=1)
+        else:
+            text_date = made_up_date
+
+        if idinternal.ballot.approval_text:
+            e, _ = Text.objects.get_or_create(type="changed_ballot_approval_text", doc=d)
+            e.content = idinternal.ballot.approval_text
+            e.time = text_date
+            e.by = system_email
+            e.desc = "Ballot approval text was added"
+            e.save()
+
+        if idinternal.ballot.last_call_text:
+            e, _ = Text.objects.get_or_create(type="changed_last_call_text", doc=d)
+            e.content = idinternal.ballot.last_call_text
+            e.time = text_date
+            e.by = system_email
+            e.desc = "Last call text was added"
+            e.save()
+
+        if idinternal.ballot.ballot_writeup:
+            e, _ = Text.objects.get_or_create(type="changed_ballot_writeup_text", doc=d)
+            e.content = idinternal.ballot.ballot_writeup
+            e.time = text_date
+            e.by = system_email
+            e.desc = "Ballot writeup text was added"
+            e.save()
+
+    # fix tags
+    sync_tag(d, idinternal.via_rfc_editor, tag_via_rfc_editor)
+
+    n = idinternal.cur_sub_state and idinternal.cur_sub_state.sub_state
+    for k, v in substate_mapping.iteritems():
+        sync_tag(d, k == n, v)
+    # currently we ignore prev_sub_state
+
+    sync_tag(d, idinternal.approved_in_minute, tag_approved_in_minute)
+
+
 all_drafts = InternetDraft.objects.all().select_related()
 if document_name_to_import:
     all_drafts = all_drafts.filter(filename=document_name_to_import)
@@ -209,277 +584,62 @@ for o in all_drafts:
         d = Document.objects.get(name=o.filename)
     except Document.DoesNotExist:
         d = Document(name=o.filename)
 
-    d.time = o.idinternal.event_date if o.idinternal else o.revision_date
+    d.time = o.revision_date
     d.type = type_draft
     d.title = o.title
-    d.state = status_mapping[o.status.status]
+    d.state = state_mapping[o.status.status]
     d.group = Group.objects.get(acronym=o.group.acronym)
-    d.stream = stream_ietf
+    if o.filename.startswith("draft-iab-"):
+        d.stream = stream_mapping["IAB"]
+    elif o.filename.startswith("draft-irtf-"):
+        d.stream = stream_mapping["IRTF"]
+    elif o.idinternal and o.idinternal.via_rfc_editor:
+        d.stream = stream_mapping["INDEPENDENT"] # FIXME: correct?
+    else:
+        d.stream = stream_mapping["IETF"] # FIXME: correct?
     d.wg_state = None
-    d.iesg_state = iesg_state_mapping[o.idinternal.cur_state.state if o.idinternal else None]
-    # we currently ignore the previous IESG state prev_state
+    d.iesg_state = iesg_state_mapping[None]
     d.iana_state = None
-    # d.rfc_state = # FIXME
+    d.rfc_state = None
     d.rev = o.revision
     d.abstract = o.abstract
     d.pages = o.txt_page_count
-    d.intended_std_level = intended_status_mapping[o.intended_status.intended_status]
-    # d.std_level = # FIXME
-    # d.authors =
-    # d.related = # FIXME
-    d.ad = iesg_login_to_email(o.idinternal.job_owner) if o.idinternal else None
+    d.intended_std_level = intended_std_level_mapping[o.intended_status.intended_status]
+    d.ad = None
     d.shepherd = None
-    d.notify = o.idinternal.state_change_notice_to or "" if o.idinternal else ""
+    d.notify = ""
     d.external_url = ""
-    d.note = o.idinternal.note or "" if o.idinternal else ""
-    d.internal_comments = o.comments or "" # FIXME: maybe put these somewhere else
+    d.note = ""
+    d.internal_comments = o.comments or ""
     d.save()
 
     # make sure our alias is updated
-    alias_doc(d.name, d)
-
-    # clear already imported events
+    d_alias = alias_doc(d.name, d)
+
+    d.authors.clear()
+    for i, a in enumerate(o.authors.all().select_related("person").order_by('author_order', 'person')):
+        try:
+            e = Email.objects.get(address=a.person.email()[1])
+            # renumber since old numbers may be a bit borked
+            DocumentAuthor.objects.create(document=d, author=e, order=i)
+        except Email.DoesNotExist:
+            print "SKIPPED author", unicode(a.person).encode('utf-8')
+
+    # clear any already imported events as the event importer isn't
+    # clever enough to do a diff
     d.event_set.all().delete()
 
     if o.idinternal:
-        last_note_change_text = ""
-
-        # extract events
-        for c in o.idinternal.documentcomment_set.order_by('date', 'time', 'id'):
-            handled = False
-
-            # telechat agenda schedulings
-            match = re_telechat_agenda.search(c.comment_text) or re_telechat_changed.search(c.comment_text)
-            if match:
-                e = Telechat()
-                e.type = "scheduled_for_telechat"
-                e.telechat_date = date_in_match(match) if "Placed on" in c.comment_text else None
-                # can't extract this from history so we just take the latest value
-                e.returning_item = bool(o.idinternal.returning_item)
-                save_event(d, e, c)
-                handled = True
-
-            # ballot issued
-            match = re_ballot_issued.search(c.comment_text)
-            if match:
-                e = Text()
-                e.type = "sent_ballot_announcement"
-                save_event(d, e, c)
-
-                # when you issue a ballot, you also vote yes; add that vote
-                e = BallotPosition()
-                e.type = "changed_ballot_position"
-                e.ad = iesg_login_to_email(c.created_by)
-                e.desc = "[Ballot Position Update] New position, Yes, has been recorded by %s" % e.ad.get_name()
-                last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
-                e.pos = ballot_position_mapping["Yes"]
-                e.discuss = last_pos.ballotposition.discuss if last_pos else ""
-                e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
-                e.comment = last_pos.ballotposition.comment if last_pos else ""
-                e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
-                save_event(d, e, c)
-                handled = True
-
-            # ballot positions
-            match = re_ballot_position.search(c.comment_text)
-            if match:
-                position = match.group('position') or match.group('position2')
-                ad_name = match.group('for') or match.group('for2') or match.group('by') # some of the old positions don't specify who it's for, in that case assume it's "by", the person who entered the position
-                ad_first, ad_last = ad_name.split(' ')
-
-                e = BallotPosition()
-                e.type = "changed_ballot_position"
-                e.ad = iesg_login_to_email(IESGLogin.objects.get(first_name=ad_first, last_name=ad_last))
-                last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
-                e.pos = ballot_position_mapping[position]
-                e.discuss = last_pos.ballotposition.discuss if last_pos else ""
-                e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
-                e.comment = last_pos.ballotposition.comment if last_pos else ""
-                e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
-                save_event(d, e, c)
-                handled = True
-
-            # ballot discusses/comments
-            if c.ballot in (DocumentComment.BALLOT_DISCUSS, DocumentComment.BALLOT_COMMENT):
-                e = BallotPosition()
-                e.type = "changed_ballot_position"
-                e.ad = iesg_login_to_email(c.created_by)
-                last_pos = d.latest_event(type="changed_ballot_position", ballotposition__ad=e.ad)
-                e.pos = last_pos.ballotposition.pos if last_pos else ballot_position_mapping[None]
-                if c.ballot == DocumentComment.BALLOT_DISCUSS:
-                    e.discuss = c.comment_text
-                    e.discuss_time = c.datetime()
-                    e.comment = last_pos.ballotposition.comment if last_pos else ""
-                    e.comment_time = last_pos.ballotposition.comment_time if last_pos else None
-                    # put header into description
-                    c.comment_text = "[Ballot discuss]\n" + c.comment_text
-                else:
-                    e.discuss = last_pos.ballotposition.discuss if last_pos else ""
-                    e.discuss_time = last_pos.ballotposition.discuss_time if last_pos else None
-                    e.comment = c.comment_text
-                    e.comment_time = c.datetime()
-                    # put header into description
-                    c.comment_text = "[Ballot comment]\n" + c.comment_text
-                save_event(d, e, c)
-                handled = True
-
-            # last call requested
-            match = re_last_call_requested.search(c.comment_text)
-            if match:
-                e = Event(type="requested_last_call")
-                save_event(d, e, c)
-                handled = True
-
-            # state changes
-            match = re_state_changed.search(c.comment_text)
-            if match:
-                e = Event(type="changed_document")
-                save_event(d, e, c)
-                handled = True
-
-            # note changed
-            match = re_note_changed.search(c.comment_text)
-            if match:
-                # watch out for duplicates of which the old data's got many
-                if c.comment_text != last_note_change_text:
-                    last_note_change_text = c.comment_text
-                    e = Event(type="changed_document")
-                    save_event(d, e, c)
-                handled = True
-
-            # draft added
-            match = re_draft_added.search(c.comment_text)
-            if match:
-                e = Event(type="changed_document")
-                save_event(d, e, c)
-                handled = True
-
-            # new version
-            if c.comment_text == "New version available":
-                e = NewRevision(type="new_revision", rev=c.version)
-                save_event(d, e, c)
-                handled = True
-
-            # resurrect requested
-            match = re_resurrection_requested.search(c.comment_text)
-            if match:
-                e = Event(type="requested_resurrect")
-                save_event(d, e, c)
-                handled = True
-
-            # completed resurrect
-            match = re_completed_resurrect.search(c.comment_text)
-            if match:
-                e = Event(type="completed_resurrect")
-                save_event(d, e, c)
-                handled = True
-
-            # document expiration
-            if c.comment_text == "Document is expired by system":
-                e = Event(type="expired_document")
-                save_event(d, e, c)
-                handled = True
-
-            # approved document
-            match = re_document_approved.search(c.comment_text)
-            if match:
-                e = Event(type="iesg_approved")
-                save_event(d, e, c)
-                handled = True
-
-            # disapproved document
-            match = re_document_disapproved.search(c.comment_text)
-            if match:
-                e = Event(type="iesg_disapproved")
-                save_event(d, e, c)
-                handled = True
-
-
-            # some changes can be bundled - this is not entirely
-            # convenient, especially since it makes it hard to give
-            # each a type, so unbundle them
-            if not handled:
-                unhandled_lines = []
-                for line in c.comment_text.split("<br>"):
-                    # status date changed
-                    match = re_status_date_changed.search(line)
-                    if match:
-                        e = Status(type="changed_status_date", date=date_in_match(match))
-                        e.desc = line
-                        save_event(d, e, c)
-                        handled = True
-
-                    # AD/job owner changed
-                    match = re_responsible_ad_changed.search(line)
-                    if match:
-                        e = Event(type="changed_document")
-                        e.desc = line
-                        save_event(d, e, c)
-                        handled = True
-
-                    # intended standard level changed
-                    match = re_intended_status_changed.search(line)
-                    if match:
-                        e = Event(type="changed_document")
-                        e.desc = line
-                        save_event(d, e, c)
-                        handled = True
-
-                    # state change notice
-                    match = re_state_change_notice.search(line)
-                    if match:
-                        e = Event(type="changed_document")
-                        e.desc = line
-                        save_event(d, e, c)
-                        handled = True
-
-                    # area acronym
-                    match = re_area_acronym_changed.search(line)
-                    if match:
-                        e = Event(type="changed_document")
-                        e.desc = line
-                        save_event(d, e, c)
-                        handled = True
-
-                    # multiline change bundles end with a single "by xyz" that we skip
-                    if not handled and not line.startswith("by <b>"):
-                        unhandled_lines.append(line)
-
-                if handled:
-                    c.comment_text = "<br>".join(unhandled_lines)
-
-                    if c.comment_text:
-                        print "couldn't handle multi-line comment %s '%s'" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80])
-
-            # all others are added as comments
-            if not handled:
-                e = Event(type="added_comment")
-                save_event(d, e, c)
-
-                # stop typical comments from being output
-                typical_comments = [
-                    "Document Shepherd Write-up for %s" % d.name,
-                    "Who is the Document Shepherd for this document",
-                    "We understand that this document doesn't require any IANA actions",
-                    "IANA questions",
-                    "IANA has questions",
-                    "IANA comments",
-                    "IANA Comments",
-                    "IANA Evaluation Comments",
-                    ]
-                for t in typical_comments:
-                    if t in c.comment_text:
-                        handled = True
-                        break
-
-                if not handled:
-                    print "couldn't handle comment %s '%s'" % (c.id, c.comment_text.replace("\n", " ").replace("\r", "")[0:80])
+        # import attributes and events
+        import_from_idinternal(d, o.idinternal)
 
     # import missing revision changes from DraftVersions
     known_revisions = set(e.newrevision.rev for e in d.event_set.filter(type="new_revision").select_related('newrevision'))
-    for v in DraftVersions.objects.filter(filename=d.name).order_by("revision"):
+    draft_versions = list(DraftVersions.objects.filter(filename=d.name).order_by("revision"))
+    # DraftVersions is not entirely accurate, make sure we got the current one
+    draft_versions.insert(0, DraftVersions(filename=d.name, revision=o.revision_display(), revision_date=o.revision_date))
+    for v in draft_versions:
         if v.revision not in known_revisions:
             e = NewRevision(type="new_revision")
             e.rev = v.revision
@@ -521,104 +681,22 @@ for o in all_drafts:
         e.desc = "Last call sent"
         e.save()
 
-    if o.idinternal:
-        made_up_date = d.latest_event().time + datetime.timedelta(seconds=1) # datetime.datetime(2030, 1, 1, 0, 0, 0)
-
-        e = d.latest_event(Status, type="changed_status_date")
-        status_date = e.date if e else None
-        if o.idinternal.status_date != status_date:
-            e = Status(type="changed_status_date", date=o.idinternal.status_date)
-            e.time = made_up_date
-            e.by = system_email
-            e.doc = d
-            e.desc = "Status date has been changed to <b>%s</b> from <b>%s</b>" % (o.idinternal.status_date, status_date)
-            e.save()
-
-        e = d.latest_event(Telechat, type="scheduled_for_telechat")
-        telechat_date = e.telechat_date if e else None
-        if not o.idinternal.agenda:
-            o.idinternal.telechat_date = None # normalize
-
-        if telechat_date != o.idinternal.telechat_date:
-            e = Telechat(type="scheduled_for_telechat",
-                         telechat_date=o.idinternal.telechat_date,
-                         returning_item=bool(o.idinternal.returning_item))
-            # a common case is that it has been removed from the
-            # agenda automatically by a script without a notice in the
-            # comments, in that case the time is simply the day after
-            # the telechat
-            e.time = telechat_date + datetime.timedelta(days=1) if telechat_date and not o.idinternal.telechat_date else made_up_date
-            e.by = system_email
-            args = ("Placed on", o.idinternal.telechat_date) if o.idinternal.telechat_date else ("Removed from", telechat_date)
-            e.doc = d
-            e.desc = "%s agenda for telechat - %s by system" % args
-            e.save()
-
-        if o.idinternal.ballot:
-            text_date = made_up_date
-
-            # if any of these events have happened, they're closer to
-            # the real time
-            e = d.event_set.filter(type__in=("requested_last_call", "sent_last_call", "sent_ballot_announcement", "iesg_approved", "iesg_disapproved")).order_by('time')[:1]
-            if e:
-                text_date = e[0].time - datetime.timedelta(seconds=1)
-
-            if o.idinternal.ballot.approval_text:
-                e = Text(type="changed_ballot_approval_text", content=o.idinternal.ballot.approval_text)
-                e.time = text_date
-                e.by = system_email
-                e.doc = d
-                e.desc = "Ballot approval text was added"
-                e.save()
-
-            if o.idinternal.ballot.last_call_text:
-                e = Text(type="changed_last_call_text", content=o.idinternal.ballot.last_call_text)
-                e.time = text_date
-                e.by = system_email
-                e.doc = d
-                e.desc = "Last call text was added"
-                e.save()
-
-            if o.idinternal.ballot.ballot_writeup:
-                e = Text(type="changed_ballot_writeup_text", content=o.idinternal.ballot.ballot_writeup)
-                e.time = text_date
-                e.by = system_email
-                e.doc = d
-                e.desc = "Ballot writeup text was added"
-                e.save()
-
-    # import other attributes
-
-    # tags
-    tags = d.tags.all()
-    def sync_tag(include, tag):
-        if include and tag not in tags:
-            d.tags.add(tag)
-        if not include and tag in tags:
-            d.tags.remove(tag)
-
-    sync_tag(o.review_by_rfc_editor, tag_review_by_rfc_editor)
-    sync_tag(o.expired_tombstone, tag_expired_tombstone)
-    sync_tag(o.idinternal and o.idinternal.via_rfc_editor, tag_via_rfc_editor)
-
-    n = o.idinternal and o.idinternal.cur_sub_state and o.idinternal.cur_sub_state.sub_state
-    for k, v in substate_mapping.iteritems():
-        sync_tag(k == n, v)
-    # currently we ignore prev_sub_state
-
-    sync_tag(o.idinternal and o.idinternal.approved_in_minute, tag_approved_in_minute)
+    sync_tag(d, o.review_by_rfc_editor, tag_review_by_rfc_editor)
+    sync_tag(d, o.expired_tombstone, tag_expired_tombstone)
 
     # RFC alias
     if o.rfc_number:
         alias_doc("rfc%s" % o.rfc_number, d)
         # FIXME: some RFCs seem to be called rfc1234bis?
 
     if o.replaced_by:
         replacement, _ = Document.objects.get_or_create(name=o.replaced_by.filename)
         RelatedDocument.objects.get_or_create(document=replacement, doc_alias=d_alias, relationship=relationship_replaces)
 
+    # the RFC-related attributes are imported when we handle the RFCs below
+
     print "imported", d.name, " - ", d.iesg_state
@@ -653,12 +731,13 @@ def get_or_create_rfc_document(rfc_number):
     alias = alias_doc("rfc%s" % rfc_number, d)
 
     return (d, alias)
 
 
 all_rfcs = RfcIndex.objects.all()
 
 if all_drafts.count() != InternetDraft.objects.count():
-    if document_name_to_import.startswith("rfc"):
-        # we wanted to import just an RFC, great
+    if document_name_to_import and document_name_to_import.startswith("rfc"):
+        # we wanted to import an RFC
         all_rfcs = all_rfcs.filter(rfc_number=document_name_to_import[3:])
     else:
         # if we didn't process all drafts, limit the RFCs to the ones we
@@ -667,7 +746,38 @@ if all_drafts.count() != InternetDraft.objects.count():
 
 for o in all_rfcs:
     d, d_alias = get_or_create_rfc_document(o.rfc_number)
-    #if d.name.startswith('rfc'):
+    d.time = datetime.datetime.now()
+    d.title = o.title
+    d.std_level = std_level_mapping[o.current_status]
+    d.stream = stream_mapping[o.stream]
+    if not d.group and o.wg:
+        d.group = Group.objects.get(acronym=o.wg)
+
+    # get some values from the rfc table
+    rfcs = Rfc.objects.filter(rfc_number=o.rfc_number).select_related()
+    if rfcs:
+        r = rfcs[0]
+        d.intended_std_level = intended_std_level_mapping[r.intended_status.status]
+    d.save()
+
+    # a few RFCs have an IDInternal so we may have to import the
+    # events and attributes
+    internals = IDInternal.objects.filter(rfc_flag=1, draft=o.rfc_number)
+    if internals:
+        if d.name.startswith("rfc"):
+            # clear any already imported events as the event importer isn't
+            # clever enough to do a diff
+            d.event_set.all().delete()
+        import_from_idinternal(d, internals[0])
+
+    # publication date
+    e, _ = Event.objects.get_or_create(doc=d, type="published_rfc")
+    e.time = o.rfc_published_date
+    e.by = system_email
+    e.desc = "RFC published"
+    e.save()
+
 
     # import obsoletes/updates
     def make_relation(other_rfc, rel_type, reverse):
         other_number = int(other_rfc.replace("RFC", ""))
@@ -691,27 +801,10 @@ for o in all_rfcs:
             make_relation(x, relationship_updates, True)
 
     if o.also:
+        print o.also
         alias_doc(o.also.lower(), d)
 
+    sync_tag(d, o.has_errata, tag_has_errata)
+
+    # FIXME: import RFC authors?
     print "imported", d_alias.name, " - ", d.rfc_state
-
-
-sys.exit(0)
-
-class RfcIndex(models.Model):
-    # rfc_number = models.IntegerField(primary_key=True)
-    title = models.CharField(max_length=250)
-    authors = models.CharField(max_length=250)
-    rfc_published_date = models.DateField()
-    current_status = models.CharField(max_length=50,null=True)
-    # updates = models.CharField(max_length=200,blank=True,null=True)
-    # updated_by = models.CharField(max_length=200,blank=True,null=True)
-    # obsoletes = models.CharField(max_length=200,blank=True,null=True)
-    # obsoleted_by = models.CharField(max_length=200,blank=True,null=True)
-    # also = models.CharField(max_length=50,blank=True,null=True)
-    draft = models.CharField(max_length=200,null=True)
-    has_errata = models.BooleanField()
-    stream = models.CharField(max_length=15,blank=True,null=True)
-    wg = models.CharField(max_length=15,blank=True,null=True)
-    file_formats = models.CharField(max_length=20,blank=True,null=True)
@@ -1,6 +1,7 @@
 #!/usr/bin/python
 
 import sys, os, re, datetime
+import unaccent
 
 basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
 sys.path = [ basedir ] + sys.path
@@ -14,17 +15,18 @@ management.setup_environ(settings)
 from redesign.person.models import *
 from redesign.group.models import *
 from redesign.name.models import *
-from ietf.idtracker.models import IESGLogin, AreaDirector, PersonOrOrgInfo
+from ietf.idtracker.models import IESGLogin, AreaDirector, IDAuthor, PersonOrOrgInfo
 
 # assumptions:
 #  - groups have been imported
 
-# PersonOrOrgInfo/PostalAddress/EmailAddress/PhoneNumber are not imported
+# PersonOrOrgInfo/PostalAddress/EmailAddress/PhoneNumber are not
+# imported, although some information is retrieved from those
 
-# imports IESGLogin, AreaDirector
+# imports IESGLogin, AreaDirector and persons from IDAuthor
 
-# should import IDAuthor, WGChair, WGEditor,
-# WGSecretary, WGTechAdvisor, Role, ChairsHistory, IRTFChair
+# should probably import WGChair, WGEditor, WGSecretary,
+# WGTechAdvisor, Role, ChairsHistory, IRTFChair
 
 # make sure names exist
 def name(name_class, slug, name, desc=""):
@@ -44,25 +46,27 @@ def get_or_create_email(o):

    email = o.person.email()[1] or hardcoded_emails.get("%s %s" % (o.person.first_name, o.person.last_name))
    if not email:
        print "NO EMAIL FOR %s %s %s %s" % (o.__class__, o.id, o.person.first_name, o.person.last_name)
        print "NO EMAIL FOR %s %s %s %s %s" % (o.__class__, o.id, o.person.pk, o.person.first_name, o.person.last_name)
        return None

    e, _ = Email.objects.get_or_create(address=email)
    if not e.person:
        n = u"%s %s" % (o.person.first_name, o.person.last_name)
        aliases = Alias.objects.filter(name=n)
        asciified = unaccent.asciify(n)
        aliases = Alias.objects.filter(name__in=(n, asciified))
        if aliases:
            p = aliases[0].person
        else:
            p = Person.objects.create(name=n, ascii=n)
            Alias.objects.create(name=n, person=p)
            p = Person.objects.create(name=n, ascii=asciified)
            # FIXME: fill in address?
            Alias.objects.create(name=n, person=p)
            if asciified != n:
                Alias.objects.create(name=asciified, person=p)

        e.person = p
        e.save()

    return e


# IESGLogin
for o in IESGLogin.objects.all():
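A worked example of the alias handling above, with a hypothetical author name:

n = u"S\xf6ren \xd6stborg"        # u"Sören Östborg"
asciified = unaccent.asciify(n)   # "Soeren Oestborg", via the diaeresis rule
# On first sight neither alias exists, so a Person is created with
# ascii="Soeren Oestborg" and Alias rows for both spellings; any later
# occurrence of either spelling then resolves to that same Person
# through Alias.objects.filter(name__in=(n, asciified)).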
@@ -95,3 +99,10 @@ for o in AreaDirector.objects.all():

    Role.objects.get_or_create(name=area_director_role, group=area, email=email)

# IDAuthor persons
for o in IDAuthor.objects.all().order_by('id').select_related('person'):
    print "importing IDAuthor", o.id, o.person_id, o.person.first_name.encode('utf-8'), o.person.last_name.encode('utf-8')
    email = get_or_create_email(o)

    # FIXME: we lack email addresses for some, need to do something
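Because the loop reuses get_or_create_email(), an author who already came in as an IESGLogin or AreaDirector is matched to the same Person and Email rather than duplicated; schematically:

e = get_or_create_email(o)  # may print "NO EMAIL FOR ..." and return None
if e:
    print e.address, e.person.name  # same Person as the earlier import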
@@ -17,6 +17,6 @@ admin.site.register(IanaDocStateName, NameAdmin)
admin.site.register(RfcDocStateName, NameAdmin)
admin.site.register(DocTypeName, NameAdmin)
admin.site.register(DocInfoTagName, NameAdmin)
admin.site.register(IntendedStatusName, NameAdmin)
admin.site.register(StdStatusName, NameAdmin)
admin.site.register(StdLevelName, NameAdmin)
admin.site.register(IntendedStdLevelName, NameAdmin)
admin.site.register(BallotPositionName, NameAdmin)
@@ -21,7 +21,7 @@ class GroupTypeName(NameModel):
class RoleName(NameModel):
    """AD, Chair"""
class DocStreamName(NameModel):
    """IETF, IAB, IRTF, Independent Submission"""
    """IETF, IAB, IRTF, Independent Submission, Legacy"""
class DocStateName(NameModel):
    """Active, Expired, RFC, Replaced, Withdrawn"""
class DocRelationshipName(NameModel):
@@ -47,10 +47,10 @@ class DocTypeName(NameModel):
class DocInfoTagName(NameModel):
    """Waiting for Reference, IANA Coordination, Revised ID Needed,
    External Party, AD Followup, Point Raised - Writeup Needed"""
class StdStatusName(NameModel):
class StdLevelName(NameModel):
    """Proposed Standard, Draft Standard, Standard, Experimental,
    Informational, Best Current Practice, Historic, ..."""
class IntendedStatusName(NameModel):
class IntendedStdLevelName(NameModel):
    """Standards Track, Experimental, Informational, Best Current
    Practice, Historic, ..."""
class BallotPositionName(NameModel):
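These NameModel subclasses carry only docstrings; the import side is expected to populate them, e.g. via the name() helper from the person import script. The slugs below are illustrative guesses, not taken from this commit:

name(StdLevelName, "ps", "Proposed Standard")
name(StdLevelName, "ds", "Draft Standard")
name(DocStreamName, "legacy", "Legacy")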
@@ -11,7 +11,8 @@ class TranslatingQuerySet(QuerySet):
            if callable(t):
                t, v = t(v)

            res[t] = v
            if t:
                res[t] = v
            else:
                res[k] = v
        return res
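For context, a sketch of the method this hunk patches, under assumed names (translated_attrs standing in for the mapping from old-schema kwargs to new-schema ones). The added "if t:" keeps a kwarg under its original key when the translator yields no replacement, instead of filing it under None:

def translate_kwargs(self, kwargs):
    res = {}
    for k, v in kwargs.iteritems():
        t = self.translated_attrs.get(k)
        if callable(t):
            t, v = t(v)
        if t:
            res[t] = v  # renamed (and possibly value-rewritten) kwarg
        else:
            res[k] = v  # no replacement key: keep the original
    return res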
144 redesign/unaccent.py Normal file
@@ -0,0 +1,144 @@
# -*- coding: utf-8 -*-
# use a dynamically populated translation dictionary to remove accents
# from a string
# (by Chris Mulligan, http://chmullig.com/2009/12/python-unicode-ascii-ifier/)

import unicodedata, sys

class unaccented_map(dict):
    # Translation dictionary. Translation entries are added to this dictionary as needed.
    CHAR_REPLACEMENT = {
        0xc6: u"AE", # Æ LATIN CAPITAL LETTER AE
        0xd0: u"D",  # Ð LATIN CAPITAL LETTER ETH
        0xd8: u"OE", # Ø LATIN CAPITAL LETTER O WITH STROKE
        0xde: u"Th", # Þ LATIN CAPITAL LETTER THORN
        0xc4: u'Ae', # Ä LATIN CAPITAL LETTER A WITH DIAERESIS
        0xd6: u'Oe', # Ö LATIN CAPITAL LETTER O WITH DIAERESIS
        0xdc: u'Ue', # Ü LATIN CAPITAL LETTER U WITH DIAERESIS

        0xc0: u"A", # À LATIN CAPITAL LETTER A WITH GRAVE
        0xc1: u"A", # Á LATIN CAPITAL LETTER A WITH ACUTE
        0xc3: u"A", # Ã LATIN CAPITAL LETTER A WITH TILDE
        0xc7: u"C", # Ç LATIN CAPITAL LETTER C WITH CEDILLA
        0xc8: u"E", # È LATIN CAPITAL LETTER E WITH GRAVE
        0xc9: u"E", # É LATIN CAPITAL LETTER E WITH ACUTE
        0xca: u"E", # Ê LATIN CAPITAL LETTER E WITH CIRCUMFLEX
        0xcc: u"I", # Ì LATIN CAPITAL LETTER I WITH GRAVE
        0xcd: u"I", # Í LATIN CAPITAL LETTER I WITH ACUTE
        0xd2: u"O", # Ò LATIN CAPITAL LETTER O WITH GRAVE
        0xd3: u"O", # Ó LATIN CAPITAL LETTER O WITH ACUTE
        0xd5: u"O", # Õ LATIN CAPITAL LETTER O WITH TILDE
        0xd9: u"U", # Ù LATIN CAPITAL LETTER U WITH GRAVE
        0xda: u"U", # Ú LATIN CAPITAL LETTER U WITH ACUTE

        0xdf: u"ss", # ß LATIN SMALL LETTER SHARP S
        0xe6: u"ae", # æ LATIN SMALL LETTER AE
        0xf0: u"d",  # ð LATIN SMALL LETTER ETH
        0xf8: u"oe", # ø LATIN SMALL LETTER O WITH STROKE
        0xfe: u"th", # þ LATIN SMALL LETTER THORN,
        0xe4: u'ae', # ä LATIN SMALL LETTER A WITH DIAERESIS
        0xf6: u'oe', # ö LATIN SMALL LETTER O WITH DIAERESIS
        0xfc: u'ue', # ü LATIN SMALL LETTER U WITH DIAERESIS

        0xe0: u"a", # à LATIN SMALL LETTER A WITH GRAVE
        0xe1: u"a", # á LATIN SMALL LETTER A WITH ACUTE
        0xe3: u"a", # ã LATIN SMALL LETTER A WITH TILDE
        0xe7: u"c", # ç LATIN SMALL LETTER C WITH CEDILLA
        0xe8: u"e", # è LATIN SMALL LETTER E WITH GRAVE
        0xe9: u"e", # é LATIN SMALL LETTER E WITH ACUTE
        0xea: u"e", # ê LATIN SMALL LETTER E WITH CIRCUMFLEX
        0xec: u"i", # ì LATIN SMALL LETTER I WITH GRAVE
        0xed: u"i", # í LATIN SMALL LETTER I WITH ACUTE
        0xf2: u"o", # ò LATIN SMALL LETTER O WITH GRAVE
        0xf3: u"o", # ó LATIN SMALL LETTER O WITH ACUTE
        0xf5: u"o", # õ LATIN SMALL LETTER O WITH TILDE
        0xf9: u"u", # ù LATIN SMALL LETTER U WITH GRAVE
        0xfa: u"u", # ú LATIN SMALL LETTER U WITH ACUTE

        0x2018: u"'", # ‘ LEFT SINGLE QUOTATION MARK
        0x2019: u"'", # ’ RIGHT SINGLE QUOTATION MARK
        0x201c: u'"', # “ LEFT DOUBLE QUOTATION MARK
        0x201d: u'"', # ” RIGHT DOUBLE QUOTATION MARK
    }

    # Maps a unicode character code (the key) to a replacement code
    # (either a character code or a unicode string).
    def mapchar(self, key):
        ch = self.get(key)
        if ch is not None:
            return ch
        try:
            de = unicodedata.decomposition(unichr(key))
            p1, p2 = [int(x, 16) for x in de.split(None, 1)]
            if p2 == 0x308:
                ch = self.CHAR_REPLACEMENT.get(key)
            else:
                ch = int(p1)
        except (IndexError, ValueError):
            ch = self.CHAR_REPLACEMENT.get(key, key)
        self[key] = ch
        return ch
if sys.version <= "2.5":
|
||||
# use __missing__ where available
|
||||
__missing__ = mapchar
|
||||
else:
|
||||
# otherwise, use standard __getitem__ hook (this is slower,
|
||||
# since it's called for each character)
|
||||
__getitem__ = mapchar
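    # (With __missing__ in place, unicode.translate() uses the dict's fast
    # lookup for already-cached keys and calls mapchar() only on a miss;
    # the __getitem__ fallback routes every character through mapchar(),
    # which then checks the cache itself via self.get(key).)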

map = unaccented_map()

def asciify(input):
    try:
        return input.encode('ascii')
    except AttributeError:
        return str(input).encode('ascii')
    except UnicodeEncodeError:
        return unicodedata.normalize('NFKD', input.translate(map)).encode('ascii', 'replace')

text = u"""

##Norwegian
"Jo, når'n da ha gått ett stôck te, så kommer'n te e å,
å i åa ä e ö."
"Vasa", sa'n.
"Å i åa ä e ö", sa ja.
"Men va i all ti ä dä ni säjer, a, o?", sa'n.
"D'ä e å, vett ja", skrek ja, för ja ble rasen, "å i åa
ä e ö, hörer han lite, d'ä e å, å i åa ä e ö."
"A, o, ö", sa'n å dämmä geck'en.
Jo, den va nôe te dum den.

(taken from the short story "Dumt fôlk" in Gustaf Fröding's
"Räggler å paschaser på våra mål tå en bonne" (1895).

##Danish

Nu bliver Mølleren sikkert sur, og dog, han er stadig den største på verdensplan.

Userneeds A/S er en dansk virksomhed, der udfører statistiske undersøgelser på internettet. Den blev etableret i 2001 som et anpartsselskab af David Jensen og Henrik Vincentz.
Frem til 2004 var det primære fokus på at forbedre hjemmesiderne for andre virksomheder. Herefter blev fokus omlagt, så man også beskæftigede sig med statistiske målinger. Ledelsen vurderede, at dette marked ville vokse betragteligt i de kommende år, hvilket man ønskede at udnytte.
Siden omlægningen er der blevet fokuseret på at etablere meget store forbrugerpaneler. Således udgjorde det danske panel i 2005 65.000 personer og omfatter per 2008 100.000 personer.
I 2007 blev Userneeds ApS konverteret til aktieselskabet Userneeds A/S
Efterhånden er aktiviteterne blevet udvidet til de nordiske lande (med undtagelse af Island) og besidder i 2009 et forbrugerpanel med i alt mere end 250.000 personer bosat i de fire store nordiske lande.
Selskabet tegnes udadtil af en direktion på tre personer, der foruden Henrik Vincentz tæller Palle Viby Morgen og Simon Andersen.
De primære konkurrenter er andre analysebureauer som AC Nielsen, Analysedanmark, Gallup, Norstat, Synnovate og Zapera.

##Finnish
Titus Aurelius Fulvus Boionius Arrius Antoninus eli Antoninus Pius (19. syyskuuta 86 – 7. maaliskuuta 161) oli Rooman keisari vuosina 138–161. Antoninus sai lisänimensä Pius (suom. velvollisuudentuntoinen) noustuaan valtaan vuonna 138. Hän kuului Nerva–Antoninusten hallitsijasukuun ja oli suosittu ja kunnioitettu keisari, joka tunnettiin lempeydestään ja oikeamielisyydestään. Hänen valtakauttaan on usein sanottu Rooman valtakunnan kultakaudeksi, jolloin talous kukoisti, poliittinen tilanne oli vakaa ja armeija vahva. Hän hallitsi pitempään kuin yksikään Rooman keisari Augustuksen jälkeen, ja hänen kautensa tunnetaan erityisen rauhallisena, joskaan ei sodattomana. Antoninus adoptoi Marcus Aureliuksen ja Lucius Veruksen vallanperijöikseen. Hän kuoli vuonna 161.

#German
So heißt ein altes Märchen: "Der Ehre Dornenpfad", und es handelt von einem Schützen mit Namen Bryde, der wohl zu großen Ehren und Würden kam, aber nicht ohne lange und vielfältige Widerwärtigkeiten und Fährnisse des Lebens durchzumachen. Manch einer von uns hat es gewiß als Kind gehört oder es vielleicht später gelesen und dabei an seinen eigenen stillen Dornenweg und die vielen Widerwärtigkeiten gedacht. Märchen und Wirklichkeit liegen einander so nahe, aber das Märchen hat seine harmonische Lösung hier auf Erden, während die Wirklichkeit sie meist aus dem Erdenleben hinaus in Zeit und Ewigkeit verlegt.

12\xbd inch
"""

if __name__ == "__main__":
    for i, line in enumerate(text.splitlines()):
        line = line.strip()
        print line
        if line and not line.startswith('#'):
            print '\tTrans: ', asciify(line).strip()
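A quick check of the module under Python 2; the expected outputs follow from the replacement table and the decomposition fallback above:

import unaccent

print unaccent.asciify(u"J\xf6rgen")   # "Joergen" (0x308 diaeresis rule)
print unaccent.asciify(u"R\xe9my")     # "Remy" (decomposed to base letter)
print unaccent.asciify("plain ascii")  # unchanged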