Port idindex to the new schema, speed it up, add tests, refactor the
index page in views_search to share code with the text index file, get
rid of some special-case idindex filters from ietf_filters, and move
the "/drafts/" redirects to a file in /doc/
 - Legacy-Id: 5634
Ole Laursen 2013-04-10 11:48:07 +00:00
parent 4f7de7b4f4
commit 3d1eb07afe
20 changed files with 478 additions and 434 deletions
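Note: the text-index generation now lives in one module, ietf.idindex.index, shared by the web views and the export scripts touched below. A minimal usage sketch of the three entry points (the output filenames here are illustrative, not taken from this commit):

from ietf.idindex.index import all_id_txt, all_id2_txt, id_index_txt

def write(path, content):
    # the functions return unicode; encode to UTF-8 like the export scripts do
    with open(path, "w") as f:
        f.write(content.encode("utf-8"))

write("all_id.txt", all_id_txt())             # per-draft status summary
write("all_id2.txt", all_id2_txt())           # extended tab-separated index
write("id_index.txt", id_index_txt())         # human-readable index
write("id_abstracts.txt", id_index_txt(with_abstracts=True))  # same, plus abstracts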

View file

@@ -0,0 +1,23 @@
# Copyright The IETF Trust 2007, All Rights Reserved
from django.conf import settings
from django.conf.urls.defaults import patterns
from django.http import HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from ietf.group.models import Group
urlpatterns = patterns('',
(r'^$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/'}),
(r'^all/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/'}),
(r'^rfc/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/#rfc'}),
(r'^dead/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/#expired'}),
(r'^current/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/active/'}),
(r'^(?P<object_id>\d+)/(related/)?$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/' }),
(r'^(?P<name>[^/]+)/(related/)?$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/%(name)s/' }),
(r'^wgid/(?P<id>\d+)/$', lambda request, id: HttpResponsePermanentRedirect("/wg/%s/" % get_object_or_404(Group, id=id).acronym)),
(r'^wg/(?P<acronym>[^/]+)/$', 'django.views.generic.simple.redirect_to', { 'url': '/wg/%(acronym)s/' }),
(r'^all_id(?:_txt)?.html$', 'django.views.generic.simple.redirect_to', { 'url': 'http://www.ietf.org/id/all_id.txt' }),
)
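A quick way to exercise these patterns from a Django shell or test case (sketch only; the draft name is made up):

from django.test import Client

c = Client()
r = c.get("/drafts/draft-ietf-example-foo/")
# the old /drafts/ URLs now answer with a permanent redirect into /doc/
print r.status_code, r["Location"]   # 301 http://testserver/doc/draft-ietf-example-foo/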

View file

@@ -2,7 +2,6 @@ import os, shutil, datetime
 import django.test
 from django.core.urlresolvers import reverse as urlreverse
-from django.conf import settings
 from pyquery import PyQuery

View file

@@ -34,5 +34,5 @@ from ietf import settings
 from django.core import management
 management.setup_environ(settings)
-from ietf.idindex.views import all_id2_txt
+from ietf.idindex.index import all_id2_txt
 print all_id2_txt().encode('utf-8'),

View file

@@ -34,5 +34,5 @@ from ietf import settings
 from django.core import management
 management.setup_environ(settings)
-from ietf.idindex.views import all_id_txt
-print all_id_txt(),
+from ietf.idindex.index import all_id_txt
+print all_id_txt().encode("utf-8"),

View file

@@ -34,9 +34,5 @@ from ietf import settings
 from django.core import management
 management.setup_environ(settings)
-from ietf.idindex.views import id_abstracts_txt
-x = id_abstracts_txt()
-if isinstance(x, unicode):
-    print x.encode('utf-8'),
-else:
-    print x,
+from ietf.idindex.index import id_index_txt
+print id_index_txt(with_abstracts=True).encode('utf-8'),

View file

@@ -34,9 +34,5 @@ from ietf import settings
 from django.core import management
 management.setup_environ(settings)
-from ietf.idindex.views import id_index_txt
-x = id_index_txt()
-if isinstance(x, unicode):
-    print x.encode('utf-8'),
-else:
-    print x,
+from ietf.idindex.index import id_index_txt
+print id_index_txt().encode('utf-8'),

ietf/idindex/index.py (new file, 270 lines)
View file

@@ -0,0 +1,270 @@
import datetime, os
import pytz
from django.conf import settings
from django.template.loader import render_to_string
from ietf.idtracker.templatetags.ietf_filters import clean_whitespace
from ietf.doc.models import *
def all_id_txt():
# this returns a lot of data so try to be efficient
# precalculations
revision_time = dict(NewRevisionDocEvent.objects.filter(type="new_revision", doc__name__startswith="draft-").order_by('time').values_list("doc_id", "time"))
def formatted_rev_date(name):
t = revision_time.get(name)
return t.strftime("%Y-%m-%d") if t else ""
rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc",
document__states=State.objects.get(type="draft", slug="rfc")).values_list("document_id", "name"))
replacements = dict(RelatedDocument.objects.filter(target__document__states=State.objects.get(type="draft", slug="repl"),
relationship="replaces").values_list("target__document_id", "source"))
# we need a distinct to prevent the queries below from multiplying the result
all_ids = Document.objects.filter(type="draft").order_by('name').exclude(name__startswith="rfc").distinct()
res = ["\nInternet-Drafts Status Summary\n"]
def add_line(f1, f2, f3, f4):
# each line must have exactly 4 tab-separated fields
res.append(f1 + "\t" + f2 + "\t" + f3 + "\t" + f4)
inactive_states = ["pub", "watching", "dead"]
in_iesg_process = all_ids.exclude(states=State.objects.get(type="draft", slug="rfc")).filter(states__in=list(State.objects.filter(type="draft-iesg").exclude(slug__in=inactive_states))).only("name", "rev")
# handle those actively in the IESG process
for d in in_iesg_process:
state = d.get_state("draft-iesg").name
tags = d.tags.filter(slug__in=IESG_SUBSTATE_TAGS).values_list("name", flat=True)
if tags:
state += "::" + "::".join(tags)
add_line(d.name + "-" + d.rev,
formatted_rev_date(d.name),
"In IESG processing - ID Tracker state <" + state + ">",
"",
)
# handle the rest
not_in_process = all_ids.exclude(pk__in=[d.name for d in in_iesg_process])
for s in State.objects.filter(type="draft").order_by("order"):
for name, rev in not_in_process.filter(states=s).values_list("name", "rev"):
state = s.name
last_field = ""
if s.slug == "rfc":
a = rfc_aliases.get(name)
if a:
last_field = a[3:]
elif s.slug == "repl":
state += " replaced by " + replacements.get(name, "0")
add_line(name + "-" + rev,
formatted_rev_date(name),
state,
last_field,
)
return u"\n".join(res) + "\n"
def file_types_for_drafts():
"""Look in the draft directory and return file types found as dict (name + rev -> [t1, t2, ...])."""
file_types = {}
for filename in os.listdir(settings.INTERNET_DRAFT_PATH):
if filename.startswith("draft-"):
base, ext = os.path.splitext(filename)
if ext:
if base not in file_types:
file_types[base] = [ext]
else:
file_types[base].append(ext)
return file_types
def all_id2_txt():
# this returns a lot of data so try to be efficient
drafts = Document.objects.filter(type="draft").exclude(name__startswith="rfc").order_by('name').select_related('group', 'group__parent', 'ad', 'ad__email', 'intended_std_level', 'shepherd', 'shepherd__email')
rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc",
document__states=State.objects.get(type="draft", slug="rfc")).values_list("document_id", "name"))
replacements = dict(RelatedDocument.objects.filter(target__document__states=State.objects.get(type="draft", slug="repl"),
relationship="replaces").values_list("target__document_id", "source"))
revision_time = dict(DocEvent.objects.filter(type="new_revision", doc__name__startswith="draft-").order_by('time').values_list("doc_id", "time"))
file_types = file_types_for_drafts()
authors = {}
for a in DocumentAuthor.objects.filter(document__name__startswith="draft-").order_by("order").select_related("author", "author__person").iterator():
if a.document_id not in authors:
l = authors[a.document_id] = []
else:
l = authors[a.document_id]
if "@" in a.author.address:
l.append(u'%s <%s>' % (a.author.person.plain_name().replace("@", ""), a.author.address.replace(",", "")))
else:
l.append(a.author.person.plain_name())
res = []
for d in drafts:
state = d.get_state_slug()
iesg_state = d.get_state("draft-iesg")
fields = []
# 0
fields.append(d.name + "-" + d.rev)
# 1
fields.append("-1") # used to be internal numeric identifier, we don't have that anymore
# 2
fields.append(d.get_state().name if state else "")
# 3
if state == "active":
s = "I-D Exists"
if iesg_state:
s = iesg_state.name
tags = d.tags.filter(slug__in=IESG_SUBSTATE_TAGS).values_list("name", flat=True)
if tags:
s += "::" + "::".join(tags)
fields.append(s)
else:
fields.append("")
# 4
rfc_number = ""
if state == "rfc":
a = rfc_aliases.get(d.name)
if a:
rfc_number = a[3:]
fields.append(rfc_number)
# 5
repl = ""
if state == "repl":
repl = replacements.get(d.name, "")
fields.append(repl)
# 6
t = revision_time.get(d.name)
fields.append(t.strftime("%Y-%m-%d") if t else "")
# 7
group_acronym = ""
if d.group and d.group.type_id != "area" and d.group.acronym != "none":
group_acronym = d.group.acronym
fields.append(group_acronym)
# 8
area = ""
if d.group:
if d.group.type_id == "area":
area = d.group.acronym
elif d.group.type_id == "wg" and d.group.parent and d.group.parent.type_id == "area":
area = d.group.parent.acronym
fields.append(area)
# 9 responsible AD name
fields.append(unicode(d.ad) if d.ad else "")
# 10
fields.append(d.intended_std_level.name if d.intended_std_level else "")
# 11
lc_expires = ""
if iesg_state and iesg_state.slug == "lc":
e = d.latest_event(LastCallDocEvent, type="sent_last_call")
if e:
lc_expires = e.expires.strftime("%Y-%m-%d")
fields.append(lc_expires)
# 12
fields.append(",".join(file_types.get(d.name + "-" + d.rev, "")) if state == "active" else "")
# 13
fields.append(clean_whitespace(d.title)) # FIXME: we should make sure this is okay in the database and in submit
# 14
fields.append(u", ".join(authors.get(d.name, [])))
# 15
fields.append(d.shepherd.formatted_email().replace('"', '') if d.shepherd else "")
# 16 Responsible AD name and email
fields.append(d.ad.formatted_email().replace('"', '') if d.ad else "")
#
res.append(u"\t".join(fields))
return render_to_string("idindex/all_id2.txt", {'data': u"\n".join(res) })
def active_drafts_index_by_group(extra_values=()):
"""Return active drafts grouped into their corresponding
associated group, for spitting out draft index."""
# this returns a lot of data so try to be efficient
active_state = State.objects.get(type="draft", slug="active")
groups_dict = dict((g.id, g) for g in Group.objects.all())
extracted_values = ("name", "rev", "title", "group_id") + extra_values
docs_dict = dict((d["name"], d)
for d in Document.objects.filter(states=active_state).values(*extracted_values))
# add initial and latest revision time
for time, doc_id in NewRevisionDocEvent.objects.filter(type="new_revision", doc__states=active_state).order_by('-time').values_list("time", "doc_id"):
d = docs_dict.get(doc_id)
if d:
if "rev_time" not in d:
d["rev_time"] = time
d["initial_rev_time"] = time
# add authors
for a in DocumentAuthor.objects.filter(document__states=active_state).order_by("order").select_related("author__person"):
d = docs_dict.get(a.document_id)
if d:
if "authors" not in d:
d["authors"] = []
d["authors"].append(unicode(a.author.person))
# put docs into groups
for d in docs_dict.itervalues():
g = groups_dict.get(d["group_id"])
if not g:
continue
if not hasattr(g, "active_drafts"):
g.active_drafts = []
g.active_drafts.append(d)
groups = [g for g in groups_dict.itervalues() if hasattr(g, "active_drafts")]
groups.sort(key=lambda g: g.acronym)
fallback_time = datetime.datetime(1950, 1, 1)
for g in groups:
g.active_drafts.sort(key=lambda d: d.get("initial_rev_time", fallback_time))
return groups
def id_index_txt(with_abstracts=False):
extra_values = ()
if with_abstracts:
extra_values = ("abstract",)
groups = active_drafts_index_by_group(extra_values)
file_types = file_types_for_drafts()
for g in groups:
for d in g.active_drafts:
# we need to output a multiple extension thing
types = file_types.get(d["name"] + "-" + d["rev"], "")
exts = ".txt"
if ".ps" in types:
exts += ",.ps"
if ".pdf" in types:
exts += ",.pdf"
d["exts"] = exts
return render_to_string("idindex/id_index.txt", {
'groups': groups,
'time': datetime.datetime.now(pytz.UTC).strftime("%Y-%m-%d %H:%M:%S %Z"),
'with_abstracts': with_abstracts,
})
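For reference, each line that add_line() above emits into all_id.txt carries exactly four tab-separated fields; a small parsing sketch with a made-up draft name:

line = "draft-ietf-example-foo-03\t2013-02-01\tIn IESG processing - ID Tracker state <In Last Call>\t"
name_and_rev, rev_date, status, last_field = line.split("\t")
# last_field holds the RFC number for published drafts and is empty otherwise;
# replaced drafts instead extend the status field, e.g.
# "Replaced replaced by draft-ietf-example-bar".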

View file

@@ -1 +0,0 @@
# Copyright The IETF Trust 2007, All Rights Reserved

View file

@@ -1,72 +1,146 @@
-# Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
-# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#
-#  * Redistributions in binary form must reproduce the above
-#    copyright notice, this list of conditions and the following
-#    disclaimer in the documentation and/or other materials provided
-#    with the distribution.
-#
-#  * Neither the name of the Nokia Corporation and/or its
-#    subsidiary(-ies) nor the names of its contributors may be used
-#    to endorse or promote products derived from this software
-#    without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-import re
-from django.test.client import Client
-from ietf.utils.test_utils import SimpleUrlTestCase, RealDatabaseTest
-
-class IdIndexUrlTestCase(SimpleUrlTestCase):
-    def testUrls(self):
-        self.doTestUrls(__file__)
-
-# class IndexTestCase(unittest.TestCase, RealDatabaseTest):
-#     def setUp(self):
-#         self.setUpRealDatabase()
-#     def tearDown(self):
-#         self.tearDownRealDatabase()
-#     def testAllId(self):
-#         print " Testing all_id.txt generation"
-#         c = Client()
-#         response = c.get('/drafts/_test/all_id.txt')
-#         self.assertEquals(response.status_code, 200)
-#         content = response.content
-#         # Test that correct version number is shown for couple of old drafts
-#         self.assert_(content.find("draft-ietf-tls-psk-09") >= 0)
-#         self.assert_(content.find("draft-eronen-eap-sim-aka-80211-00") >= 0)
-#         # Since all_id.txt contains all old drafts, it should never shrink
-#         lines = content.split("\n")
-#         self.assert_(len(lines) > 18000)
-#         # Test that the lines look OK and have correct number of tabs
-#         r = re.compile(r'^(draft-\S*-\d\d)\t(\d\d\d\d-\d\d-\d\d)\t([^\t]+)\t([^\t]*)$')
-#         for line in lines:
-#             if ((line == "") or
-#                 (line == "Internet-Drafts Status Summary") or
-#                 (line == "Web version is available at") or
-#                 (line == "https://datatracker.ietf.org/public/idindex.cgi")):
-#                 pass
-#             elif r.match(line):
-#                 pass
-#             else:
-#                 self.fail("Unexpected line \""+line+"\"")
-#         print "OK (all_id.txt)"
+import datetime, shutil
+
+import django.test
+from django.core.urlresolvers import reverse as urlreverse
+
+from ietf.utils.test_data import make_test_data
+
+from ietf.doc.models import *
+from ietf.idindex.index import *
+
+class IndexTestCase(django.test.TestCase):
+    fixtures = ['names']
+
+    def setUp(self):
+        self.id_dir = os.path.abspath("tmp-id-dir")
+        os.mkdir(self.id_dir)
+        settings.INTERNET_DRAFT_PATH = self.id_dir
+
+    def tearDown(self):
+        shutil.rmtree(self.id_dir)
+
+    def write_draft_file(self, name, size):
+        with open(os.path.join(self.id_dir, name), 'w') as f:
+            f.write("a" * size)
+
+    def test_all_id_txt(self):
+        draft = make_test_data()
+
+        # active in IESG process
+        draft.set_state(State.objects.get(type="draft", slug="active"))
+        draft.set_state(State.objects.get(type="draft-iesg", slug="lc"))
+
+        txt = all_id_txt()
+
+        self.assertTrue(draft.name + "-" + draft.rev in txt)
+        self.assertTrue(draft.get_state("draft-iesg").name in txt)
+
+        # not active in IESG process
+        draft.unset_state("draft-iesg")
+
+        txt = all_id_txt()
+
+        self.assertTrue(draft.name + "-" + draft.rev in txt)
+        self.assertTrue("Active" in txt)
+
+        # published
+        draft.set_state(State.objects.get(type="draft", slug="rfc"))
+        DocAlias.objects.create(name="rfc1234", document=draft)
+
+        txt = all_id_txt()
+
+        self.assertTrue(draft.name + "-" + draft.rev in txt)
+        self.assertTrue("RFC\t1234" in txt)
+
+        # replaced
+        draft.set_state(State.objects.get(type="draft", slug="repl"))
+        RelatedDocument.objects.create(
+            relationship=DocRelationshipName.objects.get(slug="replaces"),
+            source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
+            target=draft.docalias_set.get(name__startswith="draft"))
+
+        txt = all_id_txt()
+
+        self.assertTrue(draft.name + "-" + draft.rev in txt)
+        self.assertTrue("Replaced replaced by draft-test-replacement" in txt)
+
+    def test_all_id2_txt(self):
+        draft = make_test_data()
+
+        def get_fields(content):
+            self.assertTrue(draft.name + "-" + draft.rev in content)
+
+            for line in content.splitlines():
+                if line.startswith(draft.name + "-" + draft.rev):
+                    return line.split("\t")
+
+        # test Active
+        draft.set_state(State.objects.get(type="draft", slug="active"))
+        draft.set_state(State.objects.get(type="draft-iesg", slug="review-e"))
+
+        NewRevisionDocEvent.objects.create(doc=draft, type="new_revision", rev=draft.rev, by=draft.ad)
+
+        self.write_draft_file("%s-%s.txt" % (draft.name, draft.rev), 5000)
+        self.write_draft_file("%s-%s.pdf" % (draft.name, draft.rev), 5000)
+
+        t = get_fields(all_id2_txt())
+        self.assertEqual(t[0], draft.name + "-" + draft.rev)
+        self.assertEqual(t[1], "-1")
+        self.assertEqual(t[2], "Active")
+        self.assertEqual(t[3], "Expert Review")
+        self.assertEqual(t[4], "")
+        self.assertEqual(t[5], "")
+        self.assertEqual(t[6], draft.latest_event(type="new_revision").time.strftime("%Y-%m-%d"))
+        self.assertEqual(t[7], draft.group.acronym)
+        self.assertEqual(t[8], draft.group.parent.acronym)
+        self.assertEqual(t[9], unicode(draft.ad))
+        self.assertEqual(t[10], draft.intended_std_level.name)
+        self.assertEqual(t[11], "")
+        self.assertEqual(t[12], ".txt,.pdf")
+        self.assertEqual(t[13], draft.title)
+        author = draft.documentauthor_set.order_by("order").get()
+        self.assertEqual(t[14], "%s <%s>" % (author.author.person.name, author.author.address))
+        self.assertEqual(t[15], "%s <%s>" % (draft.shepherd, draft.shepherd.email_address()))
+        self.assertEqual(t[16], "%s <%s>" % (draft.ad, draft.ad.email_address()))
+
+        # test RFC
+        draft.set_state(State.objects.get(type="draft", slug="rfc"))
+        DocAlias.objects.create(name="rfc1234", document=draft)
+        t = get_fields(all_id2_txt())
+        self.assertEqual(t[4], "1234")
+
+        # test Replaced
+        draft.set_state(State.objects.get(type="draft", slug="repl"))
+        RelatedDocument.objects.create(
+            relationship=DocRelationshipName.objects.get(slug="replaces"),
+            source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
+            target=draft.docalias_set.get(name__startswith="draft"))
+        t = get_fields(all_id2_txt())
+        self.assertEqual(t[5], "draft-test-replacement")
+
+        # test Last Call
+        draft.set_state(State.objects.get(type="draft", slug="active"))
+        draft.set_state(State.objects.get(type="draft-iesg", slug="lc"))
+        e = LastCallDocEvent.objects.create(doc=draft, type="sent_last_call", expires=datetime.datetime.now() + datetime.timedelta(days=14), by=draft.ad)
+        DocAlias.objects.create(name="rfc1234", document=draft)
+        t = get_fields(all_id2_txt())
+        self.assertEqual(t[11], e.expires.strftime("%Y-%m-%d"))
+
+    def test_id_index_txt(self):
+        draft = make_test_data()
+
+        draft.set_state(State.objects.get(type="draft", slug="active"))
+
+        txt = id_index_txt()
+
+        self.assertTrue(draft.name + "-" + draft.rev in txt)
+        self.assertTrue(draft.title in txt)
+        self.assertTrue(draft.abstract[:20] not in txt)
+
+        txt = id_index_txt(with_abstracts=True)
+
+        self.assertTrue(draft.abstract[:20] in txt)
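The write_draft_file() helper matters because file_types_for_drafts() scans settings.INTERNET_DRAFT_PATH; a standalone sketch of that interaction (scratch directory and draft name are hypothetical):

import os, tempfile
from django.conf import settings
from ietf.idindex.index import file_types_for_drafts

settings.INTERNET_DRAFT_PATH = tempfile.mkdtemp()   # the test's setUp() uses tmp-id-dir
name = "draft-ietf-example-foo-03"
for ext in (".txt", ".pdf"):
    open(os.path.join(settings.INTERNET_DRAFT_PATH, name + ext), "w").close()

print file_types_for_drafts()   # {'draft-ietf-example-foo-03': ['.txt', '.pdf']} (order follows os.listdir)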

View file

@@ -1,20 +0,0 @@
301 /drafts/wgid/1041/
404 /drafts/wgid/987654/
301 /drafts/wg/idr/
301 /drafts/rfc/
301 /drafts/current/
301 /drafts/all/
301 /drafts/dead/
#301 /drafts/9574/related/
#301 /drafts/9574/
301 /drafts/draft-ietf-dnsext-dnssec-protocol/related/
301 /drafts/draft-ietf-dnsext-dnssec-protocol/
#404 /drafts/987654/
301 /drafts/all_id_txt.html
301 /drafts/all_id.html
301 /drafts/
#200,heavy /drafts/_test/all_id.txt
# this takes 3 minutes, so disabled by default
#200,heavy /drafts/_test/all_id2.txt
#200,heavy /drafts/_test/id_index.txt
#200,heavy /drafts/_test/id_abstracts.txt

View file

@@ -1,27 +0,0 @@
# Copyright The IETF Trust 2007, All Rights Reserved
from django.conf import settings
from django.conf.urls.defaults import patterns
from ietf.idindex import views
urlpatterns = patterns('',
(r'^$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/'}),
(r'^all/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/'}),
(r'^rfc/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/#rfc'}),
(r'^dead/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/all/#dead'}),
(r'^current/$', 'django.views.generic.simple.redirect_to', { 'url': '/doc/active/'}),
(r'^(?P<object_id>\d+)/(related/)?$', views.redirect_id),
(r'^(?P<filename>[^/]+)/(related/)?$', views.redirect_filename),
(r'^wgid/(?P<id>\d+)/$', views.wgdocs_redirect_id),
(r'^wg/(?P<acronym>[^/]+)/$', views.wgdocs_redirect_acronym),
(r'^all_id(?:_txt)?.html$', 'django.views.generic.simple.redirect_to', { 'url': 'http://www.ietf.org/id/all_id.txt' }),
)
if settings.SERVER_MODE != 'production':
# these haven't been ported
urlpatterns += patterns('',
(r'^_test/all_id.txt$', views.test_all_id_txt),
(r'^_test/all_id2.txt$', views.test_all_id2_txt),
(r'^_test/id_index.txt$', views.test_id_index_txt),
(r'^_test/id_abstracts.txt$', views.test_id_abstracts_txt)
)

View file

@@ -1,199 +0,0 @@
# Copyright The IETF Trust 2007, All Rights Reserved
# Portions Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <pasi.eronen@nokia.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.http import HttpResponse, HttpResponsePermanentRedirect
from django.template import loader
from django.shortcuts import get_object_or_404
from django.conf import settings
from ietf.idtracker.models import Acronym, IETFWG, InternetDraft, IDInternal,PersonOrOrgInfo, Area
from ietf.idtracker.templatetags.ietf_filters import clean_whitespace
import re
import sys
from datetime import datetime as Datetime
import pytz
def all_id_txt():
# we need a distinct to prevent the queries below from multiplying the result
all_ids = InternetDraft.objects.order_by('name').exclude(name__startswith="rfc").distinct()
inactive_states = ["pub", "watching", "dead"]
in_track_ids = all_ids.exclude(states__type="draft", states__slug="rfc").filter(states__type="draft-iesg").exclude(states__type="draft-iesg", states__slug__in=inactive_states)
not_in_track = all_ids.filter(states__type="draft", states__slug="rfc") | all_ids.exclude(states__type="draft-iesg") | all_ids.filter(states__type="draft-iesg", states__slug__in=inactive_states)
active = not_in_track.filter(states__type="draft", states__slug="active")
published = not_in_track.filter(states__type="draft", states__slug="rfc")
expired = not_in_track.filter(states__type="draft", states__slug="expired")
withdrawn_submitter = not_in_track.filter(states__type="draft", states__slug="auth-rm")
withdrawn_ietf = not_in_track.filter(states__type="draft", states__slug="ietf-rm")
replaced = not_in_track.filter(states__type="draft", states__slug="repl")
return loader.render_to_string("idindex/all_ids.txt",
{ 'in_track_ids':in_track_ids,
'active':active,
'published':published,
'expired':expired,
'withdrawn_submitter':withdrawn_submitter,
'withdrawn_ietf':withdrawn_ietf,
'replaced':replaced})
def all_id2_entry(id):
fields = []
# 0
fields.append(id.filename+"-"+id.revision_display())
# 1
fields.append(-1) # this used to be id.id_document_tag, we don't have this identifier anymore
# 2
status = str(id.get_state())
fields.append(status)
# 3
iesgstate = id.idstate() if status=="Active" else ""
fields.append(iesgstate)
# 4
fields.append(id.rfc_number if status=="RFC" else "")
# 5
try:
fields.append(id.replaced_by.filename)
except (AttributeError, InternetDraft.DoesNotExist):
fields.append("")
# 6
fields.append(id.revision_date)
# 7
group_acronym = "" if id.group.type_id == "area" else id.group.acronym
if group_acronym == "none":
group_acronym = ""
fields.append(group_acronym)
# 8
area = ""
if id.group.type_id == "area":
area = id.group.acronym
elif id.group.type_id == "wg" and id.group.parent:
area = id.group.parent.acronym
fields.append(area)
# 9 responsible AD name
fields.append(id.idinternal.job_owner if id.idinternal else "")
# 10
s = id.intended_status
if s and str(s) not in ("None","Request"):
fields.append(str(s))
else:
fields.append("")
# 11
if (iesgstate=="In Last Call") or iesgstate.startswith("In Last Call::"):
fields.append(id.lc_expiration_date)
else:
fields.append("")
# 12
fields.append(id.file_type if status=="Active" else "")
# 13
fields.append(clean_whitespace(id.title))
# 14
authors = []
for author in sorted(id.authors.all(), key=lambda x: x.final_author_order()):
try:
realname = unicode(author.person)
email = author.email() or ""
name = re.sub(u"[<>@,]", u"", realname) + u" <"+re.sub(u"[<>,]", u"", email).strip()+u">"
authors.append(clean_whitespace(name))
except PersonOrOrgInfo.DoesNotExist:
pass
fields.append(u", ".join(authors))
# 15
if id.shepherd:
shepherd = id.shepherd
realname = unicode(shepherd)
email = shepherd.email_address()
name = re.sub(u"[<>@,]", u"", realname) + u" <"+re.sub(u"[<>,]", u"", email).strip()+u">"
else:
name = u""
fields.append(name)
# 16 Responsible AD name and email
if id.ad:
ad = id.ad
realname = unicode(ad)
email = ad.email_address()
name = re.sub(u"[<>@,]", u"", realname) + u" <"+re.sub(u"[<>,]", u"", email).strip()+u">"
else:
name = u""
fields.append(name)
#
return "\t".join([unicode(x) for x in fields])
def all_id2_txt():
all_ids = InternetDraft.objects.order_by('name').exclude(name__startswith="rfc").select_related('group', 'group__parent', 'ad')
data = "\n".join(all_id2_entry(id) for id in all_ids)
return loader.render_to_string("idindex/all_id2.txt",{'data':data})
def id_index_txt():
groups = IETFWG.objects.all()
return loader.render_to_string("idindex/id_index.txt", {'groups':groups})
def id_abstracts_txt():
groups = IETFWG.objects.all()
return loader.render_to_string("idindex/id_abstracts.txt", {'groups':groups, 'time':Datetime.now(pytz.UTC).strftime("%Y-%m-%d %H:%M:%S %Z")})
def test_all_id_txt(request):
return HttpResponse(all_id_txt(), mimetype='text/plain')
def test_all_id2_txt(request):
return HttpResponse(all_id2_txt(), mimetype='text/plain')
def test_id_index_txt(request):
return HttpResponse(id_index_txt(), mimetype='text/plain')
def test_id_abstracts_txt(request):
return HttpResponse(id_abstracts_txt(), mimetype='text/plain')
def redirect_id(request, object_id):
'''Redirect from historical document ID to preferred filename url.'''
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
return HttpResponsePermanentRedirect("/doc/")
doc = get_object_or_404(InternetDraft, id_document_tag=object_id)
return HttpResponsePermanentRedirect("/doc/"+doc.filename+"/")
def redirect_filename(request, filename):
return HttpResponsePermanentRedirect("/doc/"+filename+"/")
def wgdocs_redirect_id(request, id):
if settings.USE_DB_REDESIGN_PROXY_CLASSES:
from ietf.group.models import Group
group = get_object_or_404(Group, id=id)
return HttpResponsePermanentRedirect("/wg/%s/" % group.acronym)
group = get_object_or_404(Acronym, acronym_id=id)
return HttpResponsePermanentRedirect("/wg/"+group.acronym+"/")
def wgdocs_redirect_acronym(request, acronym):
return HttpResponsePermanentRedirect("/wg/"+acronym+"/")

View file

@@ -46,6 +46,7 @@ from ietf.doc.models import *
 from ietf.person.models import *
 from ietf.group.models import *
 from ietf.ipr.models import IprDocAlias
+from ietf.idindex.index import active_drafts_index_by_group

 class SearchForm(forms.Form):
     name = forms.CharField(required=False)
@@ -416,7 +417,8 @@ def index_all_drafts(request):
         names.sort(key=lambda t: t[1])

-        names = ['<a href="/doc/' + name + '/">' + name +'</a>' for name, _ in names if name not in names_to_skip]
+        names = ['<a href="/doc/' + name + '/">' + name +'</a>'
+                 for name, _ in names if name not in names_to_skip]

         categories.append((state,
                            heading,
@@ -427,45 +429,6 @@ def index_all_drafts(request):
                   context_instance=RequestContext(request))

 def index_active_drafts(request):
-    # try to be efficient since this view returns a lot of data
-    active_state = State.objects.get(type="draft", slug="active")
-
-    groups_dict = dict((g.id, g) for g in Group.objects.all())
-
-    docs_dict = dict((d["name"], d)
-                     for d in Document.objects.filter(states=active_state).values("name", "rev", "title", "group_id"))
-
-    # add latest revision time
-    for time, doc_id in NewRevisionDocEvent.objects.filter(type="new_revision", doc__states=active_state).order_by('-time').values_list("time", "doc_id"):
-        d = docs_dict.get(doc_id)
-        if d and "rev_time" not in d:
-            d["rev_time"] = time
-
-    # add authors
-    for a in DocumentAuthor.objects.filter(document__states=active_state).order_by("order").select_related("author__person"):
-        d = docs_dict.get(a.document_id)
-        if d:
-            if "authors" not in d:
-                d["authors"] = []
-            d["authors"].append(unicode(a.author.person))
-
-    # put docs into groups
-    for d in docs_dict.itervalues():
-        g = groups_dict.get(d["group_id"])
-        if not g:
-            continue
-
-        if not hasattr(g, "active_drafts"):
-            g.active_drafts = []
-
-        g.active_drafts.append(d)
-
-    groups = [g for g in groups_dict.itervalues() if hasattr(g, "active_drafts")]
-    groups.sort(key=lambda g: g.acronym)
-
-    fallback_time = datetime.datetime(1990, 1, 1)
-    for g in groups:
-        g.active_drafts.sort(key=lambda d: d.get("rev_time", fallback_time))
+    groups = active_drafts_index_by_group()

     return render_to_response("doc/index_active_drafts.html", { 'groups': groups }, context_instance=RequestContext(request))

View file

@@ -31,9 +31,18 @@ def expand_comma(value):
 def format_charter(value):
     return value.replace("\n\n", "</p><p>").replace("\n","<br/>\n")

-@register.filter(name='indent')
-def indent(value,numspaces=2):
-    return value.replace("\n", "\n"+" "*int(numspaces));
+@register.filter
+def indent(value, numspaces=2):
+    replacement = "\n" + " " * int(numspaces)
+    res = value.replace("\n", replacement)
+    if res.endswith(replacement):
+        res = res[:-int(numspaces)] # fix up superfluous spaces
+    return res
+
+@register.filter
+def unindent(value):
+    """Remove indentation from string."""
+    return re.sub("\n +", "\n", value)

 @register.filter(name='parse_email_list')
 def parse_email_list(value):
@@ -240,6 +249,11 @@ def dashify(string):
     """
     return re.sub('.', '-', string)

+@register.filter
+def underline(string):
+    """Return string with an extra line underneath of dashes, for plain text underlining."""
+    return string + "\n" + ("-" * len(string))
+
 @register.filter(name='lstrip')
 def lstripw(string, chars):
     """Strip matching leading characters from words in string"""
@@ -319,23 +333,6 @@ def wrap_text(text, width=72):
             prev_indent = indent
     return "\n".join(filled)

-@register.filter(name="id_index_file_types")
-def id_index_file_types(text):
-    r = ".txt"
-    if text.find("txt") < 0:
-        return r
-    if text.find("ps") >= 0:
-        r = r + ",.ps"
-    if text.find("pdf") >= 0:
-        r = r + ",.pdf"
-    return r
-
-@register.filter(name="id_index_wrap")
-def id_index_wrap(text):
-    x = wordwrap(text, 72)
-    x = x.replace("\n", "\n ")
-    return " "+x.strip()
-
 @register.filter(name="compress_empty_lines")
 def compress_empty_lines(text):
     text = re.sub("( *\n){3,}", "\n\n", text)
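The new filters are plain functions underneath, so their behaviour is easy to check outside a template (sketch; the group name is made up):

from ietf.idtracker.templatetags.ietf_filters import indent, unindent, underline

print underline("Example Working Group (exwg)")
# Example Working Group (exwg)
# ----------------------------
print indent("line 1\nline 2\n")     # indents continuation lines without leaving trailing spaces
print unindent("line 1\n   line 2")  # strips the leading spaces after newlines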

View file

@@ -4,8 +4,7 @@
 #
 # Description of fields:
 # 0 draft name and latest revision
-# 1 id_document_tag (internal database identifier; avoid using
-# unless you really need it)
+# 1 always -1 (was internal numeric database id in earlier schema)
 # 2 one of "Active", "Expired", "RFC", "Withdrawn by Submitter",
 # "Replaced", or "Withdrawn by IETF"
 # 3 if #2 is "Active", the IESG state for the document (such as

View file

@@ -1,17 +0,0 @@
Internet-Drafts Status Summary
{% for item in in_track_ids %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} In IESG processing - ID Tracker state <{{ item.idstate }}> {# that last tab is on purpose #}
{% endfor %}{%comment%}
{%endcomment%}{% for item in active %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} {# keep that last tab #}
{% endfor %}{%comment%}
{%endcomment%}{% for item in published %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} {{ item.rfc_number }}
{% endfor %}{%comment%}
{%endcomment%}{% for item in expired %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} {# keep that last tab #}
{% endfor %}{%comment%}
{%endcomment%}{% for item in withdrawn_submitter %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} {# keep that last tab #}
{% endfor %}{%comment%}
{%endcomment%}{% for item in withdrawn_ietf %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} {# keep that last tab #}
{% endfor %}{%comment%}
{%endcomment%}{% for item in replaced %}{{ item.filename }}-{{ item.revision_display }} {{ item.revision_date|default_if_none:"" }} {{ item.status.status }} replaced by {% if item.replaced_by_id %}{{ item.replaced_by.filename }}{% else %}0{% endif %} {# and this one needs the trailing tab as well #}
{% endfor %}

View file

@@ -1,9 +0,0 @@
{% extends "idindex/id_index.txt" %}{% load ietf_filters %} {% block intro %} Current Internet-Drafts
This summary sheet provides a short synopsis of each Internet-Draft
available within the "internet-drafts" directory at the shadow
sites directory. These drafts are listed alphabetically by working
group acronym and start date. Generated {{ time }}
{% endblock %}{% block abstract %}
{{ draft.clean_abstract|indent|indent|safe }}{% endblock %}

View file

@@ -1,13 +1,12 @@
-{% autoescape off %}{% load ietf_filters %}{% block intro %} Current Internet-Drafts
-This summary sheet provides an index of each Internet-Draft
-These drafts are listed alphabetically by Working Group acronym and
-initial post date.
-{% endblock %}
-{% for group in groups|dictsort:"group_acronym.acronym" %}{% if group.active_drafts %}
-{{ group.group_acronym.name }} ({{ group.group_acronym.acronym}})
-{% filter dashify %}{{ group.group_acronym.name }} ({{ group.group_acronym.acronym}}){% endfilter %}
-{% for draft in group.active_drafts|stable_dictsort:"filename"|stable_dictsort:"start_date" %}
-{% filter id_index_wrap %}
-"{{draft.title.strip|clean_whitespace}}", {% for author in draft.authors.all|dictsort:"final_author_order" %}{{author.person}}, {% endfor %}{{draft.revision_date|date:"j-M-y"}}, <{{draft.filename}}-{{draft.revision}}{{draft.file_type|id_index_file_types}}>
-{% endfilter %}{% block abstract %}{% endblock %}
-{% endfor %}{%endif %}{% endfor %}{% endautoescape %}
+{% autoescape off %}{% load ietf_filters %} Current Internet-Drafts
+This summary sheet provides an index of each Internet-Draft. These
+drafts are listed alphabetically by Working Group acronym and initial
+post date. Generated {{ time }}.
+{% for group in groups %}
+{% filter underline %}{{ group.name }} ({{ group.acronym }}){% endfilter %}
+{% for d in group.active_drafts %}
+{% filter wordwrap:72|indent:2 %}"{{ d.title|clean_whitespace }}", {% for a in d.authors %}{{ a }}, {% endfor %}{{ d.rev_time|date:"Y-m-d"}}, <{{ d.name }}-{{ d.rev }}{{ d.exts }}>
+{% endfilter %}{% if with_abstracts %}
+{{ d.abstract.strip|unindent|fill:72|indent:6 }}{% endif %}{% endfor %}{% endfor %}{% endautoescape %}

View file

@@ -51,7 +51,7 @@ urlpatterns = patterns('',
     (r'^community/', include('ietf.community.urls')),
     (r'^cookies/', include('ietf.cookies.urls')),
     (r'^doc/', include('ietf.idrfc.urls')),
-    (r'^drafts/', include('ietf.idindex.urls')),
+    (r'^drafts/', include('ietf.doc.redirect_drafts_urls')),
     (r'^feed/(?P<url>.*)/$', 'django.contrib.syndication.views.feed', { 'feed_dict': feeds}),
     (r'^idtracker/', include('ietf.idtracker.urls')),
     (r'^iesg/', include('ietf.iesg.urls')),

View file

@@ -399,6 +399,7 @@ def make_test_data():
     docalias = DocAlias.objects.create(name=doc.name, document=doc)
     doc.stream = StreamName.objects.get(slug='irtf')
     doc.save()
+    doc.set_state(State.objects.get(type="draft", slug="active"))
     crdoc = Document.objects.create(name='conflict-review-imaginary-irtf-submission', type_id='conflrev', rev='00', notify="fsm@ietf.org")
     DocAlias.objects.create(name=crdoc.name, document=crdoc)
     crdoc.set_state(State.objects.get(name='Needs Shepherd', type__slug='conflrev'))