chore: more bulk change damage repair
parent 90ca856afa
commit c631cb17cb

@@ -111,7 +111,7 @@ admin.site.register(DocHistory, DocHistoryAdmin)
 class DocAliasAdmin(admin.ModelAdmin):
     list_display = ['name', 'targets']
-    search_fields = ['name', 'docs__name']
+    search_fields = ['name']
     raw_id_fields = ['docs']
     def targets(self, obj):
         return ', '.join([o.name for o in obj.docs.all()])

@@ -8,7 +8,7 @@ def forward(apps, schema_editor):
     Document = apps.get_model("doc", "Document")
     RelatedDocument = apps.get_model("doc", "RelatedDocument")
     for rfc_alias in DocAlias.objects.filter(name__startswith="rfc").exclude(
-        docs__type__slug="rfc"
+        type_id="rfc"
     ):
         # Move these over to the RFC
         RelatedDocument.objects.filter(

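Note on this migration hunk: in a Django data migration, apps.get_model hands back the historical model state, and the repaired exclude(type_id="rfc") filters on the document's own type column instead of traversing the old alias relation. A minimal sketch of the same pattern, using only names visible in the hunk; for simplicity it iterates Document rather than the DocAlias queryset shown above, and the loop body and dependencies are placeholders, not the project's real code:

    from django.db import migrations

    def forward(apps, schema_editor):
        # Historical model class, frozen at this migration's point in history.
        Document = apps.get_model("doc", "Document")
        # Filter on Document's own type_id column; no join through a "docs" relation.
        for doc in Document.objects.filter(name__startswith="rfc").exclude(type_id="rfc"):
            pass  # per-document repair would go here

    class Migration(migrations.Migration):
        dependencies = []  # placeholder; the real migration declares its predecessor
        operations = [migrations.RunPython(forward, migrations.RunPython.noop)]
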
@@ -939,8 +939,9 @@ class Document(DocumentInfo):

     def ipr(self,states=('posted','removed')):
         """Returns the IPR disclosures against this document (as a queryset over IprDocRel)."""
-        from ietf.ipr.models import IprDocRel
-        return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states)
+        # from ietf.ipr.models import IprDocRel
+        # return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states) # TODO - clear these comments away
+        return self.iprdocrel_set.filter(disclosure__state__in=states)

     def related_ipr(self):
         """Returns the IPR disclosures against this document and those documents this

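Note on the ipr() rewrite: the commented-out query reached IprDocRel through the removed document__docs alias join, while the new return line relies on the reverse accessor Django generates for a plain ForeignKey. A hedged sketch of the assumption behind self.iprdocrel_set; the declarations below are illustrative, not copied from ietf.ipr.models:

    from django.db import models

    class IprDocRel(models.Model):
        # A ForeignKey with no related_name gives Document the default
        # reverse accessor "iprdocrel_set".
        document = models.ForeignKey("doc.Document", on_delete=models.CASCADE)
        disclosure = models.ForeignKey("ipr.IprDisclosureBase", on_delete=models.CASCADE)

        class Meta:
            app_label = "ipr"  # sketch only; the real model lives in the ipr app

    # Under that assumption, these two spellings return the same rows:
    #   IprDocRel.objects.filter(document=doc, disclosure__state__in=states)
    #   doc.iprdocrel_set.filter(disclosure__state__in=states)
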
@@ -146,7 +146,7 @@ def doc_name(name):
     key = hash(n)
     found = cache.get(key)
     if not found:
-        exact = Document.objects.filter(name=n)
+        exact = Document.objects.filter(name=n).first()
         found = exact.name if exact else "_"
         # TODO review this cache policy (and the need for these entire function)
         cache.set(key, found, timeout=60*60*24) # cache for one day

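Note on the doc_name fix: Document.objects.filter(name=n) returns a QuerySet, which has no .name attribute, so the old line raised AttributeError whenever the lookup matched; .first() returns a single Document or None, which is what the following line expects. A tiny illustration (the draft name is hypothetical):

    qs = Document.objects.filter(name="draft-example-00")  # hypothetical name
    # qs.name would raise AttributeError: a QuerySet exposes no model fields.
    exact = qs.first()                   # one Document, or None when nothing matches
    found = exact.name if exact else "_"
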
@@ -250,14 +250,12 @@ def urlize_ietf_docs(string, autoescape=None):
         string,
         flags=re.IGNORECASE | re.ASCII,
     )
-    debug.show('string')
     string = re.sub(
         r"\b(?<![/\-:=#\"\'])((RFC|BCP|STD|FYI) *\n? *0*(\d+))\b",
         link_other_doc_match,
         string,
         flags=re.IGNORECASE | re.ASCII,
     )
-    debug.show('string')

     return mark_safe(string)

@@ -3,7 +3,7 @@
 from django.conf import settings

 from ietf.doc.factories import (
-    WgDraftFactory,
+    WgRfcFactory,
     IndividualDraftFactory,
     CharterFactory,
     NewRevisionDocEventFactory,

@@ -25,7 +25,7 @@ class IetfFiltersTests(TestCase):
         self.assertEqual(is_valid_url(url), result)

     def test_urlize_ietf_docs(self):
-        rfc = WgDraftFactory(rfc_number=123456,std_level_id="bcp")
+        rfc = WgRfcFactory(rfc_number=123456,std_level_id="bcp")
         rfc.save_with_history(
             [
                 DocEvent.objects.create(

@@ -57,12 +57,14 @@ class IetfFiltersTests(TestCase):

         cases = [
             ("no change", "no change"),
-            ("bCp123456", '<a href="/doc/bcp123456/">bCp123456</a>'),
-            ("Std 00123456", '<a href="/doc/std123456/">Std 00123456</a>'),
-            (
-                "FyI 0123456 changes std 00123456",
-                '<a href="/doc/fyi123456/">FyI 0123456</a> changes <a href="/doc/std123456/">std 00123456</a>',
-            ),
+            # TODO: rework subseries when we add them
+            # ("bCp123456", '<a href="/doc/bcp123456/">bCp123456</a>'),
+            # ("Std 00123456", '<a href="/doc/std123456/">Std 00123456</a>'),
+            # (
+            #     "FyI 0123456 changes std 00123456",
+            #     '<a href="/doc/fyi123456/">FyI 0123456</a> changes <a href="/doc/std123456/">std 00123456</a>',
+            # ),
             ("rfc123456", '<a href="/doc/rfc123456/">rfc123456</a>'),
             ("Rfc 0123456", '<a href="/doc/rfc123456/">Rfc 0123456</a>'),
             (rfc.name, f'<a href="/doc/{rfc.name}/">{rfc.name}</a>'),

@@ -922,7 +922,7 @@ class ReviewTests(TestCase):
             date_today().isoformat(),
         ]
         review_name = "-".join(c for c in name_components if c).lower()
-        Document.objects.create(name=review_name,type_id='review',group=assignment.review_request.team)
+        d = Document.objects.create(name=review_name,type_id='review',group=assignment.review_request.team)

         r = self.client.post(url, data={
             "result": ReviewResultName.objects.get(reviewteamsettings_review_results_set__group=assignment.review_request.team, slug="ready").pk,

@@ -1175,7 +1175,7 @@ def get_diff_revisions(request, name, doc):
             )
         )

-    if doc.tyoe_id == "rfc":
+    if doc.type_id == "rfc":
         e = doc.latest_event(type="published_rfc")
         diff_revisions.append((name, "", e.time if e else doc.time, name))

@@ -254,7 +254,7 @@ def search_for_name(request, name):

     startswith = Document.objects.filter(name__istartswith=n)[:2]
     if len(startswith) == 1:
-        return startswith.name
+        return startswith[0].name

     contains = Document.objects.filter(name__icontains=n)[:2]
     if len(contains) == 1:

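Note on the search_for_name fix: startswith is a sliced QuerySet, so it has no .name attribute either; len() evaluates the slice and caches the rows, and startswith[0] then reads the single matching Document from that cache without another query. Shown in isolation, with a hypothetical prefix:

    startswith = Document.objects.filter(name__istartswith="draft-foo")[:2]  # hypothetical prefix
    if len(startswith) == 1:          # len() runs the query and caches the rows
        name = startswith[0].name     # indexes the cached result, a Document instance
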
@@ -818,7 +818,7 @@ def index_all_drafts(request): # Should we rename this
         else:
             heading = "%s Internet-Drafts" % state.name

-        drafts = Document.objects.filter(docs__type_id="draft", docs__states=state).order_by("name")
+        drafts = Document.objects.filter(type_id="draft", states=state).order_by("name")

         names = [
             f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=doc.name))}\">{doc.name}</a>'

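Note on the recurring pattern here and in the hunks below: lookups that still spelled Document fields through the old alias relation (docs__type_id, docs__states, docs__group, docs__title) now name the fields directly. One way to confirm in a Django shell that a repaired queryset filters Document's own columns instead of joining an extra table; the table and column names in the comment are assumptions based on the app label:

    drafts = Document.objects.filter(type_id="draft", states=state).order_by("name")
    print(drafts.query)  # the printed SQL should reference doc_document.type_id directly
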
@@ -832,9 +832,9 @@ def index_all_drafts(request): # Should we rename this
     ))

     # gather RFCs
-    rfcs = Document.objects.filter(docs__type_id="rfc").order_by('-rfc_number')
+    rfcs = Document.objects.filter(type_id="rfc").order_by('-rfc_number')
     names = [
-        f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))}\">{rfc.name}</a>'
+        f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))}\">{rfc.name.upper()}</a>'
         for rfc in rfcs
     ]

@@ -45,7 +45,7 @@ def related_docs(doc, relationship=('replaces', 'obs')):

     results = [doc]

-    rels = list(doc.document.all_relations_that_doc(relationship))
+    rels = list(doc.all_relations_that_doc(relationship))

     for rel in rels:
         rel.target.related = rel

@@ -709,13 +709,13 @@ def search(request):
         # Search by wg acronym
         # Document list with IPRs
         elif search_type == "group":
-            docs = list(Document.objects.filter(docs__group=q))
+            docs = list(Document.objects.filter(group=q))
             related = []
             for doc in docs:
                 doc.product_of_this_wg = True
                 related += related_docs(doc)
             iprs = iprs_from_docs(list(set(docs+related)),states=states)
-            docs = [ doc for doc in docs if doc.document.ipr() ]
+            docs = [ doc for doc in docs if doc.ipr() ]
             docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True)
             template = "ipr/search_wg_result.html"
             q = Group.objects.get(id=q).acronym # make acronym for use in template

@@ -723,12 +723,12 @@ def search(request):
         # Search by rfc and id title
         # Document list with IPRs
         elif search_type == "doctitle":
-            docs = list(Document.objects.filter(docs__title__icontains=q))
+            docs = list(Document.objects.filter(title__icontains=q))
             related = []
             for doc in docs:
                 related += related_docs(doc)
             iprs = iprs_from_docs(list(set(docs+related)),states=states)
-            docs = [ doc for doc in docs if doc.document.ipr() ]
+            docs = [ doc for doc in docs if doc.ipr() ]
             docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True)
             template = "ipr/search_doctitle_result.html"

@@ -214,13 +214,13 @@ def document_stats(request, stats_type=None):

     if any(stats_type == t[0] for t in possible_document_stats_types):
         # filter documents
-        document_filters = Q(docs__type="draft")
+        document_filters = Q(type__in=["draft","rfc"]) # TODO - review lots of "rfc is a draft" assumptions below

         rfc_state = State.objects.get(type="draft", slug="rfc")
         if document_type == "rfc":
-            document_filters &= Q(docs__states=rfc_state)
+            document_filters &= Q(states=rfc_state)
         elif document_type == "draft":
-            document_filters &= ~Q(docs__states=rfc_state)
+            document_filters &= ~Q(states=rfc_state)

         if from_time:
             # this is actually faster than joining in the database,

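Note on the rewritten stats filters: they compose django.db.models.Q objects, where &= narrows the filter with an AND and ~ negates a condition. A minimal sketch reusing the same names as the view above:

    from django.db.models import Q

    document_filters = Q(type__in=["draft", "rfc"])
    if document_type == "rfc":
        document_filters &= Q(states=rfc_state)    # AND: document carries the rfc state
    elif document_type == "draft":
        document_filters &= ~Q(states=rfc_state)   # AND NOT: document lacks that state
    document_qs = Document.objects.filter(document_filters)
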
@@ -231,7 +231,7 @@ def document_stats(request, stats_type=None):
                 docevent__type__in=["published_rfc", "new_revision"],
             ).values_list("pk"))

-            document_filters &= Q(docs__in=docs_within_time_constraint)
+            document_filters &= Q(pk__in=docs_within_time_constraint)

         document_qs = Document.objects.filter(document_filters)

@@ -58,7 +58,7 @@
 <tbody>
     <tr>
         <th scope="col" class="table-info" colspan="3">
-            Results for {{ doc.name|prettystdname|urlize_ietf_docs }} ("{{ doc.document.title }}"){% if not forloop.first %}{% if doc.related %}, which was {{ doc.relation|lower }} {{ doc.related.source|prettystdname|urlize_ietf_docs }} ("{{ doc.related.source.title }}"){% endif %}{% endif %}
+            Results for {{ doc.name|prettystdname|urlize_ietf_docs }} ("{{ doc.title }}"){% if not forloop.first %}{% if doc.related %}, which was {{ doc.relation|lower }} {{ doc.related.source|prettystdname|urlize_ietf_docs }} ("{{ doc.related.source.title }}"){% endif %}{% endif %}
         </th>
     </tr>
 </tbody>