diff --git a/.github/workflows/ci-run-tests.yml b/.github/workflows/ci-run-tests.yml index 346dd97b4..9121bf8ae 100644 --- a/.github/workflows/ci-run-tests.yml +++ b/.github/workflows/ci-run-tests.yml @@ -4,6 +4,7 @@ on: pull_request: branches: - 'main' + - 'feat/rfc' paths: - 'client/**' - 'ietf/**' diff --git a/dev/deploy-to-container/start.sh b/dev/deploy-to-container/start.sh index 5621c68fa..271c54a43 100644 --- a/dev/deploy-to-container/start.sh +++ b/dev/deploy-to-container/start.sh @@ -36,7 +36,10 @@ echo "Running Datatracker checks..." # Migrate, adjusting to what the current state of the underlying database might be: echo "Running Datatracker migrations..." -/usr/local/bin/python ./ietf/manage.py migrate --fake-initial --settings=settings_local +/usr/local/bin/python ./ietf/manage.py migrate --settings=settings_local + +echo "Syncing with the rfc-index" +./ietf/bin/rfc-editor-index-updates -d 1969-01-01 echo "Starting Datatracker..." ./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local diff --git a/bin/add-old-drafts-from-archive.py b/dev/legacy/add-old-drafts-from-archive.py similarity index 100% rename from bin/add-old-drafts-from-archive.py rename to dev/legacy/add-old-drafts-from-archive.py diff --git a/ietf/bin/recalculate-rfc-authors-snapshot b/dev/legacy/recalculate-rfc-authors-snapshot similarity index 100% rename from ietf/bin/recalculate-rfc-authors-snapshot rename to dev/legacy/recalculate-rfc-authors-snapshot diff --git a/ietf/api/tests.py b/ietf/api/tests.py index 24d76a6a9..e61069b3f 100644 --- a/ietf/api/tests.py +++ b/ietf/api/tests.py @@ -24,7 +24,7 @@ import debug # pyflakes:ignore import ietf from ietf.doc.utils import get_unicode_document_content from ietf.doc.models import RelatedDocument, State -from ietf.doc.factories import IndividualDraftFactory, WgDraftFactory +from ietf.doc.factories import IndividualDraftFactory, WgDraftFactory, WgRfcFactory from ietf.group.factories import RoleFactory from ietf.meeting.factories import MeetingFactory, SessionFactory from ietf.meeting.models import Session @@ -944,7 +944,7 @@ class RfcdiffSupportTests(TestCase): self.assertNotIn('previous', received, 'Rev 00 has no previous name when not replacing a draft') replaced = IndividualDraftFactory() - RelatedDocument.objects.create(relationship_id='replaces',source=draft,target=replaced.docalias.first()) + RelatedDocument.objects.create(relationship_id='replaces',source=draft,target=replaced) received = self.getJson(dict(name=draft.name, rev='00')) self.assertEqual(received['previous'], f'{replaced.name}-{replaced.rev}', 'Rev 00 has a previous name when replacing a draft') @@ -974,19 +974,19 @@ class RfcdiffSupportTests(TestCase): def do_rfc_test(self, draft_name): draft = WgDraftFactory(name=draft_name, create_revisions=range(0,2)) - draft.docalias.create(name=f'rfc{self.next_rfc_number():04}') + rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number()) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) draft.set_state(State.objects.get(type_id='draft',slug='rfc')) draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub')) - draft = reload_db_objects(draft) - rfc = draft + draft, rfc = reload_db_objects(draft, rfc) - number = rfc.rfc_number() + number = rfc.rfc_number received = self.getJson(dict(name=number)) self.assertEqual( received, dict( content_url=rfc.get_href(), - name=rfc.canonical_name(), + name=rfc.name, previous=f'{draft.name}-{draft.rev}', previous_url= 
draft.history_set.get(rev=draft.rev).get_href(), ), @@ -994,7 +994,7 @@ class RfcdiffSupportTests(TestCase): ) num_received = received - received = self.getJson(dict(name=rfc.canonical_name())) + received = self.getJson(dict(name=rfc.name)) self.assertEqual(num_received, received, 'RFC by canonical name gives same result as by number') received = self.getJson(dict(name=f'RfC {number}')) @@ -1026,30 +1026,30 @@ class RfcdiffSupportTests(TestCase): def test_rfc_with_tombstone(self): draft = WgDraftFactory(create_revisions=range(0,2)) - draft.docalias.create(name='rfc3261') # See views_doc.HAS_TOMBSTONE + rfc = WgRfcFactory(rfc_number=3261,group=draft.group)# See views_doc.HAS_TOMBSTONE + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) draft.set_state(State.objects.get(type_id='draft',slug='rfc')) draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub')) draft = reload_db_objects(draft) - rfc = draft # Some old rfcs had tombstones that shouldn't be used for comparisons - received = self.getJson(dict(name=rfc.canonical_name())) + received = self.getJson(dict(name=rfc.name)) self.assertTrue(received['previous'].endswith('00')) def do_rfc_with_broken_history_test(self, draft_name): draft = WgDraftFactory(rev='10', name=draft_name) - draft.docalias.create(name=f'rfc{self.next_rfc_number():04}') + rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number()) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) draft.set_state(State.objects.get(type_id='draft',slug='rfc')) draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub')) draft = reload_db_objects(draft) - rfc = draft received = self.getJson(dict(name=draft.name)) self.assertEqual( received, dict( content_url=rfc.get_href(), - name=rfc.canonical_name(), + name=rfc.name, previous=f'{draft.name}-10', previous_url= f'{settings.IETF_ID_ARCHIVE_URL}{draft.name}-10.txt', ), @@ -1080,3 +1080,9 @@ class RfcdiffSupportTests(TestCase): # tricky draft names self.do_rfc_with_broken_history_test(draft_name='draft-gizmo-01') self.do_rfc_with_broken_history_test(draft_name='draft-oh-boy-what-a-draft-02-03') + + def test_no_such_document(self): + for name in ['rfc0000', 'draft-ftei-oof-rab-00']: + url = urlreverse(self.target_view, kwargs={'name': name}) + r = self.client.get(url) + self.assertEqual(r.status_code, 404) diff --git a/ietf/api/views.py b/ietf/api/views.py index f6221b5e2..9d832f6fa 100644 --- a/ietf/api/views.py +++ b/ietf/api/views.py @@ -317,12 +317,9 @@ def get_previous_url(name, rev=None): previous_url = '' if condition in ('historic version', 'current version'): doc = history if history else document - if found_rev: - doc.is_rfc = lambda: False previous_url = doc.get_href() elif condition == 'version dochistory not found': document.rev = found_rev - document.is_rfc = lambda: False previous_url = document.get_href() return previous_url @@ -330,32 +327,38 @@ def get_previous_url(name, rev=None): def rfcdiff_latest_json(request, name, rev=None): response = dict() condition, document, history, found_rev = find_doc_for_rfcdiff(name, rev) - + if document and document.type_id == "rfc": + draft = document.came_from_draft() if condition == 'no such document': raise Http404 elif condition in ('historic version', 'current version'): doc = history if history else document - if not found_rev and doc.is_rfc(): - response['content_url'] = doc.get_href() - response['name']=doc.canonical_name() - if doc.name != doc.canonical_name(): + if doc.type_id == "rfc": + 
response['content_url'] = doc.get_href() + response['name']=doc.name + if draft: + prev_rev = draft.rev + if doc.rfc_number in HAS_TOMBSTONE and prev_rev != '00': + prev_rev = f'{(int(draft.rev)-1):02d}' + response['previous'] = f'{draft.name}-{prev_rev}' + response['previous_url'] = get_previous_url(draft.name, prev_rev) + elif doc.type_id == "draft" and not found_rev and doc.relateddocument_set.filter(relationship_id="became_rfc").exists(): + rfc = doc.related_that_doc("became_rfc")[0] + response['content_url'] = rfc.get_href() + response['name']=rfc.name prev_rev = doc.rev - # not sure what to do if non-numeric values come back, so at least log it - log.assertion('doc.rfc_number().isdigit()') # .rfc_number() is expensive... - log.assertion('doc.rev.isdigit()') - if int(doc.rfc_number()) in HAS_TOMBSTONE and prev_rev != '00': + if rfc.rfc_number in HAS_TOMBSTONE and prev_rev != '00': prev_rev = f'{(int(doc.rev)-1):02d}' response['previous'] = f'{doc.name}-{prev_rev}' response['previous_url'] = get_previous_url(doc.name, prev_rev) else: - doc.is_rfc = lambda: False response['content_url'] = doc.get_href() response['rev'] = doc.rev response['name'] = doc.name if doc.rev == '00': replaces_docs = (history.doc if condition=='historic version' else doc).related_that_doc('replaces') if replaces_docs: - replaces = replaces_docs[0].document + replaces = replaces_docs[0] response['previous'] = f'{replaces.name}-{replaces.rev}' response['previous_url'] = get_previous_url(replaces.name, replaces.rev) else: @@ -374,7 +377,6 @@ def rfcdiff_latest_json(request, name, rev=None): response['name'] = document.name response['rev'] = found_rev document.rev = found_rev - document.is_rfc = lambda: False response['content_url'] = document.get_href() # not sure what to do if non-numeric values come back, so at least log it log.assertion('found_rev.isdigit()') diff --git a/ietf/bin/rfc-editor-index-updates b/ietf/bin/rfc-editor-index-updates index dc7abe26b..c3e8f1f46 100755 --- a/ietf/bin/rfc-editor-index-updates +++ b/ietf/bin/rfc-editor-index-updates @@ -79,12 +79,12 @@ if len(errata_data) < ietf.sync.rfceditor.MIN_ERRATA_RESULTS: sys.exit(1) new_rfcs = [] -for changes, doc, rfc_published in ietf.sync.rfceditor.update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=skip_date): +for rfc_number, changes, doc, rfc_published in ietf.sync.rfceditor.update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=skip_date): if rfc_published: new_rfcs.append(doc) for c in changes: - log("RFC%s, %s: %s" % (doc.rfcnum, doc.name, c)) + log("RFC%s, %s: %s" % (rfc_number, doc.name, c)) sys.exit(0) @@ -99,7 +99,7 @@ if newpid == 0: pipe("%s -a %s %s" % (settings.RSYNC_BINARY,settings.RFC_TEXT_RSYNC_SOURCE,settings.RFC_PATH)) for rfc in new_rfcs: rebuild_reference_relations(rfc) - log("Updated references for %s"%rfc.canonical_name()) + log("Updated references for %s"%rfc.name) except: subject = "Exception in updating references for new rfcs: %s : %s" % (sys.exc_info()[0],sys.exc_info()[1]) msg = "%s\n%s\n----\n%s"%(sys.exc_info()[0],sys.exc_info()[1],traceback.format_tb(sys.exc_info()[2])) diff --git a/ietf/community/forms.py b/ietf/community/forms.py index 8d72ce0d7..ad8570896 100644 --- a/ietf/community/forms.py +++ b/ietf/community/forms.py @@ -30,6 +30,8 @@ class SearchRuleForm(forms.ModelForm): super(SearchRuleForm, self).__init__(*args, **kwargs) def restrict_state(state_type, slug=None): + if "state" not in self.fields: + raise RuntimeError(f"Rule type {rule_type} cannot include state 
filtering") f = self.fields['state'] f.queryset = f.queryset.filter(used=True).filter(type=state_type) if slug: @@ -38,11 +40,15 @@ class SearchRuleForm(forms.ModelForm): f.initial = f.queryset[0].pk f.widget = forms.HiddenInput() + if rule_type.endswith("_rfc"): + del self.fields["state"] # rfc rules must not look at document states + if rule_type in ["group", "group_rfc", "area", "area_rfc", "group_exp"]: if rule_type == "group_exp": restrict_state("draft", "expired") else: - restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active") + if not rule_type.endswith("_rfc"): + restrict_state("draft", "active") if rule_type.startswith("area"): self.fields["group"].label = "Area" @@ -70,7 +76,8 @@ class SearchRuleForm(forms.ModelForm): del self.fields["text"] elif rule_type in ["author", "author_rfc", "shepherd", "ad"]: - restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active") + if not rule_type.endswith("_rfc"): + restrict_state("draft", "active") if rule_type.startswith("author"): self.fields["person"].label = "Author" @@ -84,7 +91,8 @@ class SearchRuleForm(forms.ModelForm): del self.fields["text"] elif rule_type == "name_contains": - restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active") + if not rule_type.endswith("_rfc"): + restrict_state("draft", "active") del self.fields["person"] del self.fields["group"] diff --git a/ietf/community/migrations/0003_track_rfcs.py b/ietf/community/migrations/0003_track_rfcs.py new file mode 100644 index 000000000..3c2d04097 --- /dev/null +++ b/ietf/community/migrations/0003_track_rfcs.py @@ -0,0 +1,50 @@ +# Generated by Django 4.2.3 on 2023-07-07 18:33 + +from django.db import migrations + + +def forward(apps, schema_editor): + """Track any RFCs that were created from tracked drafts""" + CommunityList = apps.get_model("community", "CommunityList") + RelatedDocument = apps.get_model("doc", "RelatedDocument") + + # Handle individually tracked documents + for cl in CommunityList.objects.all(): + for rfc in set( + RelatedDocument.objects.filter( + source__in=cl.added_docs.all(), + relationship__slug="became_rfc", + ).values_list("target__docs", flat=True) + ): + cl.added_docs.add(rfc) + + # Handle rules - rules ending with _rfc should no longer filter by state. + # There are 9 CommunityLists with invalid author_rfc rules that are filtering + # by (draft, active) instead of (draft, rfc) state before migration. All but one + # also includes an author rule for (draft, active), so these will start following + # RFCs as well. The one exception will start tracking RFCs instead of I-Ds, which + # is probably what was intended, but will be a change in their user experience. 
+ SearchRule = apps.get_model("community", "SearchRule") + rfc_rules = SearchRule.objects.filter(rule_type__endswith="_rfc") + rfc_rules.update(state=None) + +def reverse(apps, schema_editor): + Document = apps.get_model("doc", "Document") + for rfc in Document.objects.filter(type__slug="rfc"): + rfc.communitylist_set.clear() + + # See the comment above regarding author_rfc + SearchRule = apps.get_model("community", "SearchRule") + State = apps.get_model("doc", "State") + SearchRule.objects.filter(rule_type__endswith="_rfc").update( + state=State.objects.get(type_id="draft", slug="rfc") + ) + + +class Migration(migrations.Migration): + dependencies = [ + ("community", "0002_auto_20230320_1222"), + ("doc", "0014_move_rfc_docaliases"), + ] + + operations = [migrations.RunPython(forward, reverse)] diff --git a/ietf/community/tests.py b/ietf/community/tests.py index 3dd86f70e..ee5827ee2 100644 --- a/ietf/community/tests.py +++ b/ietf/community/tests.py @@ -41,7 +41,7 @@ class CommunityListTests(WebTest): clist = CommunityList.objects.create(user=User.objects.get(username="plain")) rule_group = SearchRule.objects.create(rule_type="group", group=draft.group, state=State.objects.get(type="draft", slug="active"), community_list=clist) - rule_group_rfc = SearchRule.objects.create(rule_type="group_rfc", group=draft.group, state=State.objects.get(type="draft", slug="rfc"), community_list=clist) + rule_group_rfc = SearchRule.objects.create(rule_type="group_rfc", group=draft.group, state=State.objects.get(type="rfc", slug="published"), community_list=clist) rule_area = SearchRule.objects.create(rule_type="area", group=draft.group.parent, state=State.objects.get(type="draft", slug="active"), community_list=clist) rule_state_iesg = SearchRule.objects.create(rule_type="state_iesg", state=State.objects.get(type="draft-iesg", slug="lc"), community_list=clist) @@ -151,7 +151,7 @@ class CommunityListTests(WebTest): "action": "add_rule", "rule_type": "author_rfc", "author_rfc-person": Person.objects.filter(documentauthor__document=draft).first().pk, - "author_rfc-state": State.objects.get(type="draft", slug="rfc").pk, + "author_rfc-state": State.objects.get(type="rfc", slug="published").pk, }) self.assertEqual(r.status_code, 302) clist = CommunityList.objects.get(user__username="plain") @@ -408,4 +408,4 @@ class CommunityListTests(WebTest): self.assertEqual(len(outbox), mailbox_before + 1) self.assertTrue(draft.name in outbox[-1]["Subject"]) - \ No newline at end of file + diff --git a/ietf/community/utils.py b/ietf/community/utils.py index 8130954b9..f411af6a5 100644 --- a/ietf/community/utils.py +++ b/ietf/community/utils.py @@ -60,7 +60,7 @@ def reset_name_contains_index_for_rule(rule): if not rule.rule_type == "name_contains": return - rule.name_contains_index.set(Document.objects.filter(docalias__name__regex=rule.text)) + rule.name_contains_index.set(Document.objects.filter(name__regex=rule.text)) def update_name_contains_indexes_with_new_doc(doc): for r in SearchRule.objects.filter(rule_type="name_contains"): @@ -71,70 +71,103 @@ def update_name_contains_indexes_with_new_doc(doc): if re.search(r.text, doc.name) and not doc in r.name_contains_index.all(): r.name_contains_index.add(doc) + def docs_matching_community_list_rule(rule): docs = Document.objects.all() + + if rule.rule_type.endswith("_rfc"): + docs = docs.filter(type_id="rfc") # rule.state is ignored for RFCs + else: + docs = docs.filter(type_id="draft", states=rule.state) + if rule.rule_type in ['group', 'area', 'group_rfc', 'area_rfc']: - 
return docs.filter(Q(group=rule.group_id) | Q(group__parent=rule.group_id), states=rule.state) + return docs.filter(Q(group=rule.group_id) | Q(group__parent=rule.group_id)) elif rule.rule_type in ['group_exp']: - return docs.filter(group=rule.group_id, states=rule.state) + return docs.filter(group=rule.group_id) elif rule.rule_type.startswith("state_"): - return docs.filter(states=rule.state) + return docs elif rule.rule_type in ["author", "author_rfc"]: - return docs.filter(states=rule.state, documentauthor__person=rule.person) + return docs.filter(documentauthor__person=rule.person) elif rule.rule_type == "ad": - return docs.filter(states=rule.state, ad=rule.person) + return docs.filter(ad=rule.person) elif rule.rule_type == "shepherd": - return docs.filter(states=rule.state, shepherd__person=rule.person) + return docs.filter(shepherd__person=rule.person) elif rule.rule_type == "name_contains": - return docs.filter(states=rule.state, searchrule=rule) + return docs.filter(searchrule=rule) raise NotImplementedError + def community_list_rules_matching_doc(doc): + rules = SearchRule.objects.none() + if doc.type_id not in ["draft", "rfc"]: + return rules # none states = list(doc.states.values_list("pk", flat=True)) - rules = SearchRule.objects.none() - + # group and area rules if doc.group_id: groups = [doc.group_id] if doc.group.parent_id: groups.append(doc.group.parent_id) + rules_to_add = SearchRule.objects.filter(group__in=groups) + if doc.type_id == "rfc": + rules_to_add = rules_to_add.filter(rule_type__in=["group_rfc", "area_rfc"]) + else: + rules_to_add = rules_to_add.filter( + rule_type__in=["group", "area", "group_exp"], + state__in=states, + ) + rules |= rules_to_add + + # state rules (only relevant for I-Ds) + if doc.type_id == "draft": rules |= SearchRule.objects.filter( - rule_type__in=['group', 'area', 'group_rfc', 'area_rfc', 'group_exp'], + rule_type__in=[ + "state_iab", + "state_iana", + "state_iesg", + "state_irtf", + "state_ise", + "state_rfceditor", + "state_ietf", + ], state__in=states, - group__in=groups ) - rules |= SearchRule.objects.filter( - rule_type__in=['state_iab', 'state_iana', 'state_iesg', 'state_irtf', 'state_ise', 'state_rfceditor', 'state_ietf'], - state__in=states, - ) - - rules |= SearchRule.objects.filter( - rule_type__in=["author", "author_rfc"], - state__in=states, - person__in=list(Person.objects.filter(documentauthor__document=doc)), - ) - - if doc.ad_id: + # author rules + if doc.type_id == "rfc": rules |= SearchRule.objects.filter( - rule_type="ad", + rule_type="author_rfc", + person__in=list(Person.objects.filter(documentauthor__document=doc)), + ) + else: + rules |= SearchRule.objects.filter( + rule_type="author", state__in=states, - person=doc.ad_id, + person__in=list(Person.objects.filter(documentauthor__document=doc)), ) - if doc.shepherd_id: - rules |= SearchRule.objects.filter( - rule_type="shepherd", - state__in=states, - person__email=doc.shepherd_id, - ) + # Other draft-only rules rules + if doc.type_id == "draft": + if doc.ad_id: + rules |= SearchRule.objects.filter( + rule_type="ad", + state__in=states, + person=doc.ad_id, + ) - rules |= SearchRule.objects.filter( - rule_type="name_contains", - state__in=states, - name_contains_index=doc, # search our materialized index to avoid full scan - ) + if doc.shepherd_id: + rules |= SearchRule.objects.filter( + rule_type="shepherd", + state__in=states, + person__email=doc.shepherd_id, + ) + + rules |= SearchRule.objects.filter( + rule_type="name_contains", + state__in=states, + 
name_contains_index=doc, # search our materialized index to avoid full scan + ) return rules @@ -146,7 +179,11 @@ def docs_tracked_by_community_list(clist): # in theory, we could use an OR query, but databases seem to have # trouble with OR queries and complicated joins so do the OR'ing # manually - doc_ids = set(clist.added_docs.values_list("pk", flat=True)) + doc_ids = set() + for doc in clist.added_docs.all(): + doc_ids.add(doc.pk) + doc_ids.update(rfc.pk for rfc in doc.related_that_doc("became_rfc")) + for rule in clist.searchrule_set.all(): doc_ids = doc_ids | set(docs_matching_community_list_rule(rule).values_list("pk", flat=True)) diff --git a/ietf/community/views.py b/ietf/community/views.py index 1dbbfcaf0..fdaaffec0 100644 --- a/ietf/community/views.py +++ b/ietf/community/views.py @@ -79,19 +79,18 @@ def manage_list(request, username=None, acronym=None, group_type=None): rule_type_form = SearchRuleTypeForm(request.POST) if rule_type_form.is_valid(): rule_type = rule_type_form.cleaned_data['rule_type'] - - if rule_type: - rule_form = SearchRuleForm(clist, rule_type, request.POST) - if rule_form.is_valid(): - if clist.pk is None: - clist.save() - - rule = rule_form.save(commit=False) - rule.community_list = clist - rule.rule_type = rule_type - rule.save() - if rule.rule_type == "name_contains": - reset_name_contains_index_for_rule(rule) + if rule_type: + rule_form = SearchRuleForm(clist, rule_type, request.POST) + if rule_form.is_valid(): + if clist.pk is None: + clist.save() + + rule = rule_form.save(commit=False) + rule.community_list = clist + rule.rule_type = rule_type + rule.save() + if rule.rule_type == "name_contains": + reset_name_contains_index_for_rule(rule) return HttpResponseRedirect("") else: @@ -130,7 +129,7 @@ def manage_list(request, username=None, acronym=None, group_type=None): @login_required def track_document(request, name, username=None, acronym=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if request.method == "POST": clist = lookup_community_list(username, acronym) @@ -154,7 +153,7 @@ def track_document(request, name, username=None, acronym=None): @login_required def untrack_document(request, name, username=None, acronym=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) clist = lookup_community_list(username, acronym) if not can_manage_community_list(request.user, clist): permission_denied(request, "You do not have permission to access this view") diff --git a/ietf/doc/admin.py b/ietf/doc/admin.py index 64b9d9eff..3ad4bee2a 100644 --- a/ietf/doc/admin.py +++ b/ietf/doc/admin.py @@ -7,7 +7,7 @@ from django.db import models from django import forms from .models import (StateType, State, RelatedDocument, DocumentAuthor, Document, RelatedDocHistory, - DocHistoryAuthor, DocHistory, DocAlias, DocReminder, DocEvent, NewRevisionDocEvent, + DocHistoryAuthor, DocHistory, DocReminder, DocEvent, NewRevisionDocEvent, StateDocEvent, ConsensusDocEvent, BallotType, BallotDocEvent, WriteupDocEvent, LastCallDocEvent, TelechatDocEvent, BallotPositionDocEvent, ReviewRequestDocEvent, InitialReviewDocEvent, AddedMessageEvent, SubmissionDocEvent, DeletedEvent, EditedAuthorsDocEvent, DocumentURL, @@ -27,10 +27,6 @@ class StateAdmin(admin.ModelAdmin): filter_horizontal = ["next_states"] admin.site.register(State, StateAdmin) -# class DocAliasInline(admin.TabularInline): -# model = DocAlias -# extra = 1 - class DocAuthorInline(admin.TabularInline): model = 
DocumentAuthor raw_id_fields = ['person', 'email'] @@ -43,8 +39,9 @@ class DocActionHolderInline(admin.TabularInline): class RelatedDocumentInline(admin.TabularInline): model = RelatedDocument + fk_name= 'source' def this(self, instance): - return instance.source.canonical_name() + return instance.source.name readonly_fields = ['this', ] fields = ['this', 'relationship', 'target', ] raw_id_fields = ['target'] @@ -70,7 +67,7 @@ class DocumentForm(forms.ModelForm): class DocumentAuthorAdmin(admin.ModelAdmin): list_display = ['id', 'document', 'person', 'email', 'affiliation', 'country', 'order'] - search_fields = ['document__docalias__name', 'person__name', 'email__address', 'affiliation', 'country'] + search_fields = ['document__name', 'person__name', 'email__address', 'affiliation', 'country'] raw_id_fields = ["document", "person", "email"] admin.site.register(DocumentAuthor, DocumentAuthorAdmin) @@ -108,14 +105,6 @@ class DocHistoryAdmin(admin.ModelAdmin): admin.site.register(DocHistory, DocHistoryAdmin) -class DocAliasAdmin(admin.ModelAdmin): - list_display = ['name', 'targets'] - search_fields = ['name', 'docs__name'] - raw_id_fields = ['docs'] - def targets(self, obj): - return ', '.join([o.name for o in obj.docs.all()]) -admin.site.register(DocAlias, DocAliasAdmin) - class DocReminderAdmin(admin.ModelAdmin): list_display = ['id', 'event', 'type', 'due', 'active'] list_filter = ['type', 'due', 'active'] @@ -125,7 +114,7 @@ admin.site.register(DocReminder, DocReminderAdmin) class RelatedDocumentAdmin(admin.ModelAdmin): list_display = ['source', 'target', 'relationship', ] list_filter = ['relationship', ] - search_fields = ['source__name', 'target__name', 'target__docs__name', ] + search_fields = ['source__name', 'target__name', ] raw_id_fields = ['source', 'target', ] admin.site.register(RelatedDocument, RelatedDocumentAdmin) diff --git a/ietf/doc/factories.py b/ietf/doc/factories.py index 3ea9f2b8f..50fba50c4 100644 --- a/ietf/doc/factories.py +++ b/ietf/doc/factories.py @@ -12,7 +12,7 @@ from typing import Optional # pyflakes:ignore from django.conf import settings from django.utils import timezone -from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, DocAlias, State, DocumentAuthor, +from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, State, DocumentAuthor, StateDocEvent, BallotPositionDocEvent, BallotDocEvent, BallotType, IRSGBallotDocEvent, TelechatDocEvent, DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource ) from ietf.group.models import Group @@ -51,16 +51,11 @@ class BaseDocumentFactory(factory.django.DjangoModelFactory): def name(self, n): return draft_name_generator(self.type_id,self.group,n) - newrevisiondocevent = factory.RelatedFactory('ietf.doc.factories.NewRevisionDocEventFactory','doc') - @factory.post_generation - def other_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument - alias = DocAliasFactory(name=obj.name) - alias.docs.add(obj) - if create and extracted: - for name in extracted: - alias = DocAliasFactory(name=name) - alias.docs.add(obj) + def newrevisiondocevent(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument + if create: + if obj.type_id != "rfc": + NewRevisionDocEventFactory(doc=obj) @factory.post_generation def states(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument @@ -83,13 +78,7 @@ class BaseDocumentFactory(factory.django.DjangoModelFactory): def relations(obj, create, extracted, **kwargs): # pylint: 
disable=no-self-argument if create and extracted: for (rel_id, doc) in extracted: - if isinstance(doc, Document): - docalias = doc.docalias.first() - elif isinstance(doc, DocAlias): - docalias = doc - else: - continue - obj.relateddocument_set.create(relationship_id=rel_id, target=docalias) + obj.relateddocument_set.create(relationship_id=rel_id, target=doc) @factory.post_generation def create_revisions(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument @@ -119,6 +108,24 @@ class DocumentFactory(BaseDocumentFactory): group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none') +class RfcFactory(BaseDocumentFactory): + type_id = "rfc" + rev = "" + rfc_number = factory.Sequence(lambda n: n + 1000) + name = factory.LazyAttribute(lambda o: f"rfc{o.rfc_number:d}") + expires = None + + @factory.post_generation + def states(obj, create, extracted, **kwargs): + if not create: + return + if extracted: + for (state_type_id,state_slug) in extracted: + obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug)) + else: + obj.set_state(State.objects.get(type_id='rfc',slug='published')) + + class IndividualDraftFactory(BaseDocumentFactory): type_id = 'draft' @@ -137,28 +144,11 @@ class IndividualDraftFactory(BaseDocumentFactory): obj.set_state(State.objects.get(type_id='draft',slug='active')) obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) -class IndividualRfcFactory(IndividualDraftFactory): +class IndividualRfcFactory(RfcFactory): + group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none') - alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000))) - - @factory.post_generation - def states(obj, create, extracted, **kwargs): - if not create: - return - if extracted: - for (state_type_id,state_slug) in extracted: - obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug)) - else: - obj.set_state(State.objects.get(type_id='draft',slug='rfc')) - - @factory.post_generation - def reset_canonical_name(obj, create, extracted, **kwargs): - if hasattr(obj, '_canonical_name'): - del obj._canonical_name - return None class WgDraftFactory(BaseDocumentFactory): - type_id = 'draft' group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg') stream_id = 'ietf' @@ -177,30 +167,12 @@ class WgDraftFactory(BaseDocumentFactory): obj.set_state(State.objects.get(type_id='draft-stream-ietf',slug='wg-doc')) obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) -class WgRfcFactory(WgDraftFactory): - - alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000))) +class WgRfcFactory(RfcFactory): + group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg') + stream_id = 'ietf' std_level_id = 'ps' - @factory.post_generation - def states(obj, create, extracted, **kwargs): - if not create: - return - if extracted: - for (state_type_id,state_slug) in extracted: - obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug)) - if not obj.get_state('draft-iesg'): - obj.set_state(State.objects.get(type_id='draft-iesg', slug='pub')) - else: - obj.set_state(State.objects.get(type_id='draft',slug='rfc')) - obj.set_state(State.objects.get(type_id='draft-iesg', slug='pub')) - - @factory.post_generation - def reset_canonical_name(obj, create, extracted, **kwargs): - if hasattr(obj, '_canonical_name'): - del obj._canonical_name - 
return None class RgDraftFactory(BaseDocumentFactory): @@ -223,34 +195,11 @@ class RgDraftFactory(BaseDocumentFactory): obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) -class RgRfcFactory(RgDraftFactory): - - alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000))) - +class RgRfcFactory(RfcFactory): + group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='rg') + stream_id = 'irtf' std_level_id = 'inf' - @factory.post_generation - def states(obj, create, extracted, **kwargs): - if not create: - return - if extracted: - for (state_type_id,state_slug) in extracted: - obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug)) - if not obj.get_state('draft-stream-irtf'): - obj.set_state(State.objects.get(type_id='draft-stream-irtf', slug='pub')) - if not obj.get_state('draft-iesg'): - obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) - else: - obj.set_state(State.objects.get(type_id='draft',slug='rfc')) - obj.set_state(State.objects.get(type_id='draft-stream-irtf', slug='pub')) - obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) - - @factory.post_generation - def reset_canonical_name(obj, create, extracted, **kwargs): - if hasattr(obj, '_canonical_name'): - del obj._canonical_name - return None - class CharterFactory(BaseDocumentFactory): @@ -279,7 +228,7 @@ class StatusChangeFactory(BaseDocumentFactory): for (rel, target) in extracted: obj.relateddocument_set.create(relationship_id=rel,target=target) else: - obj.relateddocument_set.create(relationship_id='tobcp', target=WgRfcFactory().docalias.first()) + obj.relateddocument_set.create(relationship_id='tobcp', target=WgRfcFactory()) @factory.post_generation def states(obj, create, extracted, **kwargs): @@ -306,9 +255,9 @@ class ConflictReviewFactory(BaseDocumentFactory): if not create: return if extracted: - obj.relateddocument_set.create(relationship_id='conflrev',target=extracted.docalias.first()) + obj.relateddocument_set.create(relationship_id='conflrev',target=extracted) else: - obj.relateddocument_set.create(relationship_id='conflrev',target=DocumentFactory(name=obj.name.replace('conflict-review-','draft-'),type_id='draft',group=Group.objects.get(type_id='individ')).docalias.first()) + obj.relateddocument_set.create(relationship_id='conflrev',target=DocumentFactory(name=obj.name.replace('conflict-review-','draft-'),type_id='draft',group=Group.objects.get(type_id='individ'))) @factory.post_generation @@ -327,24 +276,6 @@ class ReviewFactory(BaseDocumentFactory): name = factory.LazyAttribute(lambda o: 'review-doesnotexist-00-%s-%s'%(o.group.acronym,date_today().isoformat())) group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='review') -class DocAliasFactory(factory.django.DjangoModelFactory): - class Meta: - model = DocAlias - skip_postgeneration_save = True - - @factory.post_generation - def document(self, create, extracted, **kwargs): - if create and extracted: - self.docs.add(extracted) - - @factory.post_generation - def docs(self, create, extracted, **kwargs): - if create and extracted: - for doc in extracted: - if not doc in self.docs.all(): - self.docs.add(doc) - - class DocEventFactory(factory.django.DjangoModelFactory): class Meta: model = DocEvent @@ -557,33 +488,8 @@ class EditorialDraftFactory(BaseDocumentFactory): obj.set_state(State.objects.get(type_id='draft-stream-editorial',slug='active')) 
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) -class EditorialRfcFactory(RgDraftFactory): - - alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000))) - - std_level_id = 'inf' - - @factory.post_generation - def states(obj, create, extracted, **kwargs): - if not create: - return - if extracted: - for (state_type_id,state_slug) in extracted: - obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug)) - if not obj.get_state('draft-stream-editorial'): - obj.set_state(State.objects.get(type_id='draft-stream-editorial', slug='pub')) - if not obj.get_state('draft-iesg'): - obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) - else: - obj.set_state(State.objects.get(type_id='draft',slug='rfc')) - obj.set_state(State.objects.get(type_id='draft-stream-editorial', slug='pub')) - obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists')) - - @factory.post_generation - def reset_canonical_name(obj, create, extracted, **kwargs): - if hasattr(obj, '_canonical_name'): - del obj._canonical_name - return None +class EditorialRfcFactory(RgRfcFactory): + pass class StatementFactory(BaseDocumentFactory): type_id = "statement" @@ -611,3 +517,31 @@ class StatementFactory(BaseDocumentFactory): obj.set_state(State.objects.get(type_id=state_type_id, slug=state_slug)) else: obj.set_state(State.objects.get(type_id="statement", slug="active")) + +class SubseriesFactory(factory.django.DjangoModelFactory): + class Meta: + model = Document + skip_postgeneration_save = True + + @factory.lazy_attribute_sequence + def name(self, n): + return f"{self.type_id}{n}" + + @factory.post_generation + def contains(obj, create, extracted, **kwargs): + if not create: + return + if extracted: + for doc in extracted: + obj.relateddocument_set.create(relationship_id="contains",target=doc) + else: + obj.relateddocument_set.create(relationship_id="contains", target=RfcFactory()) + +class BcpFactory(SubseriesFactory): + type_id="bcp" + +class StdFactory(SubseriesFactory): + type_id="std" + +class FyiFactory(SubseriesFactory): + type_id="fyi" diff --git a/ietf/doc/feeds.py b/ietf/doc/feeds.py index c5bb467e9..500ed3cb1 100644 --- a/ietf/doc/feeds.py +++ b/ietf/doc/feeds.py @@ -36,7 +36,7 @@ class DocumentChangesFeed(Feed): feed_type = Atom1Feed def get_object(self, request, name): - return Document.objects.get(docalias__name=name) + return Document.objects.get(name=name) def title(self, obj): return "Changes for %s" % obj.display_name() @@ -46,7 +46,7 @@ class DocumentChangesFeed(Feed): raise FeedDoesNotExist return urlreverse( "ietf.doc.views_doc.document_history", - kwargs=dict(name=obj.canonical_name()), + kwargs=dict(name=obj.name), ) def subtitle(self, obj): @@ -86,7 +86,7 @@ class DocumentChangesFeed(Feed): return ( urlreverse( "ietf.doc.views_doc.document_history", - kwargs=dict(name=item.doc.canonical_name()), + kwargs=dict(name=item.doc.name), ) + "#history-%s" % item.pk ) @@ -208,13 +208,13 @@ class RfcFeed(Feed): return [doc for doc, time in results] def item_title(self, item): - return "%s : %s" % (item.canonical_name(), item.title) + return "%s : %s" % (item.name, item.title) def item_description(self, item): return item.abstract def item_link(self, item): - return "https://rfc-editor.org/info/%s" % item.canonical_name() + return "https://rfc-editor.org/info/%s" % item.name def item_pubdate(self, item): return item.publication_time @@ -224,20 +224,20 @@ class RfcFeed(Feed): 
extra.update({"dcterms_accessRights": "gratis"}) extra.update({"dcterms_format": "text/html"}) media_contents = [] - if int(item.rfc_number()) < 8650: - if int(item.rfc_number()) not in [8, 9, 51, 418, 500, 530, 589]: + if item.rfc_number < 8650: + if item.rfc_number not in [8, 9, 51, 418, 500, 530, 589]: for fmt, media_type in [("txt", "text/plain"), ("html", "text/html")]: media_contents.append( { - "url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}", + "url": f"https://rfc-editor.org/rfc/{item.name}.{fmt}", "media_type": media_type, "is_format_of": self.item_link(item), } ) - if int(item.rfc_number()) not in [571, 587]: + if item.rfc_number not in [571, 587]: media_contents.append( { - "url": f"https://www.rfc-editor.org/rfc/pdfrfc/{item.canonical_name()}.txt.pdf", + "url": f"https://www.rfc-editor.org/rfc/pdfrfc/{item.name}.txt.pdf", "media_type": "application/pdf", "is_format_of": self.item_link(item), } @@ -245,7 +245,7 @@ class RfcFeed(Feed): else: media_contents.append( { - "url": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml", + "url": f"https://www.rfc-editor.org/rfc/{item.name}.xml", "media_type": "application/rfc+xml", } ) @@ -256,16 +256,16 @@ class RfcFeed(Feed): ]: media_contents.append( { - "url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}", + "url": f"https://rfc-editor.org/rfc/{item.name}.{fmt}", "media_type": media_type, - "is_format_of": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml", + "is_format_of": f"https://www.rfc-editor.org/rfc/{item.name}.xml", } ) extra.update({"media_contents": media_contents}) - extra.update({"doi": "10.17487/%s" % item.canonical_name().upper()}) + extra.update({"doi": "10.17487/%s" % item.name.upper()}) extra.update( - {"doiuri": "http://dx.doi.org/10.17487/%s" % item.canonical_name().upper()} + {"doiuri": "http://dx.doi.org/10.17487/%s" % item.name.upper()} ) # R104 Publisher (Mandatory - but we need a string from them first) diff --git a/ietf/doc/fields.py b/ietf/doc/fields.py index fde519950..4a6922bf3 100644 --- a/ietf/doc/fields.py +++ b/ietf/doc/fields.py @@ -13,7 +13,7 @@ from django.urls import reverse as urlreverse import debug # pyflakes:ignore -from ietf.doc.models import Document, DocAlias +from ietf.doc.models import Document from ietf.doc.utils import uppercase_std_abbreviated_name from ietf.utils.fields import SearchableField @@ -69,19 +69,3 @@ class SearchableDocumentsField(SearchableField): class SearchableDocumentField(SearchableDocumentsField): """Specialized to only return one Document""" max_entries = 1 - - -class SearchableDocAliasesField(SearchableDocumentsField): - """Search DocAliases instead of Documents""" - model = DocAlias # type: Type[models.Model] - - def doc_type_filter(self, queryset): - """Filter to include only desired doc type - - For DocAlias, pass through to the docs to check type. 
- """ - return queryset.filter(docs__type=self.doc_type) - -class SearchableDocAliasField(SearchableDocAliasesField): - """Specialized to only return one DocAlias""" - max_entries = 1 \ No newline at end of file diff --git a/ietf/doc/forms.py b/ietf/doc/forms.py index c0c52571c..554451c56 100644 --- a/ietf/doc/forms.py +++ b/ietf/doc/forms.py @@ -8,7 +8,7 @@ from django import forms from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.core.validators import validate_email -from ietf.doc.fields import SearchableDocAliasesField, SearchableDocAliasField +from ietf.doc.fields import SearchableDocumentField, SearchableDocumentsField from ietf.doc.models import RelatedDocument, DocExtResource from ietf.iesg.models import TelechatDate from ietf.iesg.utils import telechat_page_count @@ -134,11 +134,12 @@ class ActionHoldersForm(forms.Form): IESG_APPROVED_STATE_LIST = ("ann", "rfcqueue", "pub") class AddDownrefForm(forms.Form): - rfc = SearchableDocAliasField( + rfc = SearchableDocumentField( label="Referenced RFC", help_text="The RFC that is approved for downref", - required=True) - drafts = SearchableDocAliasesField( + required=True, + doc_type="rfc") + drafts = SearchableDocumentsField( label="Internet-Drafts that makes the reference", help_text="The Internet-Drafts that approve the downref in their Last Call", required=True) @@ -148,7 +149,7 @@ class AddDownrefForm(forms.Form): raise forms.ValidationError("Please provide a referenced RFC and a referencing Internet-Draft") rfc = self.cleaned_data['rfc'] - if not rfc.document.is_rfc(): + if rfc.type_id != "rfc": raise forms.ValidationError("Cannot find the RFC: " + rfc.name) return rfc @@ -158,10 +159,10 @@ class AddDownrefForm(forms.Form): v_err_names = [] drafts = self.cleaned_data['drafts'] - for da in drafts: - state = da.document.get_state("draft-iesg") + for d in drafts: + state = d.get_state("draft-iesg") if not state or state.slug not in IESG_APPROVED_STATE_LIST: - v_err_names.append(da.name) + v_err_names.append(d.name) if v_err_names: raise forms.ValidationError("Internet-Draft is not yet approved: " + ", ".join(v_err_names)) return drafts @@ -173,23 +174,23 @@ class AddDownrefForm(forms.Form): v_err_pairs = [] rfc = self.cleaned_data['rfc'] drafts = self.cleaned_data['drafts'] - for da in drafts: - if RelatedDocument.objects.filter(source=da.document, target=rfc, relationship_id='downref-approval'): - v_err_pairs.append(da.name + " --> RFC " + rfc.document.rfc_number()) + for d in drafts: + if RelatedDocument.objects.filter(source=d, target=rfc, relationship_id='downref-approval'): + v_err_pairs.append(f"{d.name} --> RFC {rfc.rfc_number}") if v_err_pairs: raise forms.ValidationError("Downref is already in the registry: " + ", ".join(v_err_pairs)) if 'save_downref_anyway' not in self.data: # this check is skipped if the save_downref_anyway button is used v_err_refnorm = "" - for da in drafts: - if not RelatedDocument.objects.filter(source=da.document, target=rfc, relationship_id='refnorm'): + for d in drafts: + if not RelatedDocument.objects.filter(source=d, target=rfc, relationship_id='refnorm'): if v_err_refnorm: - v_err_refnorm = v_err_refnorm + " or " + da.name + v_err_refnorm = v_err_refnorm + " or " + d.name else: - v_err_refnorm = da.name + v_err_refnorm = d.name if v_err_refnorm: - v_err_refnorm_prefix = "There does not seem to be a normative reference to RFC " + rfc.document.rfc_number() + " by " + v_err_refnorm_prefix = f"There does not seem to be a normative reference to RFC {rfc.rfc_number} by " 
raise forms.ValidationError(v_err_refnorm_prefix + v_err_refnorm) diff --git a/ietf/doc/mails.py b/ietf/doc/mails.py index 8f5d0eb67..c1e2074bc 100644 --- a/ietf/doc/mails.py +++ b/ietf/doc/mails.py @@ -19,7 +19,7 @@ from ietf.doc.templatetags.mail_filters import std_level_prompt from ietf.utils import log from ietf.utils.mail import send_mail, send_mail_text from ietf.ipr.utils import iprs_from_docs, related_docs -from ietf.doc.models import WriteupDocEvent, LastCallDocEvent, DocAlias, ConsensusDocEvent +from ietf.doc.models import WriteupDocEvent, LastCallDocEvent, ConsensusDocEvent from ietf.doc.utils import needed_ballot_positions from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible from ietf.group.models import Role @@ -54,7 +54,7 @@ def email_ad_approved_doc(request, doc, text): def email_ad_approved_conflict_review(request, review, ok_to_publish): """Email notification when AD approves a conflict review""" - conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document + conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target (to, cc) = gather_address_lists("ad_approved_conflict_review") frm = request.user.person.formatted_email() send_mail(request, @@ -202,7 +202,7 @@ def generate_last_call_announcement(request, doc): doc.filled_title = textwrap.fill(doc.title, width=70, subsequent_indent=" " * 3) - iprs = iprs_from_docs(related_docs(DocAlias.objects.get(name=doc.canonical_name()))) + iprs = iprs_from_docs(related_docs(Document.objects.get(name=doc.name))) if iprs: ipr_links = [ urlreverse("ietf.ipr.views.show", kwargs=dict(id=i.id)) for i in iprs] ipr_links = [ settings.IDTRACKER_BASE_URL+url if not url.startswith("http") else url for url in ipr_links ] @@ -670,7 +670,7 @@ def send_review_possibly_replaces_request(request, doc, submitter_info): to = set(addrs.to) cc = set(addrs.cc) - possibly_replaces = Document.objects.filter(name__in=[alias.name for alias in doc.related_that_doc("possibly-replaces")]) + possibly_replaces = Document.objects.filter(name__in=[related.name for related in doc.related_that_doc("possibly-replaces")]) for other_doc in possibly_replaces: (other_to, other_cc) = gather_address_lists('doc_replacement_suggested',doc=other_doc) to.update(other_to) diff --git a/ietf/doc/management/commands/generate_draft_aliases.py b/ietf/doc/management/commands/generate_draft_aliases.py index 88f4aa98c..6d42a66a1 100755 --- a/ietf/doc/management/commands/generate_draft_aliases.py +++ b/ietf/doc/management/commands/generate_draft_aliases.py @@ -24,6 +24,7 @@ from ietf.doc.models import Document from ietf.group.utils import get_group_role_emails, get_group_ad_emails from ietf.utils.aliases import dump_sublist from utils.mail import parseaddr +from ietf.utils import log DEFAULT_YEARS = 2 @@ -120,16 +121,18 @@ class Command(BaseCommand): vfile.write("%s anything\n" % settings.DRAFT_VIRTUAL_DOMAIN) # Internet-Drafts with active status or expired within DEFAULT_YEARS - drafts = Document.objects.filter(name__startswith='draft-') + drafts = Document.objects.filter(type_id="draft") active_drafts = drafts.filter(states__slug='active') inactive_recent_drafts = drafts.exclude(states__slug='active').filter(expires__gte=show_since) interesting_drafts = active_drafts | inactive_recent_drafts alias_domains = ['ietf.org', ] for draft in interesting_drafts.distinct().iterator(): - # Omit RFCs, unless they were published in the last DEFAULT_YEARS - if draft.docalias.filter(name__startswith='rfc'): - if 
draft.latest_event(type='published_rfc').time < show_since:
+            # Omit drafts that became RFCs, unless they were published in the last DEFAULT_YEARS
+            if draft.get_state_slug()=="rfc":
+                rfc = draft.became_rfc()
+                log.assertion("rfc is not None")
+                if rfc.latest_event(type='published_rfc').time < show_since:
                     continue
 
             alias = draft.name
diff --git a/ietf/doc/migrations/0009_add_rfc_states.py b/ietf/doc/migrations/0009_add_rfc_states.py
new file mode 100644
index 000000000..07a6ac020
--- /dev/null
+++ b/ietf/doc/migrations/0009_add_rfc_states.py
@@ -0,0 +1,23 @@
+# Generated by Django 4.2.2 on 2023-06-14 20:57
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+    StateType = apps.get_model("doc", "StateType")
+    rfc_statetype, _ = StateType.objects.get_or_create(slug="rfc", label="State")
+
+    State = apps.get_model("doc", "State")
+    State.objects.get_or_create(
+        type=rfc_statetype, slug="published", name="Published", used=True, order=1
+    )
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0008_alter_docevent_type"),
+    ]
+
+    operations = [
+        migrations.RunPython(forward),
+    ]
diff --git a/ietf/doc/migrations/0010_dochistory_rfc_number_document_rfc_number.py b/ietf/doc/migrations/0010_dochistory_rfc_number_document_rfc_number.py
new file mode 100644
index 000000000..26b2a85c6
--- /dev/null
+++ b/ietf/doc/migrations/0010_dochistory_rfc_number_document_rfc_number.py
@@ -0,0 +1,22 @@
+# Generated by Django 4.2.2 on 2023-06-14 22:28
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0009_add_rfc_states"),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="dochistory",
+            name="rfc_number",
+            field=models.PositiveIntegerField(blank=True, null=True),
+        ),
+        migrations.AddField(
+            model_name="document",
+            name="rfc_number",
+            field=models.PositiveIntegerField(blank=True, null=True),
+        ),
+    ]
diff --git a/ietf/doc/migrations/0011_create_rfc_documents.py b/ietf/doc/migrations/0011_create_rfc_documents.py
new file mode 100644
index 000000000..466ff81bb
--- /dev/null
+++ b/ietf/doc/migrations/0011_create_rfc_documents.py
@@ -0,0 +1,76 @@
+# Generated by Django 4.2.2 on 2023-06-15 15:27
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+    Document = apps.get_model("doc", "Document")
+    DocAlias = apps.get_model("doc", "DocAlias")
+    DocumentAuthor = apps.get_model("doc", "DocumentAuthor")
+
+    State = apps.get_model("doc", "State")
+    draft_rfc_state = State.objects.get(type_id="draft", slug="rfc")
+    rfc_published_state = State.objects.get(type_id="rfc", slug="published")
+
+    # Find draft Documents in the "rfc" state
+    found_by_state = Document.objects.filter(states=draft_rfc_state).distinct()
+
+    # Find Documents with an "rfc..." alias and confirm they're the same set
+    rfc_docaliases = DocAlias.objects.filter(name__startswith="rfc")
+    found_by_name = Document.objects.filter(docalias__in=rfc_docaliases).distinct()
+    assert set(found_by_name) == set(found_by_state), "mismatch between rfcs identified by state and docalias"
+
+    # As of 2023-06-15, there is one Document with two rfc aliases: rfc6312 and rfc6342 are the same Document. This
+    # was due to a publication error. Because we go alias-by-alias, no special handling is needed in this migration.
+
+    for rfc_alias in rfc_docaliases.order_by("name"):
+        assert rfc_alias.docs.count() == 1, f"DocAlias {rfc_alias} is linked to more than 1 Document"
+        draft = rfc_alias.docs.first()
+        if draft.name.startswith("rfc"):
+            rfc = draft
+            rfc.type_id = "rfc"
+            rfc.rfc_number = int(draft.name[3:])
+            rfc.save()
+            rfc.states.set([rfc_published_state])
+        else:
+            rfc = Document.objects.create(
+                type_id="rfc",
+                name=rfc_alias.name,
+                rfc_number=int(rfc_alias.name[3:]),
+                time=draft.time,
+                title=draft.title,
+                stream=draft.stream,
+                group=draft.group,
+                abstract=draft.abstract,
+                pages=draft.pages,
+                words=draft.words,
+                std_level=draft.std_level,
+                ad=draft.ad,
+                external_url=draft.external_url,
+                uploaded_filename=draft.uploaded_filename,
+                note=draft.note,
+            )
+            rfc.states.set([rfc_published_state])
+            rfc.formal_languages.set(draft.formal_languages.all())
+
+            # Copy Authors
+            for da in draft.documentauthor_set.all():
+                DocumentAuthor.objects.create(
+                    document=rfc,
+                    person=da.person,
+                    email=da.email,
+                    affiliation=da.affiliation,
+                    country=da.country,
+                    order=da.order,
+                )
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0010_dochistory_rfc_number_document_rfc_number"),
+        ("name", "0010_rfc_doctype_names"),
+    ]
+
+    operations = [
+        migrations.RunPython(forward),
+    ]
diff --git a/ietf/doc/migrations/0012_move_rfc_docevents.py b/ietf/doc/migrations/0012_move_rfc_docevents.py
new file mode 100644
index 000000000..9969a8f0a
--- /dev/null
+++ b/ietf/doc/migrations/0012_move_rfc_docevents.py
@@ -0,0 +1,88 @@
+# Generated by Django 4.2.2 on 2023-06-20 18:36
+
+from django.db import migrations
+from django.db.models import Q
+
+
+def forward(apps, schema_editor):
+    """Move RFC events from the draft to the rfc Document"""
+    DocAlias = apps.get_model("doc", "DocAlias")
+    DocEvent = apps.get_model("doc", "DocEvent")
+    Document = apps.get_model("doc", "Document")
+
+    # queryset with events migrated regardless of whether before or after the "published_rfc" event
+    events_always_migrated = DocEvent.objects.filter(
+        Q(
+            type__in=[
+                "published_rfc",  # do not remove this one!
+            ]
+        )
+    )
+
+    # queryset with events migrated only after the "published_rfc" event
+    events_migrated_after_pub = DocEvent.objects.exclude(
+        type__in=[
+            "created_ballot",
+            "closed_ballot",
+            "sent_ballot_announcement",
+            "changed_ballot_position",
+            "changed_ballot_approval_text",
+            "changed_ballot_writeup_text",
+        ]
+    ).exclude(
+        type="added_comment",
+        desc__contains="ballot set",  # excludes 311 comments that all apply to drafts
+    )
+
+    # special case for rfc 6312/6342 draft, which has two published_rfc events
+    ignore = ["rfc6312", "rfc6342"]  # do not reprocess these later
+    rfc6312 = Document.objects.get(name="rfc6312")
+    rfc6342 = Document.objects.get(name="rfc6342")
+    draft = DocAlias.objects.get(name="rfc6312").docs.first()
+    assert draft == DocAlias.objects.get(name="rfc6342").docs.first()
+    published_events = list(
+        DocEvent.objects.filter(doc=draft, type="published_rfc").order_by("time")
+    )
+    assert len(published_events) == 2
+    (
+        pub_event_6312,
+        pub_event_6342,
+    ) = published_events  # order matches pub dates at rfc-editor.org
+
+    pub_event_6312.doc = rfc6312
+    pub_event_6312.save()
+    events_migrated_after_pub.filter(
+        doc=draft,
+        time__gte=pub_event_6312.time,
+        time__lt=pub_event_6342.time,
+    ).update(doc=rfc6312)
+
+    pub_event_6342.doc = rfc6342
+    pub_event_6342.save()
+    events_migrated_after_pub.filter(
+        doc=draft,
+        time__gte=pub_event_6342.time,
+    ).update(doc=rfc6342)
+
+    # Now handle all the rest
+    for rfc in Document.objects.filter(type_id="rfc").exclude(name__in=ignore):
+        draft = DocAlias.objects.get(name=rfc.name).docs.first()
+        assert draft is not None
+        published_event = DocEvent.objects.get(doc=draft, type="published_rfc")
+        events_always_migrated.filter(
+            doc=draft,
+        ).update(doc=rfc)
+        events_migrated_after_pub.filter(
+            doc=draft,
+            time__gte=published_event.time,
+        ).update(doc=rfc)
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0011_create_rfc_documents"),
+    ]
+
+    operations = [
+        migrations.RunPython(forward),
+    ]
diff --git a/ietf/doc/migrations/0013_rfc_relateddocuments.py b/ietf/doc/migrations/0013_rfc_relateddocuments.py
new file mode 100644
index 000000000..9baddaebd
--- /dev/null
+++ b/ietf/doc/migrations/0013_rfc_relateddocuments.py
@@ -0,0 +1,45 @@
+# Generated by Django 4.2.3 on 2023-07-05 22:40
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+    DocAlias = apps.get_model("doc", "DocAlias")
+    Document = apps.get_model("doc", "Document")
+    RelatedDocument = apps.get_model("doc", "RelatedDocument")
+    for rfc_alias in DocAlias.objects.filter(name__startswith="rfc").exclude(
+        docs__type_id="rfc"
+    ):
+        # Move these over to the RFC
+        RelatedDocument.objects.filter(
+            relationship__slug__in=(
+                "tobcp",
+                "toexp",
+                "tohist",
+                "toinf",
+                "tois",
+                "tops",
+                "obs",
+                "updates",
+            ),
+            source__docalias=rfc_alias,
+        ).update(source=Document.objects.get(name=rfc_alias.name))
+        # Duplicate references on the RFC but keep the ones on the draft as well
+        originals = list(
+            RelatedDocument.objects.filter(
+                relationship__slug__in=("refinfo", "refnorm", "refold", "refunk"),
+                source__docalias=rfc_alias,
+            )
+        )
+        for o in originals:
+            o.pk = None
+            o.source = Document.objects.get(name=rfc_alias.name)
+        RelatedDocument.objects.bulk_create(originals)
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0012_move_rfc_docevents"),
+    ]
+
+    operations = [migrations.RunPython(forward)]
diff --git a/ietf/doc/migrations/0014_move_rfc_docaliases.py b/ietf/doc/migrations/0014_move_rfc_docaliases.py
new file mode 100644
index 000000000..c82a98e05
--- /dev/null
+++ b/ietf/doc/migrations/0014_move_rfc_docaliases.py
@@ -0,0 +1,38 @@
+# Generated by Django 4.2.2 on 2023-06-20 18:36
+
+from django.db import migrations
+
+
+def forward(apps, schema_editor):
+    """Point "rfc..." DocAliases at the rfc-type Document
+
+    Creates a became_rfc RelatedDocument to preserve the connection between the draft and the rfc.
+    """
+    DocAlias = apps.get_model("doc", "DocAlias")
+    Document = apps.get_model("doc", "Document")
+    RelatedDocument = apps.get_model("doc", "RelatedDocument")
+
+    for rfc_alias in DocAlias.objects.filter(name__startswith="rfc"):
+        rfc = Document.objects.get(name=rfc_alias.name)
+        aliased_doc = rfc_alias.docs.get()  # implicitly confirms only one value in rfc_alias.docs
+        if aliased_doc != rfc:
+            # If the DocAlias was not already pointing at the rfc, it was pointing at the draft
+            # it came from. Create the relationship between draft and rfc Documents.
+            assert aliased_doc.type_id == "draft", f"Alias for {rfc.name} should be pointing at a draft"
+            RelatedDocument.objects.create(
+                source=aliased_doc,
+                target=rfc_alias,
+                relationship_id="became_rfc",
+            )
+            # Now move the alias from the draft to the rfc
+            rfc_alias.docs.set([rfc])
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0013_rfc_relateddocuments"),
+    ]
+
+    operations = [
+        migrations.RunPython(forward),
+    ]
diff --git a/ietf/doc/migrations/0015_relate_no_aliases.py b/ietf/doc/migrations/0015_relate_no_aliases.py
new file mode 100644
index 000000000..4ba3dd960
--- /dev/null
+++ b/ietf/doc/migrations/0015_relate_no_aliases.py
@@ -0,0 +1,84 @@
+# Generated by Django 4.2.2 on 2023-06-16 13:40
+
+from django.db import migrations
+import django.db.models.deletion
+from django.db.models import F, Subquery, OuterRef, CharField
+import ietf.utils.models
+
+def forward(apps, schema_editor):
+    RelatedDocument = apps.get_model("doc", "RelatedDocument")
+    DocAlias = apps.get_model("doc", "DocAlias")
+    target_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("docs")[:1])
+    name_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("name")[:1])
+    RelatedDocument.objects.annotate(firstdoc=target_subquery).annotate(aliasname=name_subquery).update(target=F("firstdoc"),originaltargetaliasname=F("aliasname"))
+
+def reverse(apps, schema_editor):
+    pass
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0014_move_rfc_docaliases"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='relateddocument',
+            name='target',
+            field=ietf.utils.models.ForeignKey(
+                db_index=False,
+                on_delete=django.db.models.deletion.CASCADE,
+                to='doc.docalias',
+            ),
+        ),
+        migrations.RenameField(
+            model_name="relateddocument",
+            old_name="target",
+            new_name="deprecated_target"
+        ),
+        migrations.AlterField(
+            model_name='relateddocument',
+            name='deprecated_target',
+            field=ietf.utils.models.ForeignKey(
+                db_index=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                to='doc.docalias',
+            ),
+        ),
+        migrations.AddField(
+            model_name="relateddocument",
+            name="target",
+            field=ietf.utils.models.ForeignKey(
+                default=1,  # A lie, but a convenient one - no relations point here.
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="targets_related",
+                to="doc.document",
+                db_index=False,
+            ),
+            preserve_default=False,
+        ),
+        migrations.AddField(
+            model_name="relateddocument",
+            name="originaltargetaliasname",
+            field=CharField(max_length=255,null=True,blank=True),
+            preserve_default=True,
+        ),
+        migrations.RunPython(forward, reverse),
+        migrations.AlterField(
+            model_name="relateddocument",
+            name="target",
+            field=ietf.utils.models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE,
+                related_name="targets_related",
+                to="doc.document",
+                db_index=True,
+            ),
+        ),
+        migrations.RemoveField(
+            model_name="relateddocument",
+            name="deprecated_target",
+            field=ietf.utils.models.ForeignKey(
+                on_delete=django.db.models.deletion.CASCADE,
+                to='doc.DocAlias',
+            ),
+        ),
+    ]
diff --git a/ietf/doc/migrations/0016_relate_hist_no_aliases.py b/ietf/doc/migrations/0016_relate_hist_no_aliases.py
new file mode 100644
index 000000000..df5fb3c32
--- /dev/null
+++ b/ietf/doc/migrations/0016_relate_hist_no_aliases.py
@@ -0,0 +1,87 @@
+# Generated by Django 4.2.2 on 2023-06-16 13:40
+
+from django.db import migrations
+import django.db.models.deletion
+from django.db.models import F, Subquery, OuterRef, CharField
+import ietf.utils.models
+
+def forward(apps, schema_editor):
+    RelatedDocHistory = apps.get_model("doc", "RelatedDocHistory")
+    DocAlias = apps.get_model("doc", "DocAlias")
+    target_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("docs")[:1])
+    name_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("name")[:1])
+    RelatedDocHistory.objects.annotate(firstdoc=target_subquery).annotate(aliasname=name_subquery).update(target=F("firstdoc"),originaltargetaliasname=F("aliasname"))
+
+def reverse(apps, schema_editor):
+    pass
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("doc", "0015_relate_no_aliases"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='relateddochistory',
+            name='target',
+            field=ietf.utils.models.ForeignKey(
+                db_index=False,
+                on_delete=django.db.models.deletion.CASCADE,
+                to='doc.docalias',
+                related_name='reversely_related_document_history_set',
+            ),
+        ),
+        migrations.RenameField(
+            model_name="relateddochistory",
+            old_name="target",
+            new_name="deprecated_target"
+        ),
+        migrations.AlterField(
+            model_name='relateddochistory',
+            name='deprecated_target',
+            field=ietf.utils.models.ForeignKey(
+                db_index=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                to='doc.docalias',
+                related_name='deprecated_reversely_related_document_history_set',
+            ),
+        ),
+        migrations.AddField(
+            model_name="relateddochistory",
+            name="target",
+            field=ietf.utils.models.ForeignKey(
+                default=1,  # A lie, but a convenient one - no relations point here.
+ on_delete=django.db.models.deletion.CASCADE, + to="doc.document", + db_index=False, + related_name='reversely_related_document_history_set', + ), + preserve_default=False, + ), + migrations.AddField( + model_name="relateddochistory", + name="originaltargetaliasname", + field=CharField(max_length=255,null=True,blank=True), + preserve_default=True, + ), + migrations.RunPython(forward, reverse), + migrations.AlterField( + model_name="relateddochistory", + name="target", + field=ietf.utils.models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="doc.document", + db_index=True, + related_name='reversely_related_document_history_set', + ), + ), + migrations.RemoveField( + model_name="relateddochistory", + name="deprecated_target", + field=ietf.utils.models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to='doc.DocAlias', + related_name='deprecated_reversely_related_document_history_set', + ), + ), + ] diff --git a/ietf/doc/migrations/0017_delete_docalias.py b/ietf/doc/migrations/0017_delete_docalias.py new file mode 100644 index 000000000..207ca81e1 --- /dev/null +++ b/ietf/doc/migrations/0017_delete_docalias.py @@ -0,0 +1,16 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations + + +class Migration(migrations.Migration): + dependencies = [ + ("ipr", "0002_iprdocrel_no_aliases"), + ("doc", "0016_relate_hist_no_aliases"), + ] + + operations = [ + migrations.DeleteModel( + name="DocAlias", + ), + ] diff --git a/ietf/doc/migrations/0018_move_dochistory.py b/ietf/doc/migrations/0018_move_dochistory.py new file mode 100644 index 000000000..0bc29b0bc --- /dev/null +++ b/ietf/doc/migrations/0018_move_dochistory.py @@ -0,0 +1,45 @@ +# Generated by Django 4.2.5 on 2023-09-11 17:52 + +from django.db import migrations + +from django.db.models import Subquery, OuterRef, F + + +def forward(apps, schema_editor): + DocHistory = apps.get_model("doc", "DocHistory") + RelatedDocument = apps.get_model("doc", "RelatedDocument") + Document = apps.get_model("doc", "Document") + DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc") + DocHistory.objects.filter( + type_id="draft", doc__type_id="draft", name__startswith="rfc" + ).annotate( + rfc_id=Subquery( + RelatedDocument.objects.filter( + source_id=OuterRef("doc_id"), relationship_id="became_rfc" + ).values_list("target_id", flat=True)[:1] + ) + ).update( + doc_id=F("rfc_id"), type_id="rfc" + ) + DocHistory.objects.filter(type_id="rfc").annotate( + rfcno=Subquery( + Document.objects.filter(pk=OuterRef("doc_id")).values_list( + "rfc_number", flat=True + )[:1] + ) + ).update(rfc_number=F("rfcno")) + assert not DocHistory.objects.filter( + name__startswith="rfc", type_id="draft" + ).exists() + assert not DocHistory.objects.filter( + type_id="rfc", rfc_number__isnull=True + ).exists() + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0017_delete_docalias"), + ] + + # There is no going back + operations = [migrations.RunPython(forward)] diff --git a/ietf/doc/migrations/0019_subseries.py b/ietf/doc/migrations/0019_subseries.py new file mode 100644 index 000000000..be2c612ac --- /dev/null +++ b/ietf/doc/migrations/0019_subseries.py @@ -0,0 +1,21 @@ +# Copyright The IETF Trust 2023, All Rights Reserved +from django.db import migrations + + +def forward(apps, schema_editor): + StateType = apps.get_model("doc", "StateType") + for slug in ["bcp", "std", "fyi"]: + StateType.objects.create(slug=slug, label=f"{slug} state") + + +def reverse(apps, 
schema_editor): + StateType = apps.get_model("doc", "StateType") + StateType.objects.filter(slug__in=["bcp", "std", "fyi"]).delete() + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0018_move_dochistory"), + ] + + operations = [migrations.RunPython(forward, reverse)] diff --git a/ietf/doc/migrations/0020_move_errata_tags.py b/ietf/doc/migrations/0020_move_errata_tags.py new file mode 100644 index 000000000..897b88f46 --- /dev/null +++ b/ietf/doc/migrations/0020_move_errata_tags.py @@ -0,0 +1,29 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations + +from django.db.models import Subquery, OuterRef, F + + +def forward(apps, schema_editor): + Document = apps.get_model("doc", "Document") + RelatedDocument = apps.get_model("doc", "RelatedDocument") + Document.tags.through.objects.filter( + doctagname_id__in=["errata", "verified-errata"], document__type_id="draft" + ).annotate( + rfcdoc=Subquery( + RelatedDocument.objects.filter( + relationship_id="became_rfc", source_id=OuterRef("document__pk") + ).values_list("target__pk", flat=True)[:1] + ) + ).update( + document_id=F("rfcdoc") + ) + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0019_subseries"), + ] + + operations = [migrations.RunPython(forward)] diff --git a/ietf/doc/models.py b/ietf/doc/models.py index 30d95fbf5..4d7a4f805 100644 --- a/ietf/doc/models.py +++ b/ietf/doc/models.py @@ -37,7 +37,6 @@ from ietf.name.models import ( DocTypeName, DocTagName, StreamName, IntendedStdL from ietf.person.models import Email, Person from ietf.person.utils import get_active_balloters from ietf.utils import log -from ietf.utils.admin import admin_link from ietf.utils.decorators import memoize from ietf.utils.validators import validate_no_control_chars from ietf.utils.mail import formataddr @@ -124,6 +123,7 @@ class DocumentInfo(models.Model): uploaded_filename = models.TextField(blank=True) note = models.TextField(blank=True) internal_comments = models.TextField(blank=True) + rfc_number = models.PositiveIntegerField(blank=True, null=True) # only valid for type="rfc" def file_extension(self): if not hasattr(self, '_cached_extension'): @@ -136,18 +136,17 @@ class DocumentInfo(models.Model): def get_file_path(self): if not hasattr(self, '_cached_file_path'): - if self.type_id == "draft": + if self.type_id == "rfc": + self._cached_file_path = settings.RFC_PATH + elif self.type_id == "draft": if self.is_dochistory(): self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR else: - if self.get_state_slug() == "rfc": - self._cached_file_path = settings.RFC_PATH + draft_state = self.get_state('draft') + if draft_state and draft_state.slug == 'active': + self._cached_file_path = settings.INTERNET_DRAFT_PATH else: - draft_state = self.get_state('draft') - if draft_state and draft_state.slug == 'active': - self._cached_file_path = settings.INTERNET_DRAFT_PATH - else: - self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR + self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR elif self.meeting_related() and self.type_id in ( "agenda", "minutes", "slides", "bluesheets", "procmaterials", "chatlog", "polls" ): @@ -172,17 +171,16 @@ class DocumentInfo(models.Model): if not hasattr(self, '_cached_base_name'): if self.uploaded_filename: self._cached_base_name = self.uploaded_filename + elif self.type_id == 'rfc': + self._cached_base_name = "%s.txt" % self.name elif self.type_id == 'draft': if self.is_dochistory(): self._cached_base_name = 
"%s-%s.txt" % (self.doc.name, self.rev) else: - if self.get_state_slug() == 'rfc': - self._cached_base_name = "%s.txt" % self.canonical_name() - else: - self._cached_base_name = "%s-%s.txt" % (self.name, self.rev) + self._cached_base_name = "%s-%s.txt" % (self.name, self.rev) elif self.type_id in ["slides", "agenda", "minutes", "bluesheets", "procmaterials", ] and self.meeting_related(): ext = 'pdf' if self.type_id == 'procmaterials' else 'txt' - self._cached_base_name = f'{self.canonical_name()}-{self.rev}.{ext}' + self._cached_base_name = f'{self.name}-{self.rev}.{ext}' elif self.type_id == 'review': # TODO: This will be wrong if a review is updated on the same day it was created (or updated more than once on the same day) self._cached_base_name = "%s.txt" % self.name @@ -190,9 +188,9 @@ class DocumentInfo(models.Model): self._cached_base_name = "%s-%s.md" % (self.name, self.rev) else: if self.rev: - self._cached_base_name = "%s-%s.txt" % (self.canonical_name(), self.rev) + self._cached_base_name = "%s-%s.txt" % (self.name, self.rev) else: - self._cached_base_name = "%s.txt" % (self.canonical_name(), ) + self._cached_base_name = "%s.txt" % (self.name, ) return self._cached_base_name def get_file_name(self): @@ -200,17 +198,28 @@ class DocumentInfo(models.Model): self._cached_file_name = os.path.join(self.get_file_path(), self.get_base_name()) return self._cached_file_name - def revisions(self): + + def revisions_by_dochistory(self): revisions = [] - doc = self.doc if isinstance(self, DocHistory) else self - for e in doc.docevent_set.filter(type='new_revision').distinct(): - if e.rev and not e.rev in revisions: - revisions.append(e.rev) - if not doc.rev in revisions: - revisions.append(doc.rev) - revisions.sort() + if self.type_id != "rfc": + for h in self.history_set.order_by("time", "id"): + if h.rev and not h.rev in revisions: + revisions.append(h.rev) + if not self.rev in revisions: + revisions.append(self.rev) return revisions + def revisions_by_newrevisionevent(self): + revisions = [] + if self.type_id != "rfc": + doc = self.doc if isinstance(self, DocHistory) else self + for e in doc.docevent_set.filter(type='new_revision').distinct(): + if e.rev and not e.rev in revisions: + revisions.append(e.rev) + if not doc.rev in revisions: + revisions.append(doc.rev) + revisions.sort() + return revisions def get_href(self, meeting=None): return self._get_ref(meeting=meeting,meeting_doc_refs=settings.MEETING_DOC_HREFS) @@ -244,7 +253,7 @@ class DocumentInfo(models.Model): format = settings.DOC_HREFS[self.type_id] elif self.type_id in settings.DOC_HREFS: self.is_meeting_related = False - if self.is_rfc(): + if self.type_id == "rfc": format = settings.DOC_HREFS['rfc'] else: format = settings.DOC_HREFS[self.type_id] @@ -334,7 +343,9 @@ class DocumentInfo(models.Model): if not state: return "Unknown state" - if self.type_id == 'draft': + if self.type_id == "rfc": + return f"RFC {self.rfc_number} ({self.std_level})" + elif self.type_id == 'draft': iesg_state = self.get_state("draft-iesg") iesg_state_summary = None if iesg_state: @@ -343,13 +354,15 @@ class DocumentInfo(models.Model): iesg_state_summary = iesg_state.name if iesg_substate: iesg_state_summary = iesg_state_summary + "::"+"::".join(tag.name for tag in iesg_substate) - - if state.slug == "rfc": - return "RFC %s (%s)" % (self.rfc_number(), self.std_level) + + rfc = self.became_rfc() + if rfc: + return f"Became RFC {rfc.rfc_number} ({rfc.std_level})" + elif state.slug == "repl": rs = self.related_that("replaces") if rs: - return 
mark_safe("Replaced by " + ", ".join("%s" % (urlreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=alias.document.name)), alias.document) for alias in rs)) + return mark_safe("Replaced by " + ", ".join("%s" % (urlreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=related.name)), related) for related in rs)) else: return "Replaced" elif state.slug == "active": @@ -375,27 +388,6 @@ class DocumentInfo(models.Model): else: return state.name - def is_rfc(self): - if not hasattr(self, '_cached_is_rfc'): - self._cached_is_rfc = self.pk and self.type_id == 'draft' and self.states.filter(type='draft',slug='rfc').exists() - return self._cached_is_rfc - - def rfc_number(self): - if not hasattr(self, '_cached_rfc_number'): - self._cached_rfc_number = None - if self.is_rfc(): - n = self.canonical_name() - if n.startswith("rfc"): - self._cached_rfc_number = n[3:] - else: - if isinstance(self,Document): - logger.error("Document self.is_rfc() is True but self.canonical_name() is %s" % n) - return self._cached_rfc_number - - @property - def rfcnum(self): - return self.rfc_number() - def author_list(self): best_addresses = [] for author in self.documentauthor_set.all(): @@ -468,9 +460,9 @@ class DocumentInfo(models.Model): if not isinstance(relationship, tuple): raise TypeError("Expected a string or tuple, received %s" % type(relationship)) if isinstance(self, Document): - return RelatedDocument.objects.filter(target__docs=self, relationship__in=relationship).select_related('source') + return RelatedDocument.objects.filter(target=self, relationship__in=relationship).select_related('source') elif isinstance(self, DocHistory): - return RelatedDocHistory.objects.filter(target__docs=self.doc, relationship__in=relationship).select_related('source') + return RelatedDocHistory.objects.filter(target=self.doc, relationship__in=relationship).select_related('source') else: raise TypeError("Expected method called on Document or DocHistory") @@ -504,15 +496,14 @@ class DocumentInfo(models.Model): for r in rels: if not r in related: related += ( r, ) - for doc in r.target.docs.all(): - related = doc.all_relations_that_doc(relationship, related) + related = r.target.all_relations_that_doc(relationship, related) return related def related_that(self, relationship): - return list(set([x.source.docalias.get(name=x.source.name) for x in self.relations_that(relationship)])) + return list(set([x.source for x in self.relations_that(relationship)])) def all_related_that(self, relationship, related=None): - return list(set([x.source.docalias.get(name=x.source.name) for x in self.all_relations_that(relationship)])) + return list(set([x.source for x in self.all_relations_that(relationship)])) def related_that_doc(self, relationship): return list(set([x.target for x in self.relations_that_doc(relationship)])) @@ -521,12 +512,7 @@ class DocumentInfo(models.Model): return list(set([x.target for x in self.all_relations_that_doc(relationship)])) def replaces(self): - return set([ d for r in self.related_that_doc("replaces") for d in r.docs.all() ]) - - def replaces_canonical_name(self): - s = set([ r.document for r in self.related_that_doc("replaces")]) - first = list(s)[0] if s else None - return None if first is None else first.filename_with_rev() + return self.related_that_doc("replaces") def replaced_by(self): return set([ r.document for r in self.related_that("replaces") ]) @@ -553,10 +539,10 @@ class DocumentInfo(models.Model): return self.text() or "Error; cannot read '%s'"%self.get_base_name() def 
html_body(self, classes=""): - if self.get_state_slug() == "rfc": + if self.type_id == "rfc": try: html = Path( - os.path.join(settings.RFC_PATH, self.canonical_name() + ".html") + os.path.join(settings.RFC_PATH, self.name + ".html") ).read_text() except (IOError, UnicodeDecodeError): return None @@ -656,10 +642,38 @@ class DocumentInfo(models.Model): return self.relations_that_doc(('refnorm','refinfo','refunk','refold')) def referenced_by(self): - return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug__in=['rfc','active']) - + return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter( + models.Q( + source__type__slug="draft", + source__states__type__slug="draft", + source__states__slug="active", + ) + | models.Q(source__type__slug="rfc") + ) + + def referenced_by_rfcs(self): - return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug='rfc') + return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter( + source__type__slug="rfc" + ) + + def became_rfc(self): + if not hasattr(self, "_cached_became_rfc"): + doc = self if isinstance(self, Document) else self.doc + self._cached_became_rfc = next(iter(doc.related_that_doc("became_rfc")), None) + return self._cached_became_rfc + + def came_from_draft(self): + if not hasattr(self, "_cached_came_from_draft"): + doc = self if isinstance(self, Document) else self.doc + self._cached_came_from_draft = next(iter(doc.related_that("became_rfc")), None) + return self._cached_came_from_draft + + def contains(self): + return self.related_that_doc("contains") + + def part_of(self): + return self.related_that("contains") class Meta: abstract = True @@ -668,42 +682,40 @@ STATUSCHANGE_RELATIONS = ('tops','tois','tohist','toinf','tobcp','toexp') class RelatedDocument(models.Model): source = ForeignKey('Document') - target = ForeignKey('DocAlias') + target = ForeignKey('Document', related_name='targets_related') relationship = ForeignKey(DocRelationshipName) + originaltargetaliasname = models.CharField(max_length=255, null=True, blank=True) def action(self): return self.relationship.name def __str__(self): return u"%s %s %s" % (self.source.name, self.relationship.name.lower(), self.target.name) def is_downref(self): - if self.source.type.slug != "draft" or self.relationship.slug not in [ + if self.source.type_id not in ["draft","rfc"] or self.relationship.slug not in [ "refnorm", "refold", "refunk", ]: return None - state = self.source.get_state() - if state and state.slug == "rfc": - source_lvl = self.source.std_level.slug if self.source.std_level else None - elif self.source.intended_std_level: - source_lvl = self.source.intended_std_level.slug + if self.source.type_id == "rfc": + source_lvl = self.source.std_level_id else: - source_lvl = None + source_lvl = self.source.intended_std_level_id if source_lvl not in ["bcp", "ps", "ds", "std", "unkn"]: return None - if self.target.document.get_state().slug == "rfc": - if not self.target.document.std_level: - target_lvl = "unkn" + if self.target.type_id == 'rfc': + if not self.target.std_level: + target_lvl = 'unkn' else: - target_lvl = self.target.document.std_level.slug + target_lvl = self.target.std_level_id else: - if not self.target.document.intended_std_level: - target_lvl = "unkn" + if not self.target.intended_std_level: + target_lvl = 'unkn' else: - target_lvl = self.target.document.intended_std_level.slug + target_lvl = 
self.target.intended_std_level_id if self.relationship.slug not in ["refnorm", "refunk"]: return None @@ -712,7 +724,7 @@ class RelatedDocument(models.Model): return None pos_downref = ( - "Downref" if self.relationship.slug != "refunk" else "Possible Downref" + "Downref" if self.relationship_id != "refunk" else "Possible Downref" ) if source_lvl in ["bcp", "ps", "ds", "std"] and target_lvl in ["inf", "exp"]: @@ -734,8 +746,8 @@ class RelatedDocument(models.Model): def is_approved_downref(self): - if self.target.document.get_state().slug == 'rfc': - if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target): + if self.target.type_id == 'rfc': + if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target).exists(): return "Approved Downref" return False @@ -831,7 +843,7 @@ class Document(DocumentInfo): name = self.name url = None if self.type_id == "draft" and self.get_state_slug() == "rfc": - name = self.canonical_name() + name = self.name url = urlreverse('ietf.doc.views_doc.document_main', kwargs={ 'name': name }, urlconf="ietf.urls") elif self.type_id in ('slides','bluesheets','recording'): session = self.session_set.first() @@ -869,28 +881,8 @@ class Document(DocumentInfo): e = model.objects.filter(doc=self).filter(**filter_args).order_by('-time', '-id').first() return e - def canonical_name(self): - if not hasattr(self, '_canonical_name'): - name = self.name - if self.type_id == "draft" and self.get_state_slug() == "rfc": - a = self.docalias.filter(name__startswith="rfc").order_by('-name').first() - if a: - name = a.name - elif self.type_id == "charter": - from ietf.doc.utils_charter import charter_name_for_group # Imported locally to avoid circular imports - try: - name = charter_name_for_group(self.chartered_group) - except Group.DoesNotExist: - pass - self._canonical_name = name - return self._canonical_name - - - def canonical_docalias(self): - return self.docalias.get(name=self.name) - def display_name(self): - name = self.canonical_name() + name = self.name if name.startswith('rfc'): name = name.upper() return name @@ -985,17 +977,27 @@ class Document(DocumentInfo): def ipr(self,states=settings.PUBLISH_IPR_STATES): """Returns the IPR disclosures against this document (as a queryset over IprDocRel).""" - from ietf.ipr.models import IprDocRel - return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states) + # from ietf.ipr.models import IprDocRel + # return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states) # TODO - clear these comments away + return self.iprdocrel_set.filter(disclosure__state__in=states) def related_ipr(self): """Returns the IPR disclosures against this document and those documents this document directly or indirectly obsoletes or replaces """ from ietf.ipr.models import IprDocRel - iprs = IprDocRel.objects.filter(document__in=list(self.docalias.all())+self.all_related_that_doc(('obs','replaces'))).filter(disclosure__state__in=settings.PUBLISH_IPR_STATES).values_list('disclosure', flat=True).distinct() + iprs = ( + IprDocRel.objects.filter( + document__in=[self] + + self.all_related_that_doc(("obs", "replaces")) + ) + .filter(disclosure__state__in=settings.PUBLISH_IPR_STATES) + .values_list("disclosure", flat=True) + .distinct() + ) return iprs + def future_presentations(self): """ returns related SessionPresentation objects for meetings that have not yet ended. 
This implementation allows for 2 week meetings """ @@ -1030,7 +1032,7 @@ class Document(DocumentInfo): This is the rfc publication date for RFCs, and the new-revision date for other documents. """ - if self.get_state_slug() == "rfc": + if self.type_id == "rfc": # As of Sept 2022, in ietf.sync.rfceditor.update_docs_from_rfc_index() `published_rfc` events are # created with a timestamp whose date *in the PST8PDT timezone* is the official publication date # assigned by the RFC editor. @@ -1132,8 +1134,9 @@ class DocExtResource(ExtResource): class RelatedDocHistory(models.Model): source = ForeignKey('DocHistory') - target = ForeignKey('DocAlias', related_name="reversely_related_document_history_set") + target = ForeignKey('Document', related_name="reversely_related_document_history_set") relationship = ForeignKey(DocRelationshipName) + originaltargetaliasname = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return u"%s %s %s" % (self.source.doc.name, self.relationship.name.lower(), self.target.name) @@ -1147,10 +1150,7 @@ class DocHistoryAuthor(DocumentAuthorInfo): class DocHistory(DocumentInfo): doc = ForeignKey(Document, related_name="history_set") - # the name here is used to capture the canonical name at the time - # - it would perhaps be more elegant to simply call the attribute - # canonical_name and replace the function on Document with a - # property + name = models.CharField(max_length=255) def __str__(self): @@ -1162,11 +1162,6 @@ class DocHistory(DocumentInfo): def get_related_proceedings_material(self): return self.doc.get_related_proceedings_material() - def canonical_name(self): - if hasattr(self, '_canonical_name'): - return self._canonical_name - return self.name - def latest_event(self, *args, **kwargs): kwargs["time__lte"] = self.time return self.doc.latest_event(*args, **kwargs) @@ -1181,10 +1176,6 @@ class DocHistory(DocumentInfo): def groupmilestone_set(self): return self.doc.groupmilestone_set - @property - def docalias(self): - return self.doc.docalias - def is_dochistory(self): return True @@ -1202,25 +1193,6 @@ class DocHistory(DocumentInfo): verbose_name = "document history" verbose_name_plural = "document histories" -class DocAlias(models.Model): - """This is used for documents that may appear under multiple names, - and in particular for RFCs, which for continuity still keep the - same immutable Document.name, in the tables, but will be referred - to by RFC number, primarily, after achieving RFC status. 
- """ - name = models.CharField(max_length=255, unique=True) - docs = models.ManyToManyField(Document, related_name='docalias') - - @property - def document(self): - return self.docs.first() - - def __str__(self): - return u"%s-->%s" % (self.name, ','.join([force_str(d.name) for d in self.docs.all() if isinstance(d, Document) ])) - document_link = admin_link("document") - class Meta: - verbose_name = "document alias" - verbose_name_plural = "document aliases" class DocReminder(models.Model): event = ForeignKey('DocEvent') diff --git a/ietf/doc/resources.py b/ietf/doc/resources.py index 99e26ac33..6bb6ffa28 100644 --- a/ietf/doc/resources.py +++ b/ietf/doc/resources.py @@ -12,7 +12,7 @@ from tastypie.cache import SimpleCache from ietf import api from ietf.doc.models import (BallotType, DeletedEvent, StateType, State, Document, - DocumentAuthor, DocEvent, StateDocEvent, DocHistory, ConsensusDocEvent, DocAlias, + DocumentAuthor, DocEvent, StateDocEvent, DocHistory, ConsensusDocEvent, TelechatDocEvent, DocReminder, LastCallDocEvent, NewRevisionDocEvent, WriteupDocEvent, InitialReviewDocEvent, DocHistoryAuthor, BallotDocEvent, RelatedDocument, RelatedDocHistory, BallotPositionDocEvent, AddedMessageEvent, SubmissionDocEvent, @@ -286,21 +286,6 @@ class ConsensusDocEventResource(ModelResource): } api.doc.register(ConsensusDocEventResource()) -class DocAliasResource(ModelResource): - document = ToOneField(DocumentResource, 'document') - class Meta: - cache = SimpleCache() - queryset = DocAlias.objects.all() - serializer = api.Serializer() - detail_uri_name = 'name' - #resource_name = 'docalias' - ordering = ['id', ] - filtering = { - "name": ALL, - "document": ALL_WITH_RELATIONS, - } -api.doc.register(DocAliasResource()) - from ietf.person.resources import PersonResource class TelechatDocEventResource(ModelResource): by = ToOneField(PersonResource, 'by') @@ -490,7 +475,7 @@ api.doc.register(BallotDocEventResource()) from ietf.name.resources import DocRelationshipNameResource class RelatedDocumentResource(ModelResource): source = ToOneField(DocumentResource, 'source') - target = ToOneField(DocAliasResource, 'target') + target = ToOneField(DocumentResource, 'target') relationship = ToOneField(DocRelationshipNameResource, 'relationship') class Meta: cache = SimpleCache() @@ -509,7 +494,7 @@ api.doc.register(RelatedDocumentResource()) from ietf.name.resources import DocRelationshipNameResource class RelatedDocHistoryResource(ModelResource): source = ToOneField(DocHistoryResource, 'source') - target = ToOneField(DocAliasResource, 'target') + target = ToOneField(DocumentResource, 'target') relationship = ToOneField(DocRelationshipNameResource, 'relationship') class Meta: cache = SimpleCache() diff --git a/ietf/doc/templatetags/ietf_filters.py b/ietf/doc/templatetags/ietf_filters.py index c0ea94ab7..8d9336b53 100644 --- a/ietf/doc/templatetags/ietf_filters.py +++ b/ietf/doc/templatetags/ietf_filters.py @@ -22,7 +22,7 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.models import BallotDocEvent, DocAlias +from ietf.doc.models import BallotDocEvent, Document from ietf.doc.models import ConsensusDocEvent from ietf.ietfauth.utils import can_request_rfc_publication as utils_can_request_rfc_publication from ietf.utils.html import sanitize_fragment @@ -139,15 +139,16 @@ def rfceditor_info_url(rfcnum : str): return urljoin(settings.RFC_EDITOR_INFO_BASE_URL, f'rfc{rfcnum}') -def doc_canonical_name(name): +def doc_name(name): """Check whether a given document exists, and return its 
canonical name""" def find_unique(n): key = hash(n) found = cache.get(key) if not found: - exact = DocAlias.objects.filter(name=n).first() + exact = Document.objects.filter(name=n).first() found = exact.name if exact else "_" + # TODO review this cache policy (and the need for these entire function) cache.set(key, found, timeout=60*60*24) # cache for one day return None if found == "_" else found @@ -173,7 +174,7 @@ def doc_canonical_name(name): def link_charter_doc_match(match): - if not doc_canonical_name(match[0]): + if not doc_name(match[0]): return match[0] url = urlreverse( "ietf.doc.views_doc.document_main", @@ -186,7 +187,7 @@ def link_non_charter_doc_match(match): name = match[0] # handle "I-D.*"" reference-style matches name = re.sub(r"^i-d\.(.*)", r"draft-\1", name, flags=re.IGNORECASE) - cname = doc_canonical_name(name) + cname = doc_name(name) if not cname: return match[0] if name == cname: @@ -201,7 +202,7 @@ def link_non_charter_doc_match(match): url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=cname)) return f'{match[0]}' - cname = doc_canonical_name(name) + cname = doc_name(name) if not cname: return match[0] if name == cname: @@ -221,12 +222,11 @@ def link_non_charter_doc_match(match): def link_other_doc_match(match): doc = match[2].strip().lower() rev = match[3] - if not doc_canonical_name(doc + rev): + if not doc_name(doc + rev): return match[0] url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=doc + rev)) return f'{match[1]}' - @register.filter(name="urlize_ietf_docs", is_safe=True, needs_autoescape=True) def urlize_ietf_docs(string, autoescape=None): """ @@ -255,6 +255,7 @@ def urlize_ietf_docs(string, autoescape=None): string, flags=re.IGNORECASE | re.ASCII, ) + return mark_safe(string) @@ -267,7 +268,7 @@ def urlize_related_source_list(related, document_html=False): names = set() titles = set() for rel in related: - name=rel.source.canonical_name() + name=rel.source.name title = rel.source.title if name in names and title in titles: continue @@ -288,8 +289,8 @@ def urlize_related_target_list(related, document_html=False): """Convert a list of RelatedDocuments into list of links using the target document's canonical name""" links = [] for rel in related: - name=rel.target.document.canonical_name() - title = rel.target.document.title + name=rel.target.name + title = rel.target.title url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name)) name = escape(name) title = escape(title) @@ -556,7 +557,7 @@ def consensus(doc): @register.filter def std_level_to_label_format(doc): """Returns valid Bootstrap classes to label a status level badge.""" - if doc.is_rfc(): + if doc.type_id == "rfc": if doc.related_that("obs"): return "obs" else: diff --git a/ietf/doc/templatetags/tests_ietf_filters.py b/ietf/doc/templatetags/tests_ietf_filters.py index f791d6153..72796abeb 100644 --- a/ietf/doc/templatetags/tests_ietf_filters.py +++ b/ietf/doc/templatetags/tests_ietf_filters.py @@ -3,12 +3,12 @@ from django.conf import settings from ietf.doc.factories import ( - WgDraftFactory, + WgRfcFactory, IndividualDraftFactory, CharterFactory, NewRevisionDocEventFactory, ) -from ietf.doc.models import State, DocEvent, DocAlias +from ietf.doc.models import DocEvent from ietf.doc.templatetags.ietf_filters import urlize_ietf_docs, is_valid_url from ietf.person.models import Person from ietf.utils.test_utils import TestCase @@ -25,23 +25,21 @@ class 
IetfFiltersTests(TestCase): self.assertEqual(is_valid_url(url), result) def test_urlize_ietf_docs(self): - wg_id = WgDraftFactory() - wg_id.set_state(State.objects.get(type="draft", slug="rfc")) - wg_id.std_level_id = "bcp" - wg_id.save_with_history( + rfc = WgRfcFactory(rfc_number=123456,std_level_id="bcp") + rfc.save_with_history( [ DocEvent.objects.create( - doc=wg_id, - rev=wg_id.rev, + doc=rfc, + rev=rfc.rev, type="published_rfc", by=Person.objects.get(name="(System)"), ) ] ) - DocAlias.objects.create(name="rfc123456").docs.add(wg_id) - DocAlias.objects.create(name="bcp123456").docs.add(wg_id) - DocAlias.objects.create(name="std123456").docs.add(wg_id) - DocAlias.objects.create(name="fyi123456").docs.add(wg_id) + # TODO - bring these into existence when subseries are well modeled + # DocAlias.objects.create(name="bcp123456").docs.add(rfc) + # DocAlias.objects.create(name="std123456").docs.add(rfc) + # DocAlias.objects.create(name="fyi123456").docs.add(rfc) id = IndividualDraftFactory(name="draft-me-rfc123456bis") id_num = IndividualDraftFactory(name="draft-rosen-rfcefdp-update-2026") @@ -59,15 +57,17 @@ class IetfFiltersTests(TestCase): cases = [ ("no change", "no change"), - ("bCp123456", 'bCp123456'), - ("Std 00123456", 'Std 00123456'), - ( - "FyI 0123456 changes std 00123456", - 'FyI 0123456 changes std 00123456', - ), + + # TODO: rework subseries when we add them + # ("bCp123456", 'bCp123456'), + # ("Std 00123456", 'Std 00123456'), + # ( + # "FyI 0123456 changes std 00123456", + # 'FyI 0123456 changes std 00123456', + # ), ("rfc123456", 'rfc123456'), ("Rfc 0123456", 'Rfc 0123456'), - (wg_id.name, f'{wg_id.name}'), + (rfc.name, f'{rfc.name}'), ( f"{id.name}-{id.rev}.txt", f'{id.name}-{id.rev}.txt', diff --git a/ietf/doc/tests.py b/ietf/doc/tests.py index ace55a0d7..f14b5b1af 100644 --- a/ietf/doc/tests.py +++ b/ietf/doc/tests.py @@ -33,7 +33,7 @@ from tastypie.test import ResourceTestCaseMixin import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocAlias, DocRelationshipName, RelatedDocument, State, +from ietf.doc.models import ( Document, DocRelationshipName, RelatedDocument, State, DocEvent, BallotPositionDocEvent, LastCallDocEvent, WriteupDocEvent, NewRevisionDocEvent, BallotType, EditedAuthorsDocEvent ) from ietf.doc.factories import ( DocumentFactory, DocEventFactory, CharterFactory, @@ -66,6 +66,7 @@ class SearchTests(TestCase): def test_search(self): draft = WgDraftFactory(name='draft-ietf-mars-test',group=GroupFactory(acronym='mars',parent=Group.objects.get(acronym='farfut')),authors=[PersonFactory()],ad=PersonFactory()) + rfc = WgRfcFactory() draft.set_state(State.objects.get(used=True, type="draft-iesg", slug="pub-req")) old_draft = IndividualDraftFactory(name='draft-foo-mars-test',authors=[PersonFactory()],title="Optimizing Martian Network Topologies") old_draft.set_state(State.objects.get(used=True, type="draft", slug="expired")) @@ -97,11 +98,12 @@ self.assertEqual(r.status_code, 200) self.assertContains(r, "draft-foo-mars-test") - # find by rfc/active/inactive - draft.set_state(State.objects.get(type="draft", slug="rfc")) - r = self.client.get(base_url + "?rfcs=on&name=%s" % draft.name) + # find by RFC + r = self.client.get(base_url + "?rfcs=on&name=%s" % rfc.name) self.assertEqual(r.status_code, 200) - self.assertContains(r, draft.title) + self.assertContains(r, rfc.title) + + # find by active/inactive draft.set_state(State.objects.get(type="draft", slug="active")) r = self.client.get(base_url +
"?activedrafts=on&name=%s" % draft.name) @@ -340,9 +342,7 @@ class SearchTests(TestCase): draft = IndividualDraftFactory(ad=ad) draft.action_holders.set([PersonFactory()]) draft.set_state(State.objects.get(type='draft-iesg', slug='lc')) - rfc = IndividualDraftFactory(ad=ad) - rfc.set_state(State.objects.get(type='draft', slug='rfc')) - DocAlias.objects.create(name='rfc6666').docs.add(rfc) + rfc = IndividualRfcFactory(ad=ad) conflrev = DocumentFactory(type_id='conflrev',ad=ad) conflrev.set_state(State.objects.get(type='conflrev', slug='iesgeval')) statchg = DocumentFactory(type_id='statchg',ad=ad) @@ -366,7 +366,7 @@ class SearchTests(TestCase): self.assertEqual(r.status_code, 200) self.assertContains(r, draft.name) self.assertContains(r, escape(draft.action_holders.first().name)) - self.assertContains(r, rfc.canonical_name()) + self.assertContains(r, rfc.name) self.assertContains(r, conflrev.name) self.assertContains(r, statchg.name) self.assertContains(r, charter.name) @@ -414,16 +414,17 @@ class SearchTests(TestCase): r = self.client.get(urlreverse('ietf.doc.views_search.index_all_drafts')) self.assertEqual(r.status_code, 200) self.assertContains(r, draft.name) - self.assertContains(r, rfc.canonical_name().upper()) + self.assertContains(r, rfc.name.upper()) r = self.client.get(urlreverse('ietf.doc.views_search.index_active_drafts')) self.assertEqual(r.status_code, 200) self.assertContains(r, draft.title) def test_ajax_search_docs(self): - draft = IndividualDraftFactory() + draft = IndividualDraftFactory(name="draft-ietf-rfc1234bis") + rfc = IndividualRfcFactory(rfc_number=1234) + bcp = IndividualRfcFactory(name="bcp12345", type_id="bcp") - # Document url = urlreverse('ietf.doc.views_search.ajax_select2_search_docs', kwargs={ "model_name": "document", "doc_type": "draft", @@ -433,18 +434,27 @@ class SearchTests(TestCase): data = r.json() self.assertEqual(data[0]["id"], draft.pk) - # DocAlias - doc_alias = draft.docalias.first() - url = urlreverse('ietf.doc.views_search.ajax_select2_search_docs', kwargs={ - "model_name": "docalias", - "doc_type": "draft", + "model_name": "document", + "doc_type": "rfc", }) - - r = self.client.get(url, dict(q=doc_alias.name)) + r = self.client.get(url, dict(q=rfc.name)) self.assertEqual(r.status_code, 200) data = r.json() - self.assertEqual(data[0]["id"], doc_alias.pk) + self.assertEqual(data[0]["id"], rfc.pk) + + url = urlreverse('ietf.doc.views_search.ajax_select2_search_docs', kwargs={ + "model_name": "document", + "doc_type": "all", + }) + r = self.client.get(url, dict(q="1234")) + self.assertEqual(r.status_code, 200) + data = r.json() + self.assertEqual(len(data), 3) + pks = set([data[i]["id"] for i in range(3)]) + self.assertEqual(pks, set([bcp.pk, rfc.pk, draft.pk])) + + def test_recent_drafts(self): # Three drafts to show with various warnings @@ -648,23 +658,22 @@ Man Expires September 22, 2015 [Page 3] def test_document_draft(self): draft = WgDraftFactory(name='draft-ietf-mars-test',rev='01', create_revisions=range(0,2)) - HolderIprDisclosureFactory(docs=[draft]) # Docs for testing relationships. Does not test 'possibly-replaces'. The 'replaced_by' direction # is tested separately below. 
replaced = IndividualDraftFactory() - draft.relateddocument_set.create(relationship_id='replaces',source=draft,target=replaced.docalias.first()) + draft.relateddocument_set.create(relationship_id='replaces',source=draft,target=replaced) obsoleted = IndividualDraftFactory() - draft.relateddocument_set.create(relationship_id='obs',source=draft,target=obsoleted.docalias.first()) + draft.relateddocument_set.create(relationship_id='obs',source=draft,target=obsoleted) obsoleted_by = IndividualDraftFactory() - obsoleted_by.relateddocument_set.create(relationship_id='obs',source=obsoleted_by,target=draft.docalias.first()) + obsoleted_by.relateddocument_set.create(relationship_id='obs',source=obsoleted_by,target=draft) updated = IndividualDraftFactory() - draft.relateddocument_set.create(relationship_id='updates',source=draft,target=updated.docalias.first()) + draft.relateddocument_set.create(relationship_id='updates',source=draft,target=updated) updated_by = IndividualDraftFactory() - updated_by.relateddocument_set.create(relationship_id='updates',source=obsoleted_by,target=draft.docalias.first()) + updated_by.relateddocument_set.create(relationship_id='updates',source=obsoleted_by,target=draft) - external_resource = DocExtResourceFactory(doc=draft) + DocExtResourceFactory(doc=draft) # these tests aren't testing all attributes yet, feel free to # expand them @@ -675,69 +684,32 @@ Man Expires September 22, 2015 [Page 3] if settings.USER_PREFERENCE_DEFAULTS['full_draft'] == 'off': self.assertContains(r, "Show full document") self.assertNotContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) - self.assertContains(r, external_resource.value) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=0") self.assertEqual(r.status_code, 200) self.assertContains(r, "Active Internet-Draft") self.assertContains(r, "Show full document") self.assertNotContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=foo") self.assertEqual(r.status_code, 200) self.assertContains(r, "Active Internet-Draft") self.assertNotContains(r, "Show full document") self.assertContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included 
until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name)) + "?include_text=1") self.assertEqual(r.status_code, 200) self.assertContains(r, "Active Internet-Draft") self.assertNotContains(r, "Show full document") self.assertContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) self.client.cookies = SimpleCookie({str('full_draft'): str('on')}) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name))) @@ -745,17 +717,8 @@ Man Expires September 22, 2015 [Page 3] self.assertContains(r, "Active Internet-Draft") self.assertNotContains(r, "Show full document") self.assertContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) self.client.cookies = SimpleCookie({str('full_draft'): str('off')}) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name))) @@ -763,17 +726,8 @@ Man Expires September 22, 2015 [Page 3] self.assertContains(r, "Active Internet-Draft") self.assertContains(r, "Show full document") self.assertNotContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) self.client.cookies = SimpleCookie({str('full_draft'): str('foo')}) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name))) @@ -782,17 +736,8 @@ Man Expires September 22, 2015 [Page 3] if settings.USER_PREFERENCE_DEFAULTS['full_draft'] == 'off': self.assertContains(r, "Show full document") 
self.assertNotContains(r, "Deimos street") - self.assertContains(r, replaced.canonical_name()) + self.assertContains(r, replaced.name) self.assertContains(r, replaced.title) - # obs/updates not included until draft is RFC - self.assertNotContains(r, obsoleted.canonical_name()) - self.assertNotContains(r, obsoleted.title) - self.assertNotContains(r, obsoleted_by.canonical_name()) - self.assertNotContains(r, obsoleted_by.title) - self.assertNotContains(r, updated.canonical_name()) - self.assertNotContains(r, updated.title) - self.assertNotContains(r, updated_by.canonical_name()) - self.assertNotContains(r, updated_by.title) r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=draft.name))) self.assertEqual(r.status_code, 200) @@ -818,16 +763,16 @@ Man Expires September 22, 2015 [Page 3] rfc = WgRfcFactory() rfc.save_with_history([DocEventFactory(doc=rfc)]) (Path(settings.RFC_PATH) / rfc.get_base_name()).touch() - r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.canonical_name()))) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.name))) self.assertEqual(r.status_code, 200) q = PyQuery(r.content) - self.assertEqual(q('title').text(), f'RFC {rfc.rfc_number()} - {rfc.title}') + self.assertEqual(q('title').text(), f'RFC {rfc.rfc_number} - {rfc.title}') # synonyms for the rfc should be redirected to its canonical view - r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.rfc_number()))) - self.assertRedirects(r, urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.canonical_name()))) - r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=f'RFC {rfc.rfc_number()}'))) - self.assertRedirects(r, urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.canonical_name()))) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.rfc_number))) + self.assertRedirects(r, urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.name))) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=f'RFC {rfc.rfc_number}'))) + self.assertRedirects(r, urlreverse("ietf.doc.views_doc.document_html", kwargs=dict(name=rfc.name))) # expired draft draft.set_state(State.objects.get(type="draft", slug="expired")) @@ -848,46 +793,49 @@ Man Expires September 22, 2015 [Page 3] shepherd_id=draft.shepherd_id, ad_id=draft.ad_id, expires=draft.expires, notify=draft.notify) rel = RelatedDocument.objects.create(source=replacement, - target=draft.docalias.get(name__startswith="draft"), + target=draft, relationship_id="replaces") r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name))) self.assertEqual(r.status_code, 200) self.assertContains(r, "Replaced Internet-Draft") - self.assertContains(r, replacement.canonical_name()) + self.assertContains(r, replacement.name) self.assertContains(r, replacement.title) rel.delete() # draft published as RFC draft.set_state(State.objects.get(type="draft", slug="rfc")) - draft.std_level_id = "bcp" - draft.save_with_history([DocEvent.objects.create(doc=draft, rev=draft.rev, type="published_rfc", by=Person.objects.get(name="(System)"))]) + draft.std_level_id = "ps" + rfc = WgRfcFactory(group=draft.group, name="rfc123456") + rfc.save_with_history([DocEvent.objects.create(doc=rfc, rev=None, type="published_rfc", by=Person.objects.get(name="(System)"))]) - rfc_alias = 
DocAlias.objects.create(name="rfc123456") - rfc_alias.docs.add(draft) - bcp_alias = DocAlias.objects.create(name="bcp123456") - bcp_alias.docs.add(draft) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) + + obsoleted = IndividualRfcFactory() + rfc.relateddocument_set.create(relationship_id='obs',target=obsoleted) + obsoleted_by = IndividualRfcFactory() + obsoleted_by.relateddocument_set.create(relationship_id='obs',target=rfc) + updated = IndividualRfcFactory() + rfc.relateddocument_set.create(relationship_id='updates',target=updated) + updated_by = IndividualRfcFactory() + updated_by.relateddocument_set.create(relationship_id='updates',target=rfc) r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name))) self.assertEqual(r.status_code, 302) - r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=bcp_alias.name))) - self.assertEqual(r.status_code, 302) - r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_alias.name))) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))) self.assertEqual(r.status_code, 200) self.assertContains(r, "RFC 123456") self.assertContains(r, draft.name) - self.assertContains(r, replaced.canonical_name()) - self.assertContains(r, replaced.title) # obs/updates included with RFC - self.assertContains(r, obsoleted.canonical_name()) + self.assertContains(r, obsoleted.name) self.assertContains(r, obsoleted.title) - self.assertContains(r, obsoleted_by.canonical_name()) + self.assertContains(r, obsoleted_by.name) self.assertContains(r, obsoleted_by.title) - self.assertContains(r, updated.canonical_name()) + self.assertContains(r, updated.name) self.assertContains(r, updated.title) - self.assertContains(r, updated_by.canonical_name()) + self.assertContains(r, updated_by.name) self.assertContains(r, updated_by.title) # naked RFC - also weird that we test a PS from the ISE @@ -920,7 +868,7 @@ Man Expires September 22, 2015 [Page 3] draft = WgRfcFactory() status_change_doc = StatusChangeFactory( group=draft.group, - changes_status_of=[('tops', draft.docalias.first())], + changes_status_of=[('tops', draft)], ) status_change_url = urlreverse( 'ietf.doc.views_doc.document_main', @@ -928,7 +876,7 @@ Man Expires September 22, 2015 [Page 3] ) proposed_status_change_doc = StatusChangeFactory( group=draft.group, - changes_status_of=[('tobcp', draft.docalias.first())], + changes_status_of=[('tobcp', draft)], states=[State.objects.get(slug='needshep', type='statchg')], ) proposed_status_change_url = urlreverse( @@ -939,7 +887,7 @@ Man Expires September 22, 2015 [Page 3] r = self.client.get( urlreverse( 'ietf.doc.views_doc.document_main', - kwargs={'name': draft.canonical_name()}, + kwargs={'name': draft.name}, ) ) self.assertEqual(r.status_code, 200) @@ -1519,11 +1467,11 @@ Man Expires September 22, 2015 [Page 3] self.assertEqual(r.status_code, 200) self.assert_correct_wg_group_link(r, group) - rfc = WgRfcFactory(name='draft-rfc-document-%s' % group_type_id, group=group) + rfc = WgRfcFactory(group=group) + draft = WgDraftFactory(group=group) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) DocEventFactory.create(doc=rfc, type='published_rfc', time=event_datetime) - # get the rfc name to avoid a redirect - rfc_name = rfc.docalias.filter(name__startswith='rfc').first().name - r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_name))) + r = 
self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))) self.assertEqual(r.status_code, 200) self.assert_correct_wg_group_link(r, group) @@ -1534,11 +1482,11 @@ Man Expires September 22, 2015 [Page 3] self.assertEqual(r.status_code, 200) self.assert_correct_non_wg_group_link(r, group) - rfc = WgRfcFactory(name='draft-rfc-document-%s' % group_type_id, group=group) + rfc = WgRfcFactory(group=group) + draft = WgDraftFactory(name='draft-rfc-document-%s'% group_type_id, group=group) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) DocEventFactory.create(doc=rfc, type='published_rfc', time=event_datetime) - # get the rfc name to avoid a redirect - rfc_name = rfc.docalias.filter(name__startswith='rfc').first().name - r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc_name))) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))) self.assertEqual(r.status_code, 200) self.assert_correct_non_wg_group_link(r, group) @@ -1639,8 +1587,8 @@ class DocTestCase(TestCase): statchg = StatusChangeFactory() r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=statchg.name))) self.assertEqual(r.status_code, 200) - r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=statchg.relateddocument_set.first().target.document))) - self.assertEqual(r.status_code, 302) + r = self.client.get(urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=statchg.relateddocument_set.first().target))) + self.assertEqual(r.status_code, 200) def test_document_charter(self): CharterFactory(name='charter-ietf-mars') @@ -1804,8 +1752,8 @@ class DocTestCase(TestCase): self.assertNotContains(r, 'more YES or NO') # status change - DocAlias.objects.create(name='rfc9998').docs.add(IndividualDraftFactory()) - DocAlias.objects.create(name='rfc9999').docs.add(IndividualDraftFactory()) + Document.objects.create(name='rfc9998') + Document.objects.create(name='rfc9999') doc = DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review') iesgeval_pk = str(State.objects.get(slug='iesgeval',type__slug='statchg').pk) empty_outbox() @@ -1818,12 +1766,12 @@ class DocTestCase(TestCase): self.assertIn('iesg-secretary',outbox[0]['To']) self.assertIn('drafts-eval',outbox[1]['To']) - doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist') + doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist') r = self.client.get(urlreverse("ietf.doc.views_doc.document_ballot", kwargs=dict(name=doc.name))) self.assertNotContains(r, 'Needs a YES') self.assertNotContains(r, 'more YES or NO') - doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois') + doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois') r = self.client.get(urlreverse("ietf.doc.views_doc.document_ballot", kwargs=dict(name=doc.name))) self.assertContains(r, 'more YES or NO') @@ -1886,15 +1834,14 @@ class DocTestCase(TestCase): self.assertContains(r, e.desc) def test_history_bis_00(self): - rfcname='rfc9090' - rfc = WgRfcFactory(alias2=rfcname) - bis_draft = WgDraftFactory(name='draft-ietf-{}-{}bis'.format(rfc.group.acronym,rfcname)) + rfc = WgRfcFactory(rfc_number=9090) + bis_draft = WgDraftFactory(name='draft-ietf-{}-{}bis'.format(rfc.group.acronym,rfc.name)) url = 
urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=bis_draft.name)) r = self.client.get(url) self.assertEqual(r.status_code, 200) q = PyQuery(unicontent(r)) - attr1='value="{}"'.format(rfcname) + attr1='value="{}"'.format(rfc.name) self.assertEqual(len(q('option['+attr1+'][selected="selected"]')), 1) @@ -1944,7 +1891,7 @@ class DocTestCase(TestCase): self.assertContains(r, doc.name) def test_rfc_feed(self): - rfc = WgRfcFactory(alias2__name="rfc9000") + rfc = WgRfcFactory(rfc_number=9000) DocEventFactory(doc=rfc, type="published_rfc") r = self.client.get("/feed/rfc/") self.assertTrue(r.status_code, 200) @@ -2002,75 +1949,84 @@ class DocTestCase(TestCase): @override_settings(RFC_EDITOR_INFO_BASE_URL='https://www.rfc-editor.ietf.org/info/') def test_document_bibtex(self): rfc = WgRfcFactory.create( - #other_aliases = ['rfc6020',], - states = [('draft','rfc'),('draft-iesg','pub')], - std_level_id = 'ps', - time = datetime.datetime(2010, 10, 10, tzinfo=ZoneInfo(settings.TIME_ZONE)), - ) - num = rfc.rfc_number() + time=datetime.datetime(2010, 10, 10, tzinfo=ZoneInfo(settings.TIME_ZONE)) + ) + num = rfc.rfc_number DocEventFactory.create( doc=rfc, - type='published_rfc', + type="published_rfc", time=datetime.datetime(2010, 10, 10, tzinfo=RPC_TZINFO), ) # - url = urlreverse('ietf.doc.views_doc.document_bibtex', kwargs=dict(name=rfc.name)) + url = urlreverse("ietf.doc.views_doc.document_bibtex", kwargs=dict(name=rfc.name)) r = self.client.get(url) - entry = self._parse_bibtex_response(r)["rfc%s"%num] - self.assertEqual(entry['series'], 'Request for Comments') - self.assertEqual(entry['number'], num) - self.assertEqual(entry['doi'], '10.17487/RFC%s'%num) - self.assertEqual(entry['year'], '2010') - self.assertEqual(entry['month'].lower()[0:3], 'oct') - self.assertEqual(entry['url'], f'https://www.rfc-editor.ietf.org/info/rfc{num}') + entry = self._parse_bibtex_response(r)["rfc%s" % num] + self.assertEqual(entry["series"], "Request for Comments") + self.assertEqual(int(entry["number"]), num) + self.assertEqual(entry["doi"], "10.17487/RFC%s" % num) + self.assertEqual(entry["year"], "2010") + self.assertEqual(entry["month"].lower()[0:3], "oct") + self.assertEqual(entry["url"], f"https://www.rfc-editor.ietf.org/info/rfc{num}") # - self.assertNotIn('day', entry) - + self.assertNotIn("day", entry) + # test for incorrect case - revision for RFC rfc = WgRfcFactory(name="rfc0000") - url = urlreverse('ietf.doc.views_doc.document_bibtex', kwargs=dict(name=rfc.name, rev='00')) + url = urlreverse( + "ietf.doc.views_doc.document_bibtex", kwargs=dict(name=rfc.name, rev="00") + ) r = self.client.get(url) self.assertEqual(r.status_code, 404) - + april1 = IndividualRfcFactory.create( - stream_id = 'ise', - states = [('draft','rfc'),('draft-iesg','pub')], - std_level_id = 'inf', - time = datetime.datetime(1990, 4, 1, tzinfo=ZoneInfo(settings.TIME_ZONE)), - ) - num = april1.rfc_number() + stream_id="ise", + std_level_id="inf", + time=datetime.datetime(1990, 4, 1, tzinfo=ZoneInfo(settings.TIME_ZONE)), + ) + num = april1.rfc_number DocEventFactory.create( doc=april1, - type='published_rfc', + type="published_rfc", time=datetime.datetime(1990, 4, 1, tzinfo=RPC_TZINFO), ) # - url = urlreverse('ietf.doc.views_doc.document_bibtex', kwargs=dict(name=april1.name)) + url = urlreverse( + "ietf.doc.views_doc.document_bibtex", kwargs=dict(name=april1.name) + ) r = self.client.get(url) - self.assertEqual(r.get('Content-Type'), 'text/plain; charset=utf-8') - entry = self._parse_bibtex_response(r)["rfc%s"%num] - 
self.assertEqual(entry['series'], 'Request for Comments') - self.assertEqual(entry['number'], num) - self.assertEqual(entry['doi'], '10.17487/RFC%s'%num) - self.assertEqual(entry['year'], '1990') - self.assertEqual(entry['month'].lower()[0:3], 'apr') - self.assertEqual(entry['day'], '1') - self.assertEqual(entry['url'], f'https://www.rfc-editor.ietf.org/info/rfc{num}') - + self.assertEqual(r.get("Content-Type"), "text/plain; charset=utf-8") + entry = self._parse_bibtex_response(r)["rfc%s" % num] + self.assertEqual(entry["series"], "Request for Comments") + self.assertEqual(int(entry["number"]), num) + self.assertEqual(entry["doi"], "10.17487/RFC%s" % num) + self.assertEqual(entry["year"], "1990") + self.assertEqual(entry["month"].lower()[0:3], "apr") + self.assertEqual(entry["day"], "1") + self.assertEqual(entry["url"], f"https://www.rfc-editor.ietf.org/info/rfc{num}") + draft = IndividualDraftFactory.create() - docname = '%s-%s' % (draft.name, draft.rev) - bibname = docname[6:] # drop the 'draft-' prefix - url = urlreverse('ietf.doc.views_doc.document_bibtex', kwargs=dict(name=draft.name)) + docname = "%s-%s" % (draft.name, draft.rev) + bibname = docname[6:] # drop the 'draft-' prefix + url = urlreverse("ietf.doc.views_doc.document_bibtex", kwargs=dict(name=draft.name)) r = self.client.get(url) entry = self._parse_bibtex_response(r)[bibname] - self.assertEqual(entry['note'], 'Work in Progress') - self.assertEqual(entry['number'], docname) - self.assertEqual(entry['year'], str(draft.pub_date().year)) - self.assertEqual(entry['month'].lower()[0:3], draft.pub_date().strftime('%b').lower()) - self.assertEqual(entry['day'], str(draft.pub_date().day)) - self.assertEqual(entry['url'], settings.IDTRACKER_BASE_URL + urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=draft.name, rev=draft.rev))) + self.assertEqual(entry["note"], "Work in Progress") + self.assertEqual(entry["number"], docname) + self.assertEqual(entry["year"], str(draft.pub_date().year)) + self.assertEqual( + entry["month"].lower()[0:3], draft.pub_date().strftime("%b").lower() + ) + self.assertEqual(entry["day"], str(draft.pub_date().day)) + self.assertEqual( + entry["url"], + settings.IDTRACKER_BASE_URL + + urlreverse( + "ietf.doc.views_doc.document_main", + kwargs=dict(name=draft.name, rev=draft.rev), + ), + ) # - self.assertNotIn('doi', entry) + self.assertNotIn("doi", entry) def test_document_bibxml(self): draft = IndividualDraftFactory.create() @@ -2147,7 +2103,7 @@ class ReferencesTest(TestCase): def test_references(self): doc1 = WgDraftFactory(name='draft-ietf-mars-test') - doc2 = IndividualDraftFactory(name='draft-imaginary-independent-submission').docalias.first() + doc2 = IndividualDraftFactory(name='draft-imaginary-independent-submission') RelatedDocument.objects.get_or_create(source=doc1,target=doc2,relationship=DocRelationshipName.objects.get(slug='refnorm')) url = urlreverse('ietf.doc.views_doc.document_references', kwargs=dict(name=doc1.name)) r = self.client.get(url) @@ -2159,124 +2115,168 @@ class ReferencesTest(TestCase): self.assertContains(r, doc1.name) class GenerateDraftAliasesTests(TestCase): - def setUp(self): - super().setUp() - self.doc_aliases_file = NamedTemporaryFile(delete=False, mode='w+') - self.doc_aliases_file.close() - self.doc_virtual_file = NamedTemporaryFile(delete=False, mode='w+') - self.doc_virtual_file.close() - self.saved_draft_aliases_path = settings.DRAFT_ALIASES_PATH - self.saved_draft_virtual_path = settings.DRAFT_VIRTUAL_PATH - settings.DRAFT_ALIASES_PATH = 
self.doc_aliases_file.name - settings.DRAFT_VIRTUAL_PATH = self.doc_virtual_file.name + def setUp(self): + super().setUp() + self.doc_aliases_file = NamedTemporaryFile(delete=False, mode="w+") + self.doc_aliases_file.close() + self.doc_virtual_file = NamedTemporaryFile(delete=False, mode="w+") + self.doc_virtual_file.close() + self.saved_draft_aliases_path = settings.DRAFT_ALIASES_PATH + self.saved_draft_virtual_path = settings.DRAFT_VIRTUAL_PATH + settings.DRAFT_ALIASES_PATH = self.doc_aliases_file.name + settings.DRAFT_VIRTUAL_PATH = self.doc_virtual_file.name - def tearDown(self): - settings.DRAFT_ALIASES_PATH = self.saved_draft_aliases_path - settings.DRAFT_VIRTUAL_PATH = self.saved_draft_virtual_path - os.unlink(self.doc_aliases_file.name) - os.unlink(self.doc_virtual_file.name) - super().tearDown() + def tearDown(self): + settings.DRAFT_ALIASES_PATH = self.saved_draft_aliases_path + settings.DRAFT_VIRTUAL_PATH = self.saved_draft_virtual_path + os.unlink(self.doc_aliases_file.name) + os.unlink(self.doc_virtual_file.name) + super().tearDown() - def testManagementCommand(self): - a_month_ago = (timezone.now() - datetime.timedelta(30)).astimezone(RPC_TZINFO) - a_month_ago = a_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) - ad = RoleFactory(name_id='ad', group__type_id='area', group__state_id='active').person - shepherd = PersonFactory() - author1 = PersonFactory() - author2 = PersonFactory() - author3 = PersonFactory() - author4 = PersonFactory() - author5 = PersonFactory() - author6 = PersonFactory() - mars = GroupFactory(type_id='wg', acronym='mars') - marschairman = PersonFactory(user__username='marschairman') - mars.role_set.create(name_id='chair', person=marschairman, email=marschairman.email()) - doc1 = IndividualDraftFactory(authors=[author1], shepherd=shepherd.email(), ad=ad) - doc2 = WgDraftFactory(name='draft-ietf-mars-test', group__acronym='mars', authors=[author2], ad=ad) - doc3 = WgRfcFactory.create(name='draft-ietf-mars-finished', group__acronym='mars', authors=[author3], ad=ad, std_level_id='ps', states=[('draft','rfc'),('draft-iesg','pub')], time=a_month_ago) - DocEventFactory.create(doc=doc3, type='published_rfc', time=a_month_ago) - doc4 = WgRfcFactory.create(authors=[author4,author5], ad=ad, std_level_id='ps', states=[('draft','rfc'),('draft-iesg','pub')], time=datetime.datetime(2010,10,10, tzinfo=ZoneInfo(settings.TIME_ZONE))) - DocEventFactory.create(doc=doc4, type='published_rfc', time=datetime.datetime(2010, 10, 10, tzinfo=RPC_TZINFO)) - doc5 = IndividualDraftFactory(authors=[author6]) + def testManagementCommand(self): + a_month_ago = (timezone.now() - datetime.timedelta(30)).astimezone(RPC_TZINFO) + a_month_ago = a_month_ago.replace(hour=0, minute=0, second=0, microsecond=0) + ad = RoleFactory( + name_id="ad", group__type_id="area", group__state_id="active" + ).person + shepherd = PersonFactory() + author1 = PersonFactory() + author2 = PersonFactory() + author3 = PersonFactory() + author4 = PersonFactory() + author5 = PersonFactory() + author6 = PersonFactory() + mars = GroupFactory(type_id="wg", acronym="mars") + marschairman = PersonFactory(user__username="marschairman") + mars.role_set.create( + name_id="chair", person=marschairman, email=marschairman.email() + ) + doc1 = IndividualDraftFactory( + authors=[author1], shepherd=shepherd.email(), ad=ad + ) + doc2 = WgDraftFactory( + name="draft-ietf-mars-test", group__acronym="mars", authors=[author2], ad=ad + ) + doc3 = WgDraftFactory.create( + name="draft-ietf-mars-finished", + 
group__acronym="mars", + authors=[author3], + ad=ad, + std_level_id="ps", + states=[("draft", "rfc"), ("draft-iesg", "pub")], + time=a_month_ago, + ) + rfc3 = WgRfcFactory() + DocEventFactory.create(doc=rfc3, type="published_rfc", time=a_month_ago) + doc3.relateddocument_set.create( + relationship_id="became_rfc", target=rfc3 + ) + doc4 = WgDraftFactory.create( + authors=[author4, author5], + ad=ad, + std_level_id="ps", + states=[("draft", "rfc"), ("draft-iesg", "pub")], + time=datetime.datetime(2010, 10, 10, tzinfo=ZoneInfo(settings.TIME_ZONE)), + ) + rfc4 = WgRfcFactory() + DocEventFactory.create( + doc=rfc4, + type="published_rfc", + time=datetime.datetime(2010, 10, 10, tzinfo=RPC_TZINFO), + ) + doc4.relateddocument_set.create( + relationship_id="became_rfc", target=rfc4 + ) + doc5 = IndividualDraftFactory(authors=[author6]) - args = [ ] - kwargs = { } - out = io.StringIO() - call_command("generate_draft_aliases", *args, **kwargs, stdout=out, stderr=out) - self.assertFalse(out.getvalue()) + args = [] + kwargs = {} + out = io.StringIO() + call_command("generate_draft_aliases", *args, **kwargs, stdout=out, stderr=out) + self.assertFalse(out.getvalue()) - with open(settings.DRAFT_ALIASES_PATH) as afile: - acontent = afile.read() - self.assertTrue(all([x in acontent for x in [ - 'xfilter-' + doc1.name, - 'xfilter-' + doc1.name + '.ad', - 'xfilter-' + doc1.name + '.authors', - 'xfilter-' + doc1.name + '.shepherd', - 'xfilter-' + doc1.name + '.all', - 'xfilter-' + doc2.name, - 'xfilter-' + doc2.name + '.ad', - 'xfilter-' + doc2.name + '.authors', - 'xfilter-' + doc2.name + '.chairs', - 'xfilter-' + doc2.name + '.all', - 'xfilter-' + doc3.name, - 'xfilter-' + doc3.name + '.ad', - 'xfilter-' + doc3.name + '.authors', - 'xfilter-' + doc3.name + '.chairs', - 'xfilter-' + doc5.name, - 'xfilter-' + doc5.name + '.authors', - 'xfilter-' + doc5.name + '.all', - ]])) - self.assertFalse(all([x in acontent for x in [ - 'xfilter-' + doc1.name + '.chairs', - 'xfilter-' + doc2.name + '.shepherd', - 'xfilter-' + doc3.name + '.shepherd', - 'xfilter-' + doc4.name, - 'xfilter-' + doc5.name + '.shepherd', - 'xfilter-' + doc5.name + '.ad', - ]])) + with open(settings.DRAFT_ALIASES_PATH) as afile: + acontent = afile.read() + for x in [ + "xfilter-" + doc1.name, + "xfilter-" + doc1.name + ".ad", + "xfilter-" + doc1.name + ".authors", + "xfilter-" + doc1.name + ".shepherd", + "xfilter-" + doc1.name + ".all", + "xfilter-" + doc2.name, + "xfilter-" + doc2.name + ".ad", + "xfilter-" + doc2.name + ".authors", + "xfilter-" + doc2.name + ".chairs", + "xfilter-" + doc2.name + ".all", + "xfilter-" + doc3.name, + "xfilter-" + doc3.name + ".ad", + "xfilter-" + doc3.name + ".authors", + "xfilter-" + doc3.name + ".chairs", + "xfilter-" + doc5.name, + "xfilter-" + doc5.name + ".authors", + "xfilter-" + doc5.name + ".all", + ]: + self.assertIn(x, acontent) - with open(settings.DRAFT_VIRTUAL_PATH) as vfile: - vcontent = vfile.read() - self.assertTrue(all([x in vcontent for x in [ - ad.email_address(), - shepherd.email_address(), - marschairman.email_address(), - author1.email_address(), - author2.email_address(), - author3.email_address(), - author6.email_address(), - ]])) - self.assertFalse(all([x in vcontent for x in [ - author4.email_address(), - author5.email_address(), - ]])) - self.assertTrue(all([x in vcontent for x in [ - 'xfilter-' + doc1.name, - 'xfilter-' + doc1.name + '.ad', - 'xfilter-' + doc1.name + '.authors', - 'xfilter-' + doc1.name + '.shepherd', - 'xfilter-' + doc1.name + '.all', - 'xfilter-' + doc2.name, - 
'xfilter-' + doc2.name + '.ad', - 'xfilter-' + doc2.name + '.authors', - 'xfilter-' + doc2.name + '.chairs', - 'xfilter-' + doc2.name + '.all', - 'xfilter-' + doc3.name, - 'xfilter-' + doc3.name + '.ad', - 'xfilter-' + doc3.name + '.authors', - 'xfilter-' + doc3.name + '.chairs', - 'xfilter-' + doc5.name, - 'xfilter-' + doc5.name + '.authors', - 'xfilter-' + doc5.name + '.all', - ]])) - self.assertFalse(all([x in vcontent for x in [ - 'xfilter-' + doc1.name + '.chairs', - 'xfilter-' + doc2.name + '.shepherd', - 'xfilter-' + doc3.name + '.shepherd', - 'xfilter-' + doc4.name, - 'xfilter-' + doc5.name + '.shepherd', - 'xfilter-' + doc5.name + '.ad', - ]])) + for x in [ + "xfilter-" + doc1.name + ".chairs", + "xfilter-" + doc2.name + ".shepherd", + "xfilter-" + doc3.name + ".shepherd", + "xfilter-" + doc4.name, + "xfilter-" + doc5.name + ".shepherd", + "xfilter-" + doc5.name + ".ad", + ]: + self.assertNotIn(x, acontent) + + with open(settings.DRAFT_VIRTUAL_PATH) as vfile: + vcontent = vfile.read() + for x in [ + ad.email_address(), + shepherd.email_address(), + marschairman.email_address(), + author1.email_address(), + author2.email_address(), + author3.email_address(), + author6.email_address(), + ]: + self.assertIn(x, vcontent) + + for x in [ + author4.email_address(), + author5.email_address(), + ]: + self.assertNotIn(x, vcontent) + + for x in [ + "xfilter-" + doc1.name, + "xfilter-" + doc1.name + ".ad", + "xfilter-" + doc1.name + ".authors", + "xfilter-" + doc1.name + ".shepherd", + "xfilter-" + doc1.name + ".all", + "xfilter-" + doc2.name, + "xfilter-" + doc2.name + ".ad", + "xfilter-" + doc2.name + ".authors", + "xfilter-" + doc2.name + ".chairs", + "xfilter-" + doc2.name + ".all", + "xfilter-" + doc3.name, + "xfilter-" + doc3.name + ".ad", + "xfilter-" + doc3.name + ".authors", + "xfilter-" + doc3.name + ".chairs", + "xfilter-" + doc5.name, + "xfilter-" + doc5.name + ".authors", + "xfilter-" + doc5.name + ".all", + ]: + self.assertIn(x, vcontent) + + for x in [ + "xfilter-" + doc1.name + ".chairs", + "xfilter-" + doc2.name + ".shepherd", + "xfilter-" + doc3.name + ".shepherd", + "xfilter-" + doc4.name, + "xfilter-" + doc5.name + ".shepherd", + "xfilter-" + doc5.name + ".ad", + ]: + self.assertNotIn(x, vcontent) class EmailAliasesTests(TestCase): @@ -2710,10 +2710,10 @@ class Idnits2SupportTests(TestCase): settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['DERIVED_DIR'] def test_obsoleted(self): - rfc = WgRfcFactory(alias2__name='rfc1001') - WgRfcFactory(alias2__name='rfc1003',relations=[('obs',rfc)]) - rfc = WgRfcFactory(alias2__name='rfc1005') - WgRfcFactory(alias2__name='rfc1007',relations=[('obs',rfc)]) + rfc = WgRfcFactory(rfc_number=1001) + WgRfcFactory(rfc_number=1003,relations=[('obs',rfc)]) + rfc = WgRfcFactory(rfc_number=1005) + WgRfcFactory(rfc_number=1007,relations=[('obs',rfc)]) url = urlreverse('ietf.doc.views_doc.idnits2_rfcs_obsoleted') r = self.client.get(url) @@ -2738,20 +2738,22 @@ class Idnits2SupportTests(TestCase): def test_idnits2_state(self): rfc = WgRfcFactory() - url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=rfc.canonical_name())) + draft = WgDraftFactory() + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) + url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=rfc.name)) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertContains(r,'rfcnum') draft = WgDraftFactory() - url = urlreverse('ietf.doc.views_doc.idnits2_state', 
kwargs=dict(name=draft.canonical_name())) + url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=draft.name)) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertNotContains(r,'rfcnum') self.assertContains(r,'Unknown') draft = WgDraftFactory(intended_std_level_id='ps') - url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=draft.canonical_name())) + url = urlreverse('ietf.doc.views_doc.idnits2_state', kwargs=dict(name=draft.name)) r = self.client.get(url) self.assertEqual(r.status_code, 200) self.assertContains(r,'Proposed') @@ -2796,16 +2798,12 @@ class RawIdTests(TestCase): self.should_succeed(dict(name=draft.name, rev='00',ext='txt')) self.should_404(dict(name=draft.name, rev='00',ext='html')) - def test_raw_id_rfc(self): - rfc = WgRfcFactory() - dir = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR - (Path(dir) / f'{rfc.name}-{rfc.rev}.txt').touch() - self.should_succeed(dict(name=rfc.name)) - self.should_404(dict(name=rfc.canonical_name())) + # test_raw_id_rfc intentionally removed + # an rfc is no longer a pseudo-version of a draft. def test_non_draft(self): - charter = CharterFactory() - self.should_404(dict(name=charter.name)) + for doc in [CharterFactory(), WgRfcFactory()]: + self.should_404(dict(name=doc.name)) class PdfizedTests(TestCase): @@ -2824,24 +2822,27 @@ class PdfizedTests(TestCase): r = self.client.get(url) self.assertEqual(r.status_code, 404) + # This takes a _long_ time (32s on a 2022 m1 macbook pro) - is it worth what it covers? def test_pdfized(self): - rfc = WgRfcFactory(create_revisions=range(0,2)) + rfc = WgRfcFactory() + draft = WgDraftFactory(create_revisions=range(0,2)) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) dir = settings.RFC_PATH - with (Path(dir) / f'{rfc.canonical_name()}.txt').open('w') as f: + with (Path(dir) / f'{rfc.name}.txt').open('w') as f: f.write('text content') dir = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR for r in range(0,2): - with (Path(dir) / f'{rfc.name}-{r:02d}.txt').open('w') as f: + with (Path(dir) / f'{draft.name}-{r:02d}.txt').open('w') as f: f.write('text content') - self.should_succeed(dict(name=rfc.canonical_name())) self.should_succeed(dict(name=rfc.name)) + self.should_succeed(dict(name=draft.name)) for r in range(0,2): - self.should_succeed(dict(name=rfc.name,rev=f'{r:02d}')) + self.should_succeed(dict(name=draft.name,rev=f'{r:02d}')) for ext in ('pdf','txt','html','anythingatall'): - self.should_succeed(dict(name=rfc.name,rev=f'{r:02d}',ext=ext)) - self.should_404(dict(name=rfc.name,rev='02')) + self.should_succeed(dict(name=draft.name,rev=f'{r:02d}',ext=ext)) + self.should_404(dict(name=draft.name,rev='02')) class NotifyValidationTests(TestCase): def test_notify_validation(self): @@ -2924,3 +2925,30 @@ class CanRequestConflictReviewTests(TestCase): r = self.client.get(url) self.assertContains(r, target_string) +class DocInfoMethodsTests(TestCase): + + def test_became_rfc(self): + draft = WgDraftFactory() + rfc = WgRfcFactory() + draft.relateddocument_set.create(relationship_id="became_rfc",target=rfc) + self.assertEqual(draft.became_rfc(), rfc) + self.assertEqual(rfc.came_from_draft(), draft) + + charter = CharterFactory() + self.assertIsNone(charter.became_rfc()) + self.assertIsNone(charter.came_from_draft()) + + def test_revisions(self): + draft = WgDraftFactory(rev="09",create_revisions=range(0,10)) + self.assertEqual(draft.revisions_by_dochistory(),[f"{i:02d}" for i in range(0,10)]) + 
self.assertEqual(draft.revisions_by_newrevisionevent(),[f"{i:02d}" for i in range(0,10)]) + rfc = WgRfcFactory() + self.assertEqual(rfc.revisions_by_newrevisionevent(),[]) + self.assertEqual(rfc.revisions_by_dochistory(),[]) + + draft.history_set.filter(rev__lt="08").delete() + draft.docevent_set.filter(newrevisiondocevent__rev="05").delete() + self.assertEqual(draft.revisions_by_dochistory(),[f"{i:02d}" for i in range(8,10)]) + self.assertEqual(draft.revisions_by_newrevisionevent(),[f"{i:02d}" for i in [*range(0,5), *range(6,10)]]) + + diff --git a/ietf/doc/tests_ballot.py b/ietf/doc/tests_ballot.py index 8a4717c74..9c9287dab 100644 --- a/ietf/doc/tests_ballot.py +++ b/ietf/doc/tests_ballot.py @@ -803,8 +803,8 @@ class ApproveBallotTests(TestCase): desc='Last call announcement was changed', text='this is simple last call text.' ) rfc = IndividualRfcFactory.create( + name = "rfc6666", stream_id='ise', - other_aliases=['rfc6666',], states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', ) @@ -821,7 +821,7 @@ class ApproveBallotTests(TestCase): self.assertContains(r, "No downward references for") # Add a downref, the page should ask if it should be added to the registry - rel = draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),relationship_id='refnorm') + rel = draft.relateddocument_set.create(target=rfc, relationship_id='refnorm') d = [rdoc for rdoc in draft.relateddocument_set.all() if rel.is_approved_downref()] original_len = len(d) r = self.client.get(url) @@ -1121,13 +1121,13 @@ class RegenerateLastCallTestCase(TestCase): self.assertFalse("contains these normative down" in lc_text) rfc = IndividualRfcFactory.create( + rfc_number=6666, stream_id='ise', - other_aliases=['rfc6666',], states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', ) - draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),relationship_id='refnorm') + draft.relateddocument_set.create(target=rfc,relationship_id='refnorm') r = self.client.post(url, dict(regenerate_last_call_text="1")) self.assertEqual(r.status_code, 200) @@ -1137,7 +1137,7 @@ class RegenerateLastCallTestCase(TestCase): self.assertTrue("rfc6666" in lc_text) self.assertTrue("Independent Submission" in lc_text) - draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), relationship_id='downref-approval') + draft.relateddocument_set.create(target=rfc, relationship_id='downref-approval') r = self.client.post(url, dict(regenerate_last_call_text="1")) self.assertEqual(r.status_code, 200) diff --git a/ietf/doc/tests_bofreq.py b/ietf/doc/tests_bofreq.py index 2fdc8c282..319ab778d 100644 --- a/ietf/doc/tests_bofreq.py +++ b/ietf/doc/tests_bofreq.py @@ -18,7 +18,7 @@ from django.utils import timezone from ietf.group.factories import RoleFactory from ietf.doc.factories import BofreqFactory, NewRevisionDocEventFactory -from ietf.doc.models import State, Document, DocAlias, NewRevisionDocEvent +from ietf.doc.models import State, Document, NewRevisionDocEvent from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible from ietf.ietfauth.utils import has_role from ietf.person.factories import PersonFactory @@ -32,7 +32,7 @@ class BofreqTests(TestCase): settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['BOFREQ_PATH'] def write_bofreq_file(self, bofreq): - fname = Path(settings.BOFREQ_PATH) / ("%s-%s.md" % (bofreq.canonical_name(), bofreq.rev)) + fname = Path(settings.BOFREQ_PATH) / ("%s-%s.md" % (bofreq.name, bofreq.rev)) with fname.open("w") as f: 
f.write(f"""# This is a test bofreq. Version: {bofreq.rev} @@ -366,7 +366,6 @@ This test section has some text. name = f"bofreq-{xslugify(nobody.last_name())[:64]}-{postdict['title']}".replace(' ','-') bofreq = Document.objects.filter(name=name,type_id='bofreq').first() self.assertIsNotNone(bofreq) - self.assertIsNotNone(DocAlias.objects.filter(name=name).first()) self.assertEqual(bofreq.title, postdict['title']) self.assertEqual(bofreq.rev, '00') self.assertEqual(bofreq.get_state_slug(), 'proposed') diff --git a/ietf/doc/tests_charter.py b/ietf/doc/tests_charter.py index 0350fc022..1bd6c1701 100644 --- a/ietf/doc/tests_charter.py +++ b/ietf/doc/tests_charter.py @@ -88,10 +88,7 @@ class EditCharterTests(TestCase): settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['CHARTER_PATH'] def write_charter_file(self, charter): - with (Path(settings.CHARTER_PATH) / - ("%s-%s.txt" % (charter.canonical_name(), charter.rev)) - ).open("w") as f: - f.write("This is a charter.") + (Path(settings.CHARTER_PATH) / f"{charter.name}-{charter.rev}.txt").write_text("This is a charter.") def test_startstop_process(self): CharterFactory(group__acronym='mars') @@ -509,8 +506,13 @@ class EditCharterTests(TestCase): self.assertEqual(charter.rev, next_revision(prev_rev)) self.assertTrue("new_revision" in charter.latest_event().type) - with (Path(settings.CHARTER_PATH) / (charter.canonical_name() + "-" + charter.rev + ".txt")).open(encoding='utf-8') as f: - self.assertEqual(f.read(), "Windows line\nMac line\nUnix line\n" + utf_8_snippet.decode('utf-8')) + file_contents = ( + Path(settings.CHARTER_PATH) / (charter.name + "-" + charter.rev + ".txt") + ).read_text("utf-8") + self.assertEqual( + file_contents, + "Windows line\nMac line\nUnix line\n" + utf_8_snippet.decode("utf-8"), + ) def test_submit_initial_charter(self): group = GroupFactory(type_id='wg',acronym='mars',list_email='mars-wg@ietf.org') @@ -538,6 +540,24 @@ class EditCharterTests(TestCase): group = Group.objects.get(pk=group.pk) self.assertEqual(group.charter, charter) + def test_submit_charter_with_invalid_name(self): + self.client.login(username="secretary", password="secretary+password") + ietf_group = GroupFactory(type_id="wg") + for bad_name in ("charter-irtf-{}", "charter-randomjunk-{}", "charter-ietf-thisisnotagroup"): + url = urlreverse("ietf.doc.views_charter.submit", kwargs={"name": bad_name.format(ietf_group.acronym)}) + r = self.client.get(url) + self.assertEqual(r.status_code, 404, f"GET of charter named {bad_name} should 404") + r = self.client.post(url, {}) + self.assertEqual(r.status_code, 404, f"POST of charter named {bad_name} should 404") + + irtf_group = GroupFactory(type_id="rg") + for bad_name in ("charter-ietf-{}", "charter-whatisthis-{}", "charter-irtf-thisisnotagroup"): + url = urlreverse("ietf.doc.views_charter.submit", kwargs={"name": bad_name.format(irtf_group.acronym)}) + r = self.client.get(url) + self.assertEqual(r.status_code, 404, f"GET of charter named {bad_name} should 404") + r = self.client.post(url, {}) + self.assertEqual(r.status_code, 404, f"POST of charter named {bad_name} should 404") + def test_edit_review_announcement_text(self): area = GroupFactory(type_id='area') RoleFactory(name_id='ad',group=area,person=Person.objects.get(user__username='ad')) diff --git a/ietf/doc/tests_conflict_review.py b/ietf/doc/tests_conflict_review.py index 1b4b8eefa..485f5655e 100644 --- a/ietf/doc/tests_conflict_review.py +++ b/ietf/doc/tests_conflict_review.py @@ -70,12 +70,12 @@ class 
ConflictReviewTests(TestCase): self.assertEqual(review_doc.ad.name,'Areað Irector') self.assertEqual(review_doc.notify,'ipu@ietf.org') doc = Document.objects.get(name='draft-imaginary-independent-submission') - self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')]) + self.assertTrue(doc in [x.target for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')]) self.assertTrue(review_doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review requested")) self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review initiated")) self.assertTrue('Conflict Review requested' in outbox[-1]['Subject']) - + # verify you can't start a review when a review is already in progress r = self.client.post(url,dict(ad="Areað Irector",create_in_state="Needs Shepherd",notify='ipu@ietf.org')) self.assertEqual(r.status_code, 404) @@ -119,7 +119,7 @@ class ConflictReviewTests(TestCase): self.assertEqual(review_doc.ad.name,'Ietf Chair') self.assertEqual(review_doc.notify,'ipu@ietf.org') doc = Document.objects.get(name='draft-imaginary-independent-submission') - self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')]) + self.assertTrue(doc in [x.target for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')]) self.assertEqual(len(outbox), messages_before + 2) @@ -403,7 +403,7 @@ class ConflictReviewSubmitTests(TestCase): # Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox # sane post using textbox - path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev)) self.assertEqual(doc.rev,'00') self.assertFalse(os.path.exists(path)) r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1")) @@ -423,7 +423,7 @@ class ConflictReviewSubmitTests(TestCase): # A little additional setup # doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp self.assertEqual(doc.rev,'00') - path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev)) with io.open(path,'w') as f: f.write('This is the old proposal.') f.close() @@ -450,7 +450,7 @@ class ConflictReviewSubmitTests(TestCase): self.assertEqual(r.status_code, 302) doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission') self.assertEqual(doc.rev,'01') - path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev)) with io.open(path) as f: self.assertEqual(f.read(),"This is a new proposal.") f.close() diff --git a/ietf/doc/tests_downref.py b/ietf/doc/tests_downref.py index 258494e36..0222ad794 100644 --- a/ietf/doc/tests_downref.py +++ b/ietf/doc/tests_downref.py @@ -19,12 +19,9 @@ class Downref(TestCase): super().setUp() PersonFactory(name='Plain Man',user__username='plain') self.draft = WgDraftFactory(name='draft-ietf-mars-test') - self.draftalias = self.draft.docalias.get(name='draft-ietf-mars-test') self.doc = WgDraftFactory(name='draft-ietf-mars-approved-document',states=[('draft-iesg','rfcqueue')]) - self.docalias = 
self.doc.docalias.get(name='draft-ietf-mars-approved-document')
-        self.rfc = WgRfcFactory(alias2__name='rfc9998')
-        self.rfcalias = self.rfc.docalias.get(name='rfc9998')
-        RelatedDocument.objects.create(source=self.doc, target=self.rfcalias, relationship_id='downref-approval')
+        self.rfc = WgRfcFactory(rfc_number=9998)
+        RelatedDocument.objects.create(source=self.doc, target=self.rfc, relationship_id='downref-approval')
 
     def test_downref_registry(self):
         url = urlreverse('ietf.doc.views_downref.downref_registry')
@@ -64,44 +61,44 @@ class Downref(TestCase):
         self.assertContains(r, 'Save downref')
 
         # error - already in the downref registry
-        r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.docalias.pk, )))
+        r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.doc.pk, )))
         self.assertContains(r, 'Downref is already in the registry')
 
         # error - source is not in an approved state
         r = self.client.get(url)
         self.assertEqual(r.status_code, 200)
-        r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.draftalias.pk, )))
+        r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
         self.assertContains(r, 'Draft is not yet approved')
 
         # error - the target is not a normative reference of the source
         self.draft.set_state(State.objects.get(used=True, type="draft-iesg", slug="pub"))
         r = self.client.get(url)
         self.assertEqual(r.status_code, 200)
-        r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.draftalias.pk, )))
+        r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
         self.assertContains(r, 'There does not seem to be a normative reference to RFC')
         self.assertContains(r, 'Save downref anyway')
 
         # normal - approve the document so the downref is now okay
-        RelatedDocument.objects.create(source=self.draft, target=self.rfcalias, relationship_id='refnorm')
+        RelatedDocument.objects.create(source=self.draft, target=self.rfc, relationship_id='refnorm')
         draft_de_count_before = self.draft.docevent_set.count()
         rfc_de_count_before = self.rfc.docevent_set.count()
 
         r = self.client.get(url)
         self.assertEqual(r.status_code, 200)
-        r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
+        r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
         self.assertEqual(r.status_code, 302)
         newurl = urlreverse('ietf.doc.views_downref.downref_registry')
         r = self.client.get(newurl)
         self.assertContains(r, '
diff --git a/ietf/doc/urls.py b/ietf/doc/urls.py
--- a/ietf/doc/urls.py
+++ b/ietf/doc/urls.py
-    url(r'^select2search/(?P<model_name>(document|docalias))/(?P<doc_type>draft)/$', views_search.ajax_select2_search_docs),
+    url(r'^select2search/(?P<model_name>document)/(?P<doc_type>(draft|rfc|all))/$', views_search.ajax_select2_search_docs),
     url(r'^ballots/irsg/$', views_ballot.irsg_ballot_status),
     url(r'^ballots/rsab/$', views_ballot.rsab_ballot_status),
+    url(r'^(?P(bcp|std|fyi))/?$', views_search.index_subseries),
+
     url(r'^%(name)s(?:/%(rev)s)?/$' % settings.URL_REGEXPS, views_doc.document_main),
     url(r'^%(name)s(?:/%(rev)s)?/bibtex/$' % settings.URL_REGEXPS, views_doc.document_bibtex),
     url(r'^%(name)s(?:/%(rev)s)?/idnits2-state/$' % settings.URL_REGEXPS, views_doc.idnits2_state),
diff --git a/ietf/doc/utils.py b/ietf/doc/utils.py
index 992659df3..791339c4f 100644
--- a/ietf/doc/utils.py
+++ b/ietf/doc/utils.py
@@ -32,7 +32,7 @@ from ietf.community.models import CommunityList
 from ietf.community.utils import docs_tracked_by_community_list
 from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor
-from ietf.doc.models import DocAlias, RelatedDocument, RelatedDocHistory, BallotType, DocReminder
+from ietf.doc.models import RelatedDocument, 
RelatedDocHistory, BallotType, DocReminder from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, IRSGBallotDocEvent, NewRevisionDocEvent, StateDocEvent from ietf.doc.models import TelechatDocEvent, DocumentActionHolder, EditedAuthorsDocEvent from ietf.name.models import DocReminderTypeName, DocRelationshipName @@ -57,7 +57,7 @@ def save_document_in_history(doc): # copy fields fields = get_model_fields_as_dict(doc) fields["doc"] = doc - fields["name"] = doc.canonical_name() + fields["name"] = doc.name dochist = DocHistory(**fields) dochist.save() @@ -219,7 +219,7 @@ def needed_ballot_positions(doc, active_positions): else: related_set = RelatedDocHistory.objects.none() for rel in related_set.filter(relationship__slug__in=['tops', 'tois', 'tohist', 'toinf', 'tobcp', 'toexp']): - if (rel.target.document.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']): + if (rel.target.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']): needed = two_thirds_rule(recused=len(recuse)) break else: @@ -352,16 +352,6 @@ def augment_events_with_revision(doc, events): qs = NewRevisionDocEvent.objects.filter(doc=doc) event_revisions = list(qs.order_by('time', 'id').values('id', 'rev', 'time')) - if doc.type_id == "draft" and doc.get_state_slug() == "rfc": - # add fake "RFC" revision - if isinstance(events, QuerySetAny): - e = events.filter(type="published_rfc").order_by('time').last() - else: - e = doc.latest_event(type="published_rfc") - if e: - event_revisions.append(dict(id=e.id, time=e.time, rev="RFC")) - event_revisions.sort(key=lambda x: (x["time"], x["id"])) - for e in sorted(events, key=lambda e: (e.time, e.id), reverse=True): while event_revisions and (e.time, e.id) < (event_revisions[-1]["time"], event_revisions[-1]["id"]): event_revisions.pop() @@ -798,22 +788,21 @@ def rebuild_reference_relations(doc, filenames): errors = [] unfound = set() for ( ref, refType ) in refs.items(): - refdoc = DocAlias.objects.filter(name=ref) + refdoc = Document.objects.filter(name=ref) if not refdoc and re.match(r"^draft-.*-\d{2}$", ref): - refdoc = DocAlias.objects.filter(name=ref[:-3]) + refdoc = Document.objects.filter(name=ref[:-3]) count = refdoc.count() - # As of Dec 2021, DocAlias has a unique constraint on the name field, so count > 1 should not occur if count == 0: unfound.add( "%s" % ref ) continue elif count > 1: - errors.append("Too many DocAlias objects found for %s"%ref) + errors.append("Too many Document objects found for %s"%ref) else: # Don't add references to ourself - if doc != refdoc[0].document: + if doc != refdoc[0]: RelatedDocument.objects.get_or_create( source=doc, target=refdoc[ 0 ], relationship=DocRelationshipName.objects.get( slug='ref%s' % refType ) ) if unfound: - warnings.append('There were %d references with no matching DocAlias'%len(unfound)) + warnings.append('There were %d references with no matching Document'%len(unfound)) ret = {} if errors: @@ -848,26 +837,26 @@ def set_replaces_for_document(request, doc, new_replaces, by, email_subject, com for d in old_replaces: if d not in new_replaces: - other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document) + other_addrs = gather_address_lists('doc_replacement_changed',doc=d) to.update(other_addrs.to) cc.update(other_addrs.cc) RelatedDocument.objects.filter(source=doc, target=d, relationship=relationship).delete() if not RelatedDocument.objects.filter(target=d, relationship=relationship): - s = 'active' if d.document.expires 
> timezone.now() else 'expired' - d.document.set_state(State.objects.get(type='draft', slug=s)) + s = 'active' if d.expires > timezone.now() else 'expired' + d.set_state(State.objects.get(type='draft', slug=s)) for d in new_replaces: if d not in old_replaces: - other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document) + other_addrs = gather_address_lists('doc_replacement_changed',doc=d) to.update(other_addrs.to) cc.update(other_addrs.cc) RelatedDocument.objects.create(source=doc, target=d, relationship=relationship) - d.document.set_state(State.objects.get(type='draft', slug='repl')) + d.set_state(State.objects.get(type='draft', slug='repl')) - if d.document.stream_id in ('irtf','ise','iab'): - repl_state = State.objects.get(type_id='draft-stream-%s'%d.document.stream_id, slug='repl') - d.document.set_state(repl_state) - events.append(StateDocEvent.objects.create(doc=d.document, rev=d.document.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state)) + if d.stream_id in ('irtf','ise','iab'): + repl_state = State.objects.get(type_id='draft-stream-%s'%d.stream_id, slug='repl') + d.set_state(repl_state) + events.append(StateDocEvent.objects.create(doc=d, rev=d.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state)) # make sure there are no lingering suggestions duplicating new replacements RelatedDocument.objects.filter(source=doc, target__in=new_replaces, relationship="possibly-replaces").delete() @@ -937,7 +926,7 @@ def extract_complete_replaces_ancestor_mapping_for_docs(names): break relations = ( RelatedDocument.objects.filter(source__name__in=front, relationship="replaces") - .select_related("target").values_list("source__name", "target__docs__name") ) + .select_related("target").values_list("source__name", "target__name") ) if not relations: break @@ -958,49 +947,67 @@ def make_rev_history(doc): def get_predecessors(doc, predecessors=None): if predecessors is None: - predecessors = [] + predecessors = set() if hasattr(doc, 'relateddocument_set'): - for alias in doc.related_that_doc('replaces'): - for document in alias.docs.all(): - if document not in predecessors: - predecessors.append(document) - predecessors.extend(get_predecessors(document, predecessors)) + for document in doc.related_that_doc('replaces'): + if document not in predecessors: + predecessors.add(document) + predecessors.update(get_predecessors(document, predecessors)) + if doc.came_from_draft(): + predecessors.add(doc.came_from_draft()) + predecessors.update(get_predecessors(doc.came_from_draft(), predecessors)) return predecessors def get_ancestors(doc, ancestors = None): if ancestors is None: - ancestors = [] + ancestors = set() if hasattr(doc, 'relateddocument_set'): - for alias in doc.related_that('replaces'): - for document in alias.docs.all(): - if document not in ancestors: - ancestors.append(document) - ancestors.extend(get_ancestors(document, ancestors)) + for document in doc.related_that('replaces'): + if document not in ancestors: + ancestors.add(document) + ancestors.update(get_ancestors(document, ancestors)) + if doc.became_rfc(): + if doc.became_rfc() not in ancestors: + ancestors.add(doc.became_rfc()) + ancestors.update(get_ancestors(doc.became_rfc(), ancestors)) return ancestors def get_replaces_tree(doc): tree = get_predecessors(doc) - tree.extend(get_ancestors(doc)) + tree.update(get_ancestors(doc)) return tree history = {} docs = get_replaces_tree(doc) if docs is 
not None: - docs.append(doc) + docs.add(doc) for d in docs: - for e in d.docevent_set.filter(type='new_revision').distinct(): - if hasattr(e, 'newrevisiondocevent'): - url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/" - history[url] = { - 'name': d.name, - 'rev': e.newrevisiondocevent.rev, - 'published': e.time.isoformat(), - 'url': url, - } - if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists(): - history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages + if d.type_id == "rfc": + url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e = d.docevent_set.filter(type="published_rfc").order_by("-time").first() + history[url] = { + "name": d.name, + "rev": d.name, + "published": e and e.time.isoformat(), + "url": url, + } + else: + for e in d.docevent_set.filter(type='new_revision').distinct(): + if hasattr(e, 'newrevisiondocevent'): + url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/" + history[url] = { + 'name': d.name, + 'rev': e.newrevisiondocevent.rev, + 'published': e.time.isoformat(), + 'url': url, + } + if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists(): + history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages if doc.type_id == "draft": + # Do nothing - all draft revisions are captured above already. + e = None + elif doc.type_id == "rfc": # e.time.date() agrees with RPC publication date when shown in the RPC_TZINFO time zone e = doc.latest_event(type='published_rfc') else: @@ -1008,12 +1015,12 @@ def make_rev_history(doc): if e: url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=e.doc)) history[url] = { - 'name': e.doc.canonical_name(), - 'rev': e.doc.canonical_name(), + 'name': e.doc.name, + 'rev': e.doc.name, 'published': e.time.isoformat(), 'url': url } - if hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists(): + if doc.type_id != "rfc" and hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists(): history[url]['pages'] = doc.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages history = list(history.values()) return sorted(history, key=lambda x: x['published']) @@ -1025,14 +1032,11 @@ def get_search_cache_key(params): kwargs = dict([ (k,v) for (k,v) in list(params.items()) if k in fields ]) key = "doc:document:search:" + hashlib.sha512(json.dumps(kwargs, sort_keys=True).encode('utf-8')).hexdigest() return key - -def build_file_urls(doc: Union[Document, DocHistory]): - if doc.type_id != 'draft': - return [], [] - if doc.get_state_slug() == "rfc": - name = doc.canonical_name() - base_path = os.path.join(settings.RFC_PATH, name + ".") + +def build_file_urls(doc: Union[Document, DocHistory]): + if doc.type_id == "rfc": + base_path = os.path.join(settings.RFC_PATH, doc.name + ".") possible_types = settings.RFC_FILE_TYPES found_types = [t for t in possible_types if os.path.exists(base_path + t)] @@ -1041,17 +1045,17 @@ def build_file_urls(doc: Union[Document, DocHistory]): file_urls = [] for t in found_types: label = "plain text" if t == "txt" else t - file_urls.append((label, base + name + "." + t)) + file_urls.append((label, base + doc.name + "." 
+ t)) if "pdf" not in found_types and "txt" in found_types: - file_urls.append(("pdf", base + "pdfrfc/" + name + ".txt.pdf")) + file_urls.append(("pdf", base + "pdfrfc/" + doc.name + ".txt.pdf")) if "txt" in found_types: - file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=name)))) + file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=doc.name)))) if doc.tags.filter(slug="verified-errata").exists(): - file_urls.append(("with errata", settings.RFC_EDITOR_INLINE_ERRATA_URL.format(rfc_number=doc.rfc_number()))) - file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=name)))) - elif doc.rev: + file_urls.append(("with errata", settings.RFC_EDITOR_INLINE_ERRATA_URL.format(rfc_number=doc.rfc_number))) + file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name)))) + elif doc.type_id == "draft" and doc.rev != "": base_path = os.path.join(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR, doc.name + "-" + doc.rev + ".") possible_types = settings.IDSUBMIT_FILE_TYPES found_types = [t for t in possible_types if os.path.exists(base_path + t)] @@ -1067,12 +1071,14 @@ def build_file_urls(doc: Union[Document, DocHistory]): file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name,rev=doc.rev)))) file_urls.append(("bibxml", urlreverse('ietf.doc.views_doc.document_bibxml',kwargs=dict(name=doc.name,rev=doc.rev)))) else: - # As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''. - # All of these are in the rfc state and are covered by the above cases. - log.unreachable('2022-12-14') + if doc.type_id == "draft": + # TODO: look at the state of the database post migration and update this comment, or remove the block + # As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''. + # All of these are in the rfc state and are covered by the above cases. + log.unreachable('2022-12-14') file_urls = [] found_types = [] - + return file_urls, found_types def augment_docs_and_user_with_user_info(docs, user): @@ -1139,21 +1145,24 @@ def generate_idnits2_rfc_status(): 'unkn': 'U', } - rfcs = Document.objects.filter(type_id='draft',states__slug='rfc',states__type='draft') + rfcs = Document.objects.filter(type_id='rfc') for rfc in rfcs: - offset = int(rfc.rfcnum)-1 + offset = int(rfc.rfc_number)-1 blob[offset] = symbols[rfc.std_level_id] if rfc.related_that('obs'): blob[offset] = 'O' # Workarounds for unusual states in the datatracker - # Document.get(docalias='rfc6312').rfcnum == 6342 - # 6312 was published with the wrong rfc number in it - # weird workaround in the datatracker - there are two - # DocAliases starting with rfc - the canonical name code - # searches for the lexically highest alias starting with rfc - # which is getting lucky. + # The explanation for 6312 is from before docalias was removed + # The workaround is still needed, even if the datatracker + # state no longer matches what's described here: + # Document.get(docalias='rfc6312').rfc_number == 6342 + # 6312 was published with the wrong rfc number in it + # weird workaround in the datatracker - there are two + # DocAliases starting with rfc - the canonical name code + # searches for the lexically highest alias starting with rfc + # which is getting lucky. 
blob[6312 - 1] = 'O' # RFC200 is an old RFC List by Number @@ -1169,7 +1178,7 @@ def generate_idnits2_rfc_status(): def generate_idnits2_rfcs_obsoleted(): obsdict = defaultdict(list) for r in RelatedDocument.objects.filter(relationship_id='obs'): - obsdict[int(r.target.document.rfc_number())].append(int(r.source.rfc_number())) + obsdict[int(r.target.rfc_number)].append(int(r.source.rfc_number)) # Aren't these already guaranteed to be ints? for k in obsdict: obsdict[k] = sorted(obsdict[k]) return render_to_string('doc/idnits2-rfcs-obsoleted.txt', context={'obsitems':sorted(obsdict.items())}) @@ -1198,13 +1207,19 @@ def fuzzy_find_documents(name, rev=None): if re.match("^[0-9]+$", name): name = f'rfc{name}' + if name.startswith("rfc"): + sought_type = "rfc" + log.assertion("rev is None") + else: + sought_type = "draft" + # see if we can find a document using this name - docs = Document.objects.filter(docalias__name=name, type_id='draft') + docs = Document.objects.filter(name=name, type_id=sought_type) if rev and not docs.exists(): # No document found, see if the name/rev split has been misidentified. # Handles some special cases, like draft-ietf-tsvwg-ieee-802-11. name = '%s-%s' % (name, rev) - docs = Document.objects.filter(docalias__name=name, type_id='draft') + docs = Document.objects.filter(name=name, type_id='draft') if docs.exists(): rev = None # found a doc by name with rev = None, so update that diff --git a/ietf/doc/utils_charter.py b/ietf/doc/utils_charter.py index 2e85b3cc1..7d2001e4d 100644 --- a/ietf/doc/utils_charter.py +++ b/ietf/doc/utils_charter.py @@ -3,11 +3,12 @@ import datetime -import io import os import re import shutil +from pathlib import Path + from django.conf import settings from django.urls import reverse as urlreverse from django.template.loader import render_to_string @@ -62,10 +63,9 @@ def next_approved_revision(rev): return "%#02d" % (int(m.group('major')) + 1) def read_charter_text(doc): - filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + filename = Path(settings.CHARTER_PATH) / f"{doc.name}-{doc.rev}.txt" try: - with io.open(filename, 'r') as f: - return f.read() + return filename.read_text() except IOError: return "Error: couldn't read charter text" @@ -92,8 +92,8 @@ def change_group_state_after_charter_approval(group, by): def fix_charter_revision_after_approval(charter, by): # according to spec, 00-02 becomes 01, so copy file and record new revision try: - old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), charter.rev)) - new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), next_approved_revision(charter.rev))) + old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, charter.rev)) + new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, next_approved_revision(charter.rev))) shutil.copy(old, new) except IOError: log("There was an error copying %s to %s" % (old, new)) @@ -101,7 +101,7 @@ def fix_charter_revision_after_approval(charter, by): events = [] e = NewRevisionDocEvent(doc=charter, by=by, type="new_revision") e.rev = next_approved_revision(charter.rev) - e.desc = "New version available: %s-%s.txt" % (charter.canonical_name(), e.rev) + e.desc = "New version available: %s-%s.txt" % (charter.name, e.rev) e.save() events.append(e) diff --git a/ietf/doc/utils_search.py b/ietf/doc/utils_search.py index 0c2dafd16..5a6a43a63 100644 --- a/ietf/doc/utils_search.py +++ b/ietf/doc/utils_search.py @@ -9,7 +9,7 @@ 
from zoneinfo import ZoneInfo from django.conf import settings -from ietf.doc.models import Document, DocAlias, RelatedDocument, DocEvent, TelechatDocEvent, BallotDocEvent, DocTypeName +from ietf.doc.models import Document, RelatedDocument, DocEvent, TelechatDocEvent, BallotDocEvent, DocTypeName from ietf.doc.expire import expirable_drafts from ietf.doc.utils import augment_docs_and_user_with_user_info from ietf.meeting.models import SessionPresentation, Meeting, Session @@ -54,12 +54,13 @@ def fill_in_document_sessions(docs, doc_dict, doc_ids): def fill_in_document_table_attributes(docs, have_telechat_date=False): # fill in some attributes for the document table results to save # some hairy template code and avoid repeated SQL queries - # TODO - this function evolved from something that assumed it was handling only drafts. It still has places where it assumes all docs are drafts where that is not a correct assumption + # TODO - this function evolved from something that assumed it was handling only drafts. + # It still has places where it assumes all docs are drafts where that is not a correct assumption doc_dict = dict((d.pk, d) for d in docs) doc_ids = list(doc_dict.keys()) - rfc_aliases = dict([ (a.document.id, a.name) for a in DocAlias.objects.filter(name__startswith="rfc", docs__id__in=doc_ids) ]) + rfcs = dict((d.pk, d.name) for d in docs if d.type_id == "rfc") # latest event cache event_types = ("published_rfc", @@ -90,10 +91,8 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False): # misc expirable_pks = expirable_drafts(Document.objects.filter(pk__in=doc_ids)).values_list('pk', flat=True) for d in docs: - # emulate canonical name which is used by a lot of the utils - # d.canonical_name = wrap_value(rfc_aliases[d.pk] if d.pk in rfc_aliases else d.name) - if d.rfc_number() != None and d.latest_event_cache["published_rfc"]: + if d.type_id == "rfc" and d.latest_event_cache["published_rfc"]: d.latest_revision_date = d.latest_event_cache["published_rfc"].time elif d.latest_event_cache["new_revision"]: d.latest_revision_date = d.latest_event_cache["new_revision"].time @@ -118,7 +117,7 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False): d.search_heading = "%s" % (d.type,) d.expirable = False - if d.get_state_slug() != "rfc": + if d.type_id == "draft" and d.get_state_slug() != "rfc": d.milestones = [ m for (t, s, v, m) in sorted(((m.time, m.state.slug, m.desc, m) for m in d.groupmilestone_set.all() if m.state_id == "active")) ] d.review_assignments = review_assignments_to_list_for_docs([d]).get(d.name, []) @@ -128,29 +127,30 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False): # RFCs # errata - erratas = set(Document.objects.filter(tags="errata", id__in=list(rfc_aliases.keys())).distinct().values_list("name", flat=True)) - verified_erratas = set(Document.objects.filter(tags="verified-errata", id__in=list(rfc_aliases.keys())).distinct().values_list("name", flat=True)) + erratas = set(Document.objects.filter(tags="errata", id__in=list(rfcs.keys())).distinct().values_list("name", flat=True)) + verified_erratas = set(Document.objects.filter(tags="verified-errata", id__in=list(rfcs.keys())).distinct().values_list("name", flat=True)) for d in docs: d.has_errata = d.name in erratas d.has_verified_errata = d.name in verified_erratas # obsoleted/updated by - for a in rfc_aliases: - d = doc_dict[a] + for rfc in rfcs: + d = doc_dict[rfc] d.obsoleted_by_list = [] d.updated_by_list = [] # Revisit this block after RFCs become first-class 
Document objects xed_by = list( RelatedDocument.objects.filter( - target__name__in=list(rfc_aliases.values()), + target__name__in=list(rfcs.values()), relationship__in=("obs", "updates"), ).select_related("target") ) - rel_rfc_aliases = { - a.document.id: re.sub(r"rfc(\d+)", r"RFC \1", a.name, flags=re.IGNORECASE) - for a in DocAlias.objects.filter( - name__startswith="rfc", docs__id__in=[rel.source_id for rel in xed_by] + # TODO - this likely reduces to something even simpler + rel_rfcs = { + d.id: re.sub(r"rfc(\d+)", r"RFC \1", d.name, flags=re.IGNORECASE) + for d in Document.objects.filter( + type_id="rfc", id__in=[rel.source_id for rel in xed_by] ) } xed_by.sort( @@ -158,18 +158,17 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False): re.sub( r"rfc\s*(\d+)", r"\1", - rel_rfc_aliases[rel.source_id], + rel_rfcs[rel.source_id], flags=re.IGNORECASE, ) ) ) for rel in xed_by: - d = doc_dict[rel.target.document.id] - s = rel_rfc_aliases[rel.source_id] + d = doc_dict[rel.target.id] if rel.relationship_id == "obs": - d.obsoleted_by_list.append(s) + d.obsoleted_by_list.append(rel.source) elif rel.relationship_id == "updates": - d.updated_by_list.append(s) + d.updated_by_list.append(rel.source) def augment_docs_with_related_docs_info(docs): """Augment all documents with related documents information. @@ -179,7 +178,7 @@ def augment_docs_with_related_docs_info(docs): if d.type_id == 'conflrev': if len(d.related_that_doc('conflrev')) != 1: continue - originalDoc = d.related_that_doc('conflrev')[0].document + originalDoc = d.related_that_doc('conflrev')[0] d.pages = originalDoc.pages def prepare_document_table(request, docs, query=None, max_results=200): @@ -193,7 +192,7 @@ def prepare_document_table(request, docs, query=None, max_results=200): # the number of queries docs = docs.select_related("ad", "std_level", "intended_std_level", "group", "stream", "shepherd", ) docs = docs.prefetch_related("states__type", "tags", "groupmilestone_set__group", "reviewrequest_set__team", - "ad__email_set", "docalias__iprdocrel_set") + "ad__email_set", "iprdocrel_set") docs = docs[:max_results] # <- that is still a queryset, but with a LIMIT now docs = list(docs) else: @@ -217,7 +216,7 @@ def prepare_document_table(request, docs, query=None, max_results=200): res = [] - rfc_num = d.rfc_number() + rfc_num = num(d.rfc_number) if d.rfc_number else None if d.type_id == "draft": res.append(num(["Active", "Expired", "Replaced", "Withdrawn", "RFC"].index(d.search_heading.split()[0]))) @@ -232,25 +231,25 @@ def prepare_document_table(request, docs, query=None, max_results=200): elif sort_key == "date": res.append(str(d.latest_revision_date.astimezone(ZoneInfo(settings.TIME_ZONE)))) elif sort_key == "status": - if rfc_num != None: - res.append(num(rfc_num)) + if rfc_num is not None: + res.append(rfc_num) else: res.append(num(d.get_state().order) if d.get_state() else None) elif sort_key == "ipr": res.append(len(d.ipr())) elif sort_key == "ad": - if rfc_num != None: - res.append(num(rfc_num)) + if rfc_num is not None: + res.append(rfc_num) elif d.get_state_slug() == "active": if d.get_state("draft-iesg"): res.append(d.get_state("draft-iesg").order) else: res.append(0) else: - if rfc_num != None: - res.append(num(rfc_num)) + if rfc_num is not None: + res.append(rfc_num) else: - res.append(d.canonical_name()) + res.append(d.name) return res diff --git a/ietf/doc/views_ballot.py b/ietf/doc/views_ballot.py index 98fb12610..9b0ccdcea 100644 --- a/ietf/doc/views_ballot.py +++ b/ietf/doc/views_ballot.py @@ 
-179,7 +179,7 @@ def save_position(form, doc, ballot, balloter, login=None, send_email=False): @role_required("Area Director", "Secretariat", "IRSG Member", "RSAB Member") def edit_position(request, name, ballot_id): """Vote and edit discuss and comment on document""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) ballot = get_object_or_404(BallotDocEvent, type="created_ballot", pk=ballot_id, doc=doc) balloter = login = request.user.person @@ -256,7 +256,7 @@ def api_set_position(request): if not name: return err(400, "Missing document name") try: - doc = Document.objects.get(docalias__name=name) + doc = Document.objects.get(name=name) except Document.DoesNotExist: return err(400, "Document not found") position_names = BallotPositionName.objects.values_list('slug', flat=True) @@ -323,7 +323,7 @@ def build_position_email(balloter, doc, pos): @role_required('Area Director','Secretariat','IRSG Member', 'RSAB Member') def send_ballot_comment(request, name, ballot_id): """Email document ballot position discuss/comment for Area Director.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) ballot = get_object_or_404(BallotDocEvent, type="created_ballot", pk=ballot_id, doc=doc) if not has_role(request.user, 'Secretariat'): @@ -413,7 +413,7 @@ def clear_ballot(request, name, ballot_type_slug): @role_required('Area Director','Secretariat') def defer_ballot(request, name): """Signal post-pone of ballot, notifying relevant parties.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.type_id not in ('draft','conflrev','statchg'): raise Http404 interesting_state = dict(draft='draft-iesg',conflrev='conflrev',statchg='statchg') @@ -467,7 +467,7 @@ def defer_ballot(request, name): @role_required('Area Director','Secretariat') def undefer_ballot(request, name): """undo deferral of ballot ballot.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.type_id not in ('draft','conflrev','statchg'): raise Http404 if doc.type_id == 'draft' and not doc.get_state("draft-iesg"): @@ -503,7 +503,7 @@ class LastCallTextForm(forms.Form): @role_required('Area Director','Secretariat') def lastcalltext(request, name): """Editing of the last call text""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not doc.get_state("draft-iesg"): raise Http404 @@ -589,7 +589,7 @@ class BallotWriteupForm(forms.Form): @role_required('Area Director','Secretariat') def ballot_writeupnotes(request, name): """Editing of ballot write-up and notes""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) prev_state = doc.get_state("draft-iesg") login = request.user.person @@ -700,7 +700,7 @@ class BallotRfcEditorNoteForm(forms.Form): @role_required('Area Director','Secretariat','IAB Chair','IRTF Chair','ISE') def ballot_rfceditornote(request, name): """Editing of RFC Editor Note""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not is_authorized_in_doc_stream(request.user, doc): permission_denied(request, "You do not have the necessary permissions to change the RFC Editor Note for this document") @@ -765,7 +765,7 @@ class ApprovalTextForm(forms.Form): @role_required('Area Director','Secretariat') def ballot_approvaltext(request, name): """Editing of 
approval text""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not doc.get_state("draft-iesg"): raise Http404 @@ -816,7 +816,7 @@ def ballot_approvaltext(request, name): @role_required('Secretariat') def approve_ballot(request, name): """Approve ballot, sending out announcement, changing state.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not doc.get_state("draft-iesg"): raise Http404 @@ -947,13 +947,19 @@ class ApproveDownrefsForm(forms.Form): @role_required('Secretariat') def approve_downrefs(request, name): """Document ballot was just approved; add the checked downwared references to the downref registry.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not doc.get_state("draft-iesg"): raise Http404 login = request.user.person - downrefs_to_rfc = [rel for rel in doc.relateddocument_set.all() if rel.is_downref() and not rel.is_approved_downref() and rel.target.document.is_rfc()] + downrefs_to_rfc = [ + rel + for rel in doc.relateddocument_set.all() + if rel.is_downref() + and not rel.is_approved_downref() + and rel.target.type_id == "rfc" + ] downrefs_to_rfc_qs = RelatedDocument.objects.filter(pk__in=[r.pk for r in downrefs_to_rfc]) @@ -968,12 +974,12 @@ def approve_downrefs(request, name): c = DocEvent(type="downref_approved", doc=rel.source, rev=rel.source.rev, by=login) c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % ( - rel.target.document.rfc_number(), rel.source, rel.source.rev) + rel.target.rfc_number, rel.source, rel.source.rev) c.save() - c = DocEvent(type="downref_approved", doc=rel.target.document, - rev=rel.target.document.rev, by=login) + c = DocEvent(type="downref_approved", doc=rel.target, + rev=rel.target.rev, by=login) c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % ( - rel.target.document.rfc_number(), rel.source, rel.source.rev) + rel.target.rfc_number, rel.source, rel.source.rev) c.save() return HttpResponseRedirect(doc.get_absolute_url()) @@ -995,7 +1001,7 @@ class MakeLastCallForm(forms.Form): @role_required('Secretariat') def make_last_call(request, name): """Make last call for Internet-Draft, sending out announcement.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not (doc.get_state("draft-iesg") or doc.get_state("statchg")): raise Http404 @@ -1103,7 +1109,7 @@ def make_last_call(request, name): @role_required('Secretariat', 'IRTF Chair') def issue_irsg_ballot(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.stream.slug != "irtf" or doc.type != DocTypeName.objects.get(slug="draft"): raise Http404 @@ -1158,7 +1164,7 @@ def issue_irsg_ballot(request, name): @role_required('Secretariat', 'IRTF Chair') def close_irsg_ballot(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.stream.slug != "irtf" or doc.type != DocTypeName.objects.get(slug="draft"): raise Http404 @@ -1199,7 +1205,7 @@ def irsg_ballot_status(request): @role_required('Secretariat', 'RSAB Chair') def issue_rsab_ballot(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.stream.slug != "editorial" or doc.type != DocTypeName.objects.get(slug="draft"): raise Http404 @@ -1248,7 +1254,7 @@ def 
issue_rsab_ballot(request, name): @role_required('Secretariat', 'RSAB Chair') def close_rsab_ballot(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.stream.slug != "editorial" or doc.type_id != "draft": raise Http404 diff --git a/ietf/doc/views_bofreq.py b/ietf/doc/views_bofreq.py index 92a130efb..3bd10287b 100644 --- a/ietf/doc/views_bofreq.py +++ b/ietf/doc/views_bofreq.py @@ -15,7 +15,7 @@ from django.utils.html import escape from ietf.doc.mails import (email_bofreq_title_changed, email_bofreq_editors_changed, email_bofreq_new_revision, email_bofreq_responsible_changed) -from ietf.doc.models import (Document, DocAlias, DocEvent, NewRevisionDocEvent, +from ietf.doc.models import (Document, DocEvent, NewRevisionDocEvent, BofreqEditorDocEvent, BofreqResponsibleDocEvent, State) from ietf.doc.utils import add_state_change_event from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible @@ -168,8 +168,6 @@ def new_bof_request(request): ) e2.editors.set([request.user.person]) bofreq.save_with_history([e1,e2]) - alias = DocAlias.objects.create(name=name) - alias.docs.set([bofreq]) bofreq_submission = form.cleaned_data['bofreq_submission'] if bofreq_submission == "upload": content = get_cleaned_text_file_content(form.cleaned_data["bofreq_file"]) diff --git a/ietf/doc/views_charter.py b/ietf/doc/views_charter.py index d3173291d..9596970f8 100644 --- a/ietf/doc/views_charter.py +++ b/ietf/doc/views_charter.py @@ -3,11 +3,11 @@ import datetime -import io import json -import os import textwrap +from pathlib import Path + from django.http import HttpResponseRedirect, HttpResponseNotFound, Http404 from django.shortcuts import get_object_or_404, redirect, render from django.urls import reverse as urlreverse @@ -22,7 +22,7 @@ from django.utils.html import escape import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocAlias, DocHistory, State, DocEvent, +from ietf.doc.models import ( Document, DocHistory, State, DocEvent, BallotDocEvent, BallotPositionDocEvent, InitialReviewDocEvent, NewRevisionDocEvent, WriteupDocEvent, TelechatDocEvent ) from ietf.doc.utils import ( add_state_change_event, close_open_ballots, @@ -32,7 +32,7 @@ from ietf.doc.utils_charter import ( historic_milestones_for_charter, generate_ballot_writeup, generate_issue_ballot_mail, next_revision, derive_new_work_text, change_group_state_after_charter_approval, fix_charter_revision_after_approval, - split_charter_name) + split_charter_name, charter_name_for_group) from ietf.doc.mails import email_state_changed, email_charter_internal_review from ietf.group.mails import email_admin_re_charter from ietf.group.models import Group, ChangeStateGroupEvent, MilestoneGroupEvent @@ -42,6 +42,7 @@ from ietf.ietfauth.utils import has_role, role_required from ietf.name.models import GroupStateName from ietf.person.models import Person from ietf.utils.history import find_history_active_at +from ietf.utils.log import assertion from ietf.utils.mail import send_mail_preformatted from ietf.utils.textupload import get_cleaned_text_file_content from ietf.utils.response import permission_denied @@ -362,38 +363,41 @@ class UploadForm(forms.Form): @login_required def submit(request, name, option=None): - if not name.startswith('charter-'): - raise Http404 - + # Charters are named "charter--" charter = Document.objects.filter(type="charter", name=name).first() if charter: group = charter.group - charter_canonical_name = charter.canonical_name() + 
assertion("charter.name == charter_name_for_group(group)") charter_rev = charter.rev else: top_org, group_acronym = split_charter_name(name) group = get_object_or_404(Group, acronym=group_acronym) - charter_canonical_name = name + if name != charter_name_for_group(group): + raise Http404 # do not allow creation of misnamed charters charter_rev = "00-00" - if not can_manage_all_groups_of_type(request.user, group.type_id) or not group.features.has_chartering_process: + if ( + not can_manage_all_groups_of_type(request.user, group.type_id) + or not group.features.has_chartering_process + ): permission_denied(request, "You don't have permission to access this view.") - - path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter_canonical_name, charter_rev)) - not_uploaded_yet = charter_rev.endswith("-00") and not os.path.exists(path) + charter_filename = Path(settings.CHARTER_PATH) / f"{name}-{charter_rev}.txt" + not_uploaded_yet = charter_rev.endswith("-00") and not charter_filename.exists() if not_uploaded_yet or not charter: # this case is special - we recently chartered or rechartered and have no file yet next_rev = charter_rev else: # search history for possible collisions with abandoned efforts - prev_revs = list(charter.history_set.order_by('-time').values_list('rev', flat=True)) + prev_revs = list( + charter.history_set.order_by("-time").values_list("rev", flat=True) + ) next_rev = next_revision(charter.rev) while next_rev in prev_revs: next_rev = next_revision(next_rev) - if request.method == 'POST': + if request.method == "POST": form = UploadForm(request.POST, request.FILES) if form.is_valid(): # Also save group history so we can search for it @@ -408,9 +412,10 @@ def submit(request, name, option=None): abstract=group.name, rev=next_rev, ) - DocAlias.objects.create(name=charter.name).docs.add(charter) - charter.set_state(State.objects.get(used=True, type="charter", slug="notrev")) + charter.set_state( + State.objects.get(used=True, type="charter", slug="notrev") + ) group.charter = charter group.save() @@ -418,56 +423,74 @@ def submit(request, name, option=None): charter.rev = next_rev events = [] - e = NewRevisionDocEvent(doc=charter, by=request.user.person, type="new_revision") - e.desc = "New version available: %s-%s.txt" % (charter.canonical_name(), charter.rev) + e = NewRevisionDocEvent( + doc=charter, by=request.user.person, type="new_revision" + ) + e.desc = "New version available: %s-%s.txt" % ( + charter.name, + charter.rev, + ) e.rev = charter.rev e.save() events.append(e) # Save file on disk - filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.canonical_name(), charter.rev)) - with io.open(filename, 'w', encoding='utf-8') as destination: - if form.cleaned_data['txt']: - destination.write(form.cleaned_data['txt']) + charter_filename = charter_filename.with_name( + f"{name}-{charter.rev}.txt" + ) # update rev + with charter_filename.open("w", encoding="utf-8") as destination: + if form.cleaned_data["txt"]: + destination.write(form.cleaned_data["txt"]) else: - destination.write(form.cleaned_data['content']) + destination.write(form.cleaned_data["content"]) - if option in ['initcharter','recharter'] and charter.ad == None: - charter.ad = getattr(group.ad_role(),'person',None) + if option in ["initcharter", "recharter"] and charter.ad == None: + charter.ad = getattr(group.ad_role(), "person", None) charter.save_with_history(events) if option: - return redirect('ietf.doc.views_charter.change_state', name=charter.name, option=option) + return redirect( 
+ "ietf.doc.views_charter.change_state", + name=charter.name, + option=option, + ) else: return redirect("ietf.doc.views_doc.document_main", name=charter.name) else: - init = { "content": "" } + init = {"content": ""} if not_uploaded_yet and charter: # use text from last approved revision last_approved = charter.rev.split("-")[0] - h = charter.history_set.filter(rev=last_approved).order_by("-time", "-id").first() + h = ( + charter.history_set.filter(rev=last_approved) + .order_by("-time", "-id") + .first() + ) if h: - charter_canonical_name = h.canonical_name() - charter_rev = h.rev - - filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter_canonical_name, charter_rev)) + assertion("h.name == charter_name_for_group(group)") + charter_filename = charter_filename.with_name( + f"{name}-{h.rev}.txt" + ) # update rev try: - with io.open(filename, 'r') as f: - init["content"] = f.read() + init["content"] = charter_filename.read_text() except IOError: pass form = UploadForm(initial=init) fill_in_charter_info(group) - return render(request, 'doc/charter/submit.html', { - 'form': form, - 'next_rev': next_rev, - 'group': group, - 'name': name, - }) + return render( + request, + "doc/charter/submit.html", + { + "form": form, + "next_rev": next_rev, + "group": group, + "name": name, + }, + ) class ActionAnnouncementTextForm(forms.Form): announcement_text = forms.CharField(widget=forms.Textarea, required=True, strip=False) @@ -484,7 +507,7 @@ class ReviewAnnouncementTextForm(forms.Form): return self.cleaned_data["announcement_text"].replace("\r", "") -@role_required('Area Director','Secretariat') +@role_required("Area Director", "Secretariat") def review_announcement_text(request, name): """Editing of review announcement text""" charter = get_object_or_404(Document, type="charter", name=name) @@ -493,7 +516,9 @@ def review_announcement_text(request, name): by = request.user.person existing = charter.latest_event(WriteupDocEvent, type="changed_review_announcement") - existing_new_work = charter.latest_event(WriteupDocEvent, type="changed_new_work_text") + existing_new_work = charter.latest_event( + WriteupDocEvent, type="changed_new_work_text" + ) if not existing: (existing, existing_new_work) = default_review_text(group, charter, by) @@ -506,19 +531,23 @@ def review_announcement_text(request, name): existing_new_work.by = by existing_new_work.type = "changed_new_work_text" existing_new_work.desc = "%s review text was changed" % group.type.name - existing_new_work.text = derive_new_work_text(existing.text,group) + existing_new_work.text = derive_new_work_text(existing.text, group) existing_new_work.time = timezone.now() - form = ReviewAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text),new_work_text=escape(existing_new_work.text))) + form = ReviewAnnouncementTextForm( + initial=dict( + announcement_text=escape(existing.text), + new_work_text=escape(existing_new_work.text), + ) + ) - if request.method == 'POST': + if request.method == "POST": form = ReviewAnnouncementTextForm(request.POST) if "save_text" in request.POST and form.is_valid(): - now = timezone.now() events = [] - t = form.cleaned_data['announcement_text'] + t = form.cleaned_data["announcement_text"] if t != existing.text: e = WriteupDocEvent(doc=charter, rev=charter.rev) e.by = by @@ -532,11 +561,11 @@ def review_announcement_text(request, name): existing.save() events.append(existing) - t = form.cleaned_data['new_work_text'] + t = form.cleaned_data["new_work_text"] if t != existing_new_work.text: e = 
WriteupDocEvent(doc=charter, rev=charter.rev) e.by = by - e.type = "changed_new_work_text" + e.type = "changed_new_work_text" e.desc = "%s new work message text was changed" % (group.type.name) e.text = t e.time = now @@ -549,33 +578,71 @@ def review_announcement_text(request, name): charter.save_with_history(events) if request.GET.get("next", "") == "approve": - return redirect('ietf.doc.views_charter.approve', name=charter.canonical_name()) + return redirect( + "ietf.doc.views_charter.approve", name=charter.name + ) - return redirect('ietf.doc.views_doc.document_writeup', name=charter.canonical_name()) + return redirect( + "ietf.doc.views_doc.document_writeup", name=charter.name + ) if "regenerate_text" in request.POST: (existing, existing_new_work) = default_review_text(group, charter, by) existing.save() existing_new_work.save() - form = ReviewAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text), - new_work_text=escape(existing_new_work.text))) + form = ReviewAnnouncementTextForm( + initial=dict( + announcement_text=escape(existing.text), + new_work_text=escape(existing_new_work.text), + ) + ) - if any(x in request.POST for x in ['send_annc_only','send_nw_only','send_both']) and form.is_valid(): - if any(x in request.POST for x in ['send_annc_only','send_both']): - parsed_msg = send_mail_preformatted(request, form.cleaned_data['announcement_text']) - messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],)) - if any(x in request.POST for x in ['send_nw_only','send_both']): - parsed_msg = send_mail_preformatted(request, form.cleaned_data['new_work_text']) - messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],)) - return redirect('ietf.doc.views_doc.document_writeup', name=charter.name) + if ( + any( + x in request.POST + for x in ["send_annc_only", "send_nw_only", "send_both"] + ) + and form.is_valid() + ): + if any(x in request.POST for x in ["send_annc_only", "send_both"]): + parsed_msg = send_mail_preformatted( + request, form.cleaned_data["announcement_text"] + ) + messages.success( + request, + "The email To: '%s' with Subject: '%s' has been sent." + % ( + parsed_msg["To"], + parsed_msg["Subject"], + ), + ) + if any(x in request.POST for x in ["send_nw_only", "send_both"]): + parsed_msg = send_mail_preformatted( + request, form.cleaned_data["new_work_text"] + ) + messages.success( + request, + "The email To: '%s' with Subject: '%s' has been sent." 
+ % ( + parsed_msg["To"], + parsed_msg["Subject"], + ), + ) + return redirect("ietf.doc.views_doc.document_writeup", name=charter.name) - return render(request, 'doc/charter/review_announcement_text.html', - dict(charter=charter, - back_url=urlreverse('ietf.doc.views_doc.document_writeup', kwargs=dict(name=charter.name)), - announcement_text_form=form, - )) + return render( + request, + "doc/charter/review_announcement_text.html", + dict( + charter=charter, + back_url=urlreverse( + "ietf.doc.views_doc.document_writeup", kwargs=dict(name=charter.name) + ), + announcement_text_form=form, + ), + ) -@role_required('Area Director','Secretariat') +@role_required("Area Director", "Secretariat") def action_announcement_text(request, name): """Editing of action announcement text""" charter = get_object_or_404(Document, type="charter", name=name) @@ -590,16 +657,18 @@ def action_announcement_text(request, name): if not existing: raise Http404 - form = ActionAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text))) + form = ActionAnnouncementTextForm( + initial=dict(announcement_text=escape(existing.text)) + ) - if request.method == 'POST': + if request.method == "POST": form = ActionAnnouncementTextForm(request.POST) if "save_text" in request.POST and form.is_valid(): - t = form.cleaned_data['announcement_text'] + t = form.cleaned_data["announcement_text"] if t != existing.text: e = WriteupDocEvent(doc=charter, rev=charter.rev) e.by = by - e.type = "changed_action_announcement" + e.type = "changed_action_announcement" e.desc = "%s action text was changed" % group.type.name e.text = t e.save() @@ -607,25 +676,46 @@ def action_announcement_text(request, name): existing.save() if request.GET.get("next", "") == "approve": - return redirect('ietf.doc.views_charter.approve', name=charter.canonical_name()) + return redirect( + "ietf.doc.views_charter.approve", name=charter.name + ) - return redirect('ietf.doc.views_doc.document_writeup', name=charter.canonical_name()) + return redirect( + "ietf.doc.views_doc.document_writeup", name=charter.name + ) if "regenerate_text" in request.POST: e = default_action_text(group, charter, by) e.save() - form = ActionAnnouncementTextForm(initial=dict(announcement_text=escape(e.text))) + form = ActionAnnouncementTextForm( + initial=dict(announcement_text=escape(e.text)) + ) if "send_text" in request.POST and form.is_valid(): - parsed_msg = send_mail_preformatted(request, form.cleaned_data['announcement_text']) - messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],)) - return redirect('ietf.doc.views_doc.document_writeup', name=charter.name) + parsed_msg = send_mail_preformatted( + request, form.cleaned_data["announcement_text"] + ) + messages.success( + request, + "The email To: '%s' with Subject: '%s' has been sent." 
+ % ( + parsed_msg["To"], + parsed_msg["Subject"], + ), + ) + return redirect("ietf.doc.views_doc.document_writeup", name=charter.name) - return render(request, 'doc/charter/action_announcement_text.html', - dict(charter=charter, - back_url=urlreverse('ietf.doc.views_doc.document_writeup', kwargs=dict(name=charter.name)), - announcement_text_form=form, - )) + return render( + request, + "doc/charter/action_announcement_text.html", + dict( + charter=charter, + back_url=urlreverse( + "ietf.doc.views_doc.document_writeup", kwargs=dict(name=charter.name) + ), + announcement_text_form=form, + ), + ) class BallotWriteupForm(forms.Form): ballot_writeup = forms.CharField(widget=forms.Textarea, required=True, strip=False) @@ -806,33 +896,37 @@ def approve(request, name): dict(charter=charter, announcement=escape(announcement))) -def charter_with_milestones_txt(request, name, rev): - charter = get_object_or_404(Document, type="charter", docalias__name=name) - revision_event = charter.latest_event(NewRevisionDocEvent, type="new_revision", rev=rev) +def charter_with_milestones_txt(request, name, rev): + charter = get_object_or_404(Document, type="charter", name=name) + + revision_event = charter.latest_event( + NewRevisionDocEvent, type="new_revision", rev=rev + ) if not revision_event: return HttpResponseNotFound("Revision %s not found in database" % rev) # read charter text c = find_history_active_at(charter, revision_event.time) or charter - filename = '%s-%s.txt' % (c.canonical_name(), rev) - - charter_text = "" - + filename = Path(settings.CHARTER_PATH) / f"{c.name}-{rev}.txt" try: - with io.open(os.path.join(settings.CHARTER_PATH, filename), 'r') as f: - charter_text = force_str(f.read(), errors='ignore') + with filename.open() as f: + charter_text = force_str(f.read(), errors="ignore") except IOError: - charter_text = "Error reading charter text %s" % filename + charter_text = f"Error reading charter text {filename.name}" milestones = historic_milestones_for_charter(charter, rev) # wrap the output nicely - wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" " * 11, width=80, break_long_words=False) + wrapper = textwrap.TextWrapper( + initial_indent="", subsequent_indent=" " * 11, width=80, break_long_words=False + ) for m in milestones: m.desc_filled = wrapper.fill(m.desc) - return render(request, 'doc/charter/charter_with_milestones.txt', - dict(charter_text=charter_text, - milestones=milestones), - content_type="text/plain; charset=%s"%settings.DEFAULT_CHARSET) + return render( + request, + "doc/charter/charter_with_milestones.txt", + dict(charter_text=charter_text, milestones=milestones), + content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET, + ) diff --git a/ietf/doc/views_conflict_review.py b/ietf/doc/views_conflict_review.py index 7d939ec98..ec5a18c7a 100644 --- a/ietf/doc/views_conflict_review.py +++ b/ietf/doc/views_conflict_review.py @@ -16,7 +16,7 @@ from django.utils.html import escape import debug # pyflakes:ignore -from ietf.doc.models import ( BallotDocEvent, BallotPositionDocEvent, DocAlias, DocEvent, +from ietf.doc.models import ( BallotDocEvent, BallotPositionDocEvent, DocEvent, Document, NewRevisionDocEvent, State ) from ietf.doc.utils import ( add_state_change_event, close_open_ballots, create_ballot_if_not_open, update_telechat ) @@ -98,7 +98,7 @@ def change_state(request, name, option=None): ok_to_publish) if new_state.slug in ["appr-reqnopub-sent", "appr-noprob-sent", "withdraw", "dead"]: - doc = 
review.related_that_doc("conflrev")[0].document + doc = review.related_that_doc("conflrev")[0] update_stream_state(doc, login, 'chair-w' if doc.stream_id=='irtf' else 'ise-rev', 'iesg-com') return redirect('ietf.doc.views_doc.document_main', name=review.name) @@ -123,7 +123,7 @@ def send_conflict_review_ad_changed_email(request, review, event): by = request.user.person, event = event, review = review, - reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document, + reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target, review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(), ) ) @@ -138,7 +138,7 @@ def send_conflict_review_started_email(request, review): cc = addrs.cc, by = request.user.person, review = review, - reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document, + reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target, review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(), ) ) @@ -147,7 +147,7 @@ def send_conflict_review_started_email(request, review): addrs = gather_address_lists('conflrev_requested_iana',doc=review).as_strings(compact=False) email_iana(request, - review.relateddocument_set.get(relationship__slug='conflrev').target.document, + review.relateddocument_set.get(relationship__slug='conflrev').target, addrs.to, msg, cc=addrs.cc) @@ -165,7 +165,7 @@ def send_conflict_eval_email(request,review): send_mail_preformatted(request,msg,override=override) addrs = gather_address_lists('ballot_issued_iana',doc=review).as_strings() email_iana(request, - review.relateddocument_set.get(relationship__slug='conflrev').target.document, + review.relateddocument_set.get(relationship__slug='conflrev').target, addrs.to, msg, addrs.cc) @@ -181,7 +181,7 @@ class UploadForm(forms.Form): return get_cleaned_text_file_content(self.cleaned_data["txt"]) def save(self, review): - filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev)) + filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev)) with io.open(filename, 'w', encoding='utf-8') as destination: if self.cleaned_data['txt']: destination.write(self.cleaned_data['txt']) @@ -195,7 +195,7 @@ def submit(request, name): login = request.user.person - path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev)) + path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev)) not_uploaded_yet = review.rev == "00" and not os.path.exists(path) if not_uploaded_yet: @@ -212,7 +212,7 @@ def submit(request, name): events = [] e = NewRevisionDocEvent(doc=review, by=login, type="new_revision") - e.desc = "New version available: %s-%s.txt" % (review.canonical_name(), review.rev) + e.desc = "New version available: %s-%s.txt" % (review.name, review.rev) e.rev = review.rev e.save() events.append(e) @@ -244,7 +244,7 @@ def submit(request, name): dict(), )) else: - filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev)) + filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev)) try: with io.open(filename, 'r') as f: init["content"] = f.read() @@ -257,7 +257,7 @@ def submit(request, name): {'form': form, 'next_rev': next_rev, 'review' : review, - 'conflictdoc' : review.relateddocument_set.get(relationship__slug='conflrev').target.document, + 'conflictdoc' : 
review.relateddocument_set.get(relationship__slug='conflrev').target, }) @role_required("Area Director", "Secretariat") @@ -285,8 +285,8 @@ def edit_ad(request, name): form = AdForm(initial=init) - conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document - titletext = 'the conflict review of %s-%s' % (conflictdoc.canonical_name(),conflictdoc.rev) + conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target + titletext = 'the conflict review of %s-%s' % (conflictdoc.name,conflictdoc.rev) return render(request, 'doc/change_ad.html', {'form': form, 'doc': review, @@ -297,7 +297,7 @@ def edit_ad(request, name): def default_approval_text(review): current_text = review.text_or_error() # pyflakes:ignore - conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document + conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target if conflictdoc.stream_id=='ise': receiver = 'Independent Submissions Editor' elif conflictdoc.stream_id=='irtf': @@ -365,7 +365,7 @@ def approve_conflict_review(request, name): c.desc = "The following approval message was sent\n"+form.cleaned_data['announcement_text'] c.save() - doc = review.related_that_doc("conflrev")[0].document + doc = review.related_that_doc("conflrev")[0] update_stream_state(doc, login, 'chair-w' if doc.stream_id=='irtf' else 'ise-rev', 'iesg-com') return HttpResponseRedirect(review.get_absolute_url()) @@ -378,7 +378,7 @@ def approve_conflict_review(request, name): return render(request, 'doc/conflict_review/approve.html', dict( review = review, - conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document, + conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target, form = form, )) @@ -429,7 +429,7 @@ def start_review_sanity_check(request, name): raise Http404 # sanity check that there's not already a conflict review document for this document - if [ rel.source for alias in doc_to_review.docalias.all() for rel in alias.relateddocument_set.filter(relationship='conflrev') ]: + if [ rel.source for rel in doc_to_review.targets_related.filter(relationship='conflrev') ]: raise Http404 return doc_to_review @@ -461,11 +461,8 @@ def build_conflict_review_document(login, doc_to_review, ad, notify, create_in_s group=iesg_group, ) conflict_review.set_state(create_in_state) - - DocAlias.objects.create( name=review_name).docs.add( conflict_review ) - - conflict_review.relateddocument_set.create(target=DocAlias.objects.get(name=doc_to_review.name),relationship_id='conflrev') + conflict_review.relateddocument_set.create(target=doc_to_review, relationship_id='conflrev') c = DocEvent(type="added_comment", doc=conflict_review, rev=conflict_review.rev, by=login) c.desc = "IETF conflict review requested" diff --git a/ietf/doc/views_doc.py b/ietf/doc/views_doc.py index a3548fa92..8408f5a0c 100644 --- a/ietf/doc/views_doc.py +++ b/ietf/doc/views_doc.py @@ -54,13 +54,13 @@ from django.contrib.staticfiles import finders import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocAlias, DocHistory, DocEvent, BallotDocEvent, BallotType, +from ietf.doc.models import ( Document, DocHistory, DocEvent, BallotDocEvent, BallotType, ConsensusDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent, IanaExpertDocEvent, IESG_BALLOT_ACTIVE_STATES, STATUSCHANGE_RELATIONS, DocumentActionHolder, DocumentAuthor, RelatedDocument, RelatedDocHistory) from ietf.doc.utils import 
(augment_events_with_revision, can_adopt_draft, can_unadopt_draft, get_chartering_type, get_tags_for_stream_id, - needed_ballot_positions, nice_consensus, prettify_std_name, update_telechat, has_same_ballot, + needed_ballot_positions, nice_consensus, update_telechat, has_same_ballot, get_initial_notify, make_notify_changed_event, make_rev_history, default_consensus, add_events_message_info, get_unicode_document_content, augment_docs_and_user_with_user_info, irsg_needed_ballot_positions, add_action_holder_change_event, @@ -154,8 +154,8 @@ def render_document_top(request, doc, tab, name): None, ) ) - - tabs.append(("Email expansions","email",urlreverse('ietf.doc.views_doc.document_email', kwargs=dict(name=name)), True, None)) + if not doc.type_id in ["bcp", "std", "fyi"]: + tabs.append(("Email expansions","email",urlreverse('ietf.doc.views_doc.document_email', kwargs=dict(name=name)), True, None)) tabs.append(("History", "history", urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=name)), True, None)) if name.startswith("rfc"): @@ -163,7 +163,7 @@ def render_document_top(request, doc, tab, name): else: name += "-" + doc.rev - return render_to_string("doc/document_top.html", + return render_to_string("doc/document_top.html" if not doc.type_id in ["bcp", "std", "fyi"] else "doc/document_subseries_top.html", dict(doc=doc, tabs=tabs, selected=tab, @@ -180,42 +180,38 @@ def interesting_doc_relations(doc): else: raise TypeError("Expected this method to be called with a Document or DocHistory object") - that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs') + that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc') - that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs') + that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc') - # TODO: This returns the relationships in database order, which may not be the order we want to display them in. 
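As context for the relateddocument queries just below: with DocAlias removed, RelatedDocument.target points directly at a Document, so related documents are reached without the old ".document" hop. A minimal sketch, assuming the post-migration models; the helper rfcs_obsoleting is hypothetical and not part of the patch.

from ietf.doc.models import RelatedDocument

def rfcs_obsoleting(rfc_doc):
    """Names of the documents that obsolete the given rfc-type Document (illustrative)."""
    return [
        rel.source.name
        for rel in RelatedDocument.objects.filter(target=rfc_doc, relationship_id="obs")
    ]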
- interesting_relations_that = cls.objects.filter(target__docs=target, relationship__in=that_relationships).select_related('source') - interesting_relations_that_doc = cls.objects.filter(source=doc, relationship__in=that_doc_relationships).prefetch_related('target__docs') + interesting_relations_that = cls.objects.filter(target=target, relationship__in=that_relationships).select_related('source') + interesting_relations_that_doc = cls.objects.filter(source=doc, relationship__in=that_doc_relationships).prefetch_related('target') return interesting_relations_that, interesting_relations_that_doc def document_main(request, name, rev=None, document_html=False): - if name.startswith("rfc") and rev is not None: + + doc = get_object_or_404(Document.objects.select_related(), name=name) + + if doc.type_id == "rfc" and rev is not None: raise Http404() - doc = get_object_or_404(Document.objects.select_related(), docalias__name=name) + log.assertion('doc.type_id!="rfc" or doc.name.startswith("rfc")') # take care of possible redirections - aliases = DocAlias.objects.filter(docs=doc).values_list("name", flat=True) - if document_html is False and rev==None and doc.type_id == "draft" and not name.startswith("rfc"): - for a in aliases: - if a.startswith("rfc"): - return redirect("ietf.doc.views_doc.document_main", name=a) - - revisions = [] - for h in doc.history_set.order_by("time", "id"): - if h.rev and not h.rev in revisions: - revisions.append(h.rev) - if not doc.rev in revisions: - revisions.append(doc.rev) + if document_html is False and rev is None: + became_rfc = doc.became_rfc() + if became_rfc: + return redirect("ietf.doc.views_doc.document_main", name=became_rfc.name) + + revisions = doc.revisions_by_dochistory() latest_rev = doc.rev snapshot = False gh = None - if rev: - # find the entry in the history + if rev and rev != doc.rev: + # find the entry in the history if the rev requested is not the current rev for h in doc.history_set.order_by("-time"): if rev == h.rev: snapshot = True @@ -241,9 +237,129 @@ def document_main(request, name, rev=None, document_html=False): if telechat and (not telechat.telechat_date or telechat.telechat_date < date_today(settings.TIME_ZONE)): telechat = None - # specific document types - if doc.type_id == "draft": + if doc.type_id == "rfc": + split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off" + if request.GET.get('include_text') == "0": + split_content = True + elif request.GET.get('include_text') == "1": + split_content = False + else: + pass + + interesting_relations_that, interesting_relations_that_doc = interesting_doc_relations(doc) + + can_edit = has_role(request.user, ("Area Director", "Secretariat")) + can_edit_authors = has_role(request.user, ("Secretariat")) + + stream_slugs = StreamName.objects.values_list("slug", flat=True) + # For some reason, AnonymousUser has __iter__, but is not iterable, + # which causes problems in the filter() below. Work around this: + if request.user.is_authenticated: + roles = Role.objects.filter(group__acronym__in=stream_slugs, person__user=request.user) + roles = group_features_role_filter(roles, request.user.person, 'docman_roles') + else: + roles = [] + + can_change_stream = bool(can_edit or roles) + + file_urls, found_types = build_file_urls(doc) + content = doc.text_or_error() # pyflakes:ignore + content = markup_txt.markup(maybe_split(content, split=split_content)) + + if not found_types: + content = "This RFC is not currently available online." 
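A minimal sketch of how the draft/RFC split shown above in document_main() can be navigated now that RFCs are their own Documents; canonical_for_display is a hypothetical helper built on the became_rfc() call used in this hunk, not part of the patch.

def canonical_for_display(doc):
    """Prefer the published RFC over the draft it became (illustrative only)."""
    if doc.type_id == "draft":
        rfc = doc.became_rfc()   # follows the "became_rfc" relation; None if not yet published
        if rfc:
            return rfc           # mirrors the redirect in document_main() above
    return doc                   # rfc-type and other documents stand on their own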
+ split_content = False + elif "txt" not in found_types: + content = "This RFC is not available in plain text format." + split_content = False + + # status changes + status_changes = [] + proposed_status_changes = [] + for r in interesting_relations_that.filter(relationship__in=STATUSCHANGE_RELATIONS): + state_slug = r.source.get_state_slug() + if state_slug in ('appr-sent', 'appr-pend'): + status_changes.append(r) + elif state_slug in ('needshep','adrev','iesgeval','defer','appr-pr'): + proposed_status_changes.append(r) + else: + pass + + presentations = doc.future_presentations() + + augment_docs_and_user_with_user_info([doc], request.user) + + exp_comment = doc.latest_event(IanaExpertDocEvent,type="comment") + iana_experts_comment = exp_comment and exp_comment.desc + + html = None + js = None + css = None + diff_revisions = None + simple_diff_revisions = None + if document_html: + diff_revisions=get_diff_revisions(request, name, doc) + simple_diff_revisions = [t[1] for t in diff_revisions if t[0] == doc.name] + simple_diff_revisions.reverse() + html = doc.html_body() + if request.COOKIES.get("pagedeps") == "inline": + js = Path(finders.find("ietf/js/document_html.js")).read_text() + css = Path(finders.find("ietf/css/document_html_inline.css")).read_text() + if html: + css += Path(finders.find("ietf/css/document_html_txt.css")).read_text() + + # submission + submission = "" + if group is None: + submission = "unknown" + elif group.type_id == "individ": + submission = "individual" + elif group.type_id == "area" and doc.stream_id == "ietf": + submission = "individual in %s area" % group.acronym + else: + if group.features.acts_like_wg and not group.type_id == "edwg": + submission = "%s %s" % (group.acronym, group.type) + else: + submission = group.acronym + submission = '%s' % (group.about_url(), submission) + + return render(request, "doc/document_rfc.html" if document_html is False else "doc/document_html.html", + dict(doc=doc, + document_html=document_html, + css=css, + js=js, + html=html, + group=group, + top=top, + name=doc.name, + content=content, + split_content=split_content, + revisions=simple_diff_revisions if document_html else revisions, + latest_rev=latest_rev, + can_edit=can_edit, + can_edit_authors=can_edit_authors, + can_change_stream=can_change_stream, + rfc_number=doc.rfc_number, + updates=interesting_relations_that_doc.filter(relationship="updates"), + updated_by=interesting_relations_that.filter(relationship="updates"), + obsoletes=interesting_relations_that_doc.filter(relationship="obs"), + obsoleted_by=interesting_relations_that.filter(relationship="obs"), + status_changes=status_changes, + proposed_status_changes=proposed_status_changes, + has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj + file_urls=file_urls, + rfc_editor_state=doc.get_state("draft-rfceditor"), + iana_review_state=doc.get_state("draft-iana-review"), + iana_action_state=doc.get_state("draft-iana-action"), + iana_experts_state=doc.get_state("draft-iana-experts"), + iana_experts_comment=iana_experts_comment, + presentations=presentations, + diff_revisions=diff_revisions, + submission=submission + )) + + elif doc.type_id == "draft": split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off" if request.GET.get('include_text') == "0": split_content = True @@ -281,43 +397,13 @@ def document_main(request, name, rev=None, document_html=False): is_author = request.user.is_authenticated and 
doc.documentauthor_set.filter(person__user=request.user).exists() can_view_possibly_replaces = can_edit_replaces or is_author - rfc_number = name[3:] if name.startswith("rfc") else None - draft_name = None - for a in aliases: - if a.startswith("draft"): - draft_name = a - - rfc_aliases = [prettify_std_name(a) for a in aliases - if a.startswith("fyi") or a.startswith("std") or a.startswith("bcp")] - latest_revision = None - # Workaround to allow displaying last rev of draft that became rfc as a draft - # This should be unwound when RFCs become their own documents. - if snapshot: - doc.name = doc.doc.name - name = doc.doc.name - else: - name = doc.name - file_urls, found_types = build_file_urls(doc) - if not snapshot and doc.get_state_slug() == "rfc": - # content - content = doc.text_or_error() # pyflakes:ignore - content = markup_txt.markup(maybe_split(content, split=split_content)) - content = doc.text_or_error() # pyflakes:ignore content = markup_txt.markup(maybe_split(content, split=split_content)) - if not snapshot and doc.get_state_slug() == "rfc": - if not found_types: - content = "This RFC is not currently available online." - split_content = False - elif "txt" not in found_types: - content = "This RFC is not available in plain text format." - split_content = False - else: - latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision") + latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision") # ballot iesg_ballot_summary = None @@ -497,7 +583,7 @@ def document_main(request, name, rev=None, document_html=False): augment_docs_and_user_with_user_info([doc], request.user) - published = doc.latest_event(type="published_rfc") + published = doc.latest_event(type="published_rfc") # todo rethink this now that published_rfc is on rfc started_iesg_process = doc.latest_event(type="started_iesg_process") review_assignments = review_assignments_to_list_for_docs([doc]).get(doc.name, []) @@ -555,7 +641,7 @@ def document_main(request, name, rev=None, document_html=False): html=html, group=group, top=top, - name=name, + name=doc.name, content=content, split_content=split_content, revisions=simple_diff_revisions if document_html else revisions, @@ -579,8 +665,6 @@ def document_main(request, name, rev=None, document_html=False): can_request_review=can_request_review, can_submit_unsolicited_review_for_teams=can_submit_unsolicited_review_for_teams, - rfc_number=rfc_number, - draft_name=draft_name, telechat=telechat, iesg_ballot_summary=iesg_ballot_summary, submission=submission, @@ -597,7 +681,6 @@ def document_main(request, name, rev=None, document_html=False): conflict_reviews=conflict_reviews, status_changes=status_changes, proposed_status_changes=proposed_status_changes, - rfc_aliases=rfc_aliases, has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj published=published, file_urls=file_urls, @@ -627,7 +710,7 @@ def document_main(request, name, rev=None, document_html=False): diff_revisions=diff_revisions )) - if doc.type_id == "charter": + elif doc.type_id == "charter": content = doc.text_or_error() # pyflakes:ignore content = markdown.markdown(content) @@ -664,7 +747,7 @@ def document_main(request, name, rev=None, document_html=False): can_manage=can_manage, )) - if doc.type_id == "bofreq": + elif doc.type_id == "bofreq": content = markdown.markdown(doc.text_or_error()) editors = bofreq_editors(doc) responsible = bofreq_responsible(doc) @@ -684,8 +767,8 @@ def document_main(request, name, rev=None, document_html=False): 
editor_can_manage=editor_can_manage, )) - if doc.type_id == "conflrev": - filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev) + elif doc.type_id == "conflrev": + filename = "%s-%s.txt" % (doc.name, doc.rev) pathname = os.path.join(settings.CONFLICT_REVIEW_PATH,filename) if doc.rev == "00" and not os.path.isfile(pathname): @@ -699,7 +782,7 @@ def document_main(request, name, rev=None, document_html=False): if doc.get_state_slug() in ("iesgeval", ) and doc.active_ballot(): ballot_summary = needed_ballot_positions(doc, list(doc.active_ballot().active_balloter_positions().values())) - conflictdoc = doc.related_that_doc('conflrev')[0].document + conflictdoc = doc.related_that_doc('conflrev')[0] return render(request, "doc/document_conflict_review.html", dict(doc=doc, @@ -714,8 +797,8 @@ def document_main(request, name, rev=None, document_html=False): approved_states=('appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent'), )) - if doc.type_id == "statchg": - filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev) + elif doc.type_id == "statchg": + filename = "%s-%s.txt" % (doc.name, doc.rev) pathname = os.path.join(settings.STATUS_CHANGE_PATH,filename) if doc.rev == "00" and not os.path.isfile(pathname): @@ -748,14 +831,14 @@ def document_main(request, name, rev=None, document_html=False): sorted_relations=sorted_relations, )) - if doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",): + elif doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",): can_manage_material = can_manage_materials(request.user, doc.group) presentations = doc.future_presentations() if doc.uploaded_filename: # we need to remove the extension for the globbing below to work basename = os.path.splitext(doc.uploaded_filename)[0] else: - basename = "%s-%s" % (doc.canonical_name(), doc.rev) + basename = "%s-%s" % (doc.name, doc.rev) pathname = os.path.join(doc.get_file_path(), basename) @@ -804,7 +887,7 @@ def document_main(request, name, rev=None, document_html=False): )) - if doc.type_id == "review": + elif doc.type_id == "review": basename = "{}.txt".format(doc.name) pathname = os.path.join(doc.get_file_path(), basename) content = get_unicode_document_content(basename, pathname) @@ -830,7 +913,7 @@ def document_main(request, name, rev=None, document_html=False): assignments=assignments, )) - if doc.type_id in ("chatlog", "polls"): + elif doc.type_id in ("chatlog", "polls"): if isinstance(doc,DocHistory): session = doc.doc.sessionpresentation_set.last().session else: @@ -851,7 +934,7 @@ def document_main(request, name, rev=None, document_html=False): ) ) - if doc.type_id == "statement": + elif doc.type_id == "statement": if doc.uploaded_filename: basename = doc.uploaded_filename.split(".")[0] # strip extension else: @@ -872,7 +955,6 @@ def document_main(request, name, rev=None, document_html=False): can_manage = has_role(request.user,["Secretariat"]) # Add IAB or IESG as appropriate interesting_relations_that, interesting_relations_that_doc = interesting_doc_relations(doc) published = doc.latest_event(type="published_statement").time - return render(request, "doc/document_statement.html", dict(doc=doc, top=top, @@ -885,6 +967,9 @@ def document_main(request, name, rev=None, document_html=False): replaced_by=interesting_relations_that.filter(relationship="replaces"), can_manage=can_manage, )) + elif doc.type_id in ["bcp", "std", "fyi"]: + return render(request, "doc/document_subseries.html", {"doc": doc, "top": top}) + raise Http404("Document not 
found: %s" % (name + ("-%s"%rev if rev else ""))) @@ -938,9 +1023,9 @@ def document_html(request, name, rev=None): doc = found.documents.get() rev = found.matched_rev - if not requested_rev and doc.is_rfc(): # Someone asked for /doc/html/8989 + if not requested_rev and doc.type_id == "rfc": # Someone asked for /doc/html/8989 if not name.startswith('rfc'): - return redirect('ietf.doc.views_doc.document_html', name=doc.canonical_name()) + return redirect('ietf.doc.views_doc.document_html', name=doc.name) if rev: doc = doc.history_set.filter(rev=rev).first() or doc.fake_history_obj(rev) @@ -948,7 +1033,12 @@ def document_html(request, name, rev=None): if not os.path.exists(doc.get_file_name()): raise Http404("File not found: %s" % doc.get_file_name()) - return document_main(request, name=doc.name if requested_rev else doc.canonical_name(), rev=doc.rev if requested_rev or not doc.is_rfc() else None, document_html=True) + return document_main( + request, + name=doc.name if requested_rev else doc.name, + rev=doc.rev if requested_rev or doc.type_id != "rfc" else None, + document_html=True, + ) def document_pdfized(request, name, rev=None, ext=None): @@ -1008,7 +1098,7 @@ def get_doc_email_aliases(name): return aliases def document_email(request,name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) top = render_document_top(request, doc, "email", name) aliases = get_doc_email_aliases(name) if doc.type_id=='draft' else None @@ -1026,6 +1116,11 @@ def document_email(request,name): def get_diff_revisions(request, name, doc): + """ Calculate what to offer for diff comparisons + + returns list of (name, rev, time, url, is_this_doc, is_previous_doc) + ordered by -time for use by forms used to get to the diff tools. 
+ """ diffable = any( [ name.startswith(prefix) @@ -1048,17 +1143,21 @@ def get_diff_revisions(request, name, doc): diff_documents = [doc] diff_documents.extend( Document.objects.filter( - docalias__relateddocument__source=doc, - docalias__relateddocument__relationship="replaces", + relateddocument__source=doc, + relateddocument__relationship="replaces", ) ) + if doc.came_from_draft(): + diff_documents.append(doc.came_from_draft()) - if doc.get_state_slug() == "rfc": + if doc.became_rfc(): + rfc = doc.became_rfc() + e = rfc.latest_event(type="published_rfc") + diff_revisions.append((rfc.name, "", e.time if e else rfc.time, rfc.name, False, False)) + + if doc.type_id == "rfc": e = doc.latest_event(type="published_rfc") - aliases = doc.docalias.filter(name__startswith="rfc") - if aliases: - name = aliases[0].name - diff_revisions.append((name, "", e.time if e else doc.time, name)) + diff_revisions.append((name, "", e.time if e else doc.time, name, True, False)) seen = set() for e in ( @@ -1087,13 +1186,22 @@ def get_diff_revisions(request, name, doc): # rfcdiff tool has special support for IDs url = e.doc.name + "-" + e.rev - diff_revisions.append((e.doc.name, e.rev, e.time, url)) + diff_revisions.append((e.doc.name, e.rev, e.time, url, e.doc == doc and e.rev == doc.rev, False)) + + diff_revisions.sort(key=lambda t: t[2], reverse=True) + for index, t in enumerate(diff_revisions): + if t[4]: # is_this_doc + n = index+1 + if n < len(diff_revisions): + t_name, rev, time, url, _, _ = diff_revisions[n] + diff_revisions[n] = (t_name, rev, time, url, False, True) + break return diff_revisions def document_history(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) top = render_document_top(request, doc, "history", name) diff_revisions = get_diff_revisions(request, name, doc) @@ -1104,21 +1212,38 @@ def document_history(request, name): add_events_message_info(events) # figure out if the current user can add a comment to the history - if doc.type_id == "draft" and doc.group != None: - can_add_comment = bool(has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor")) or ( - request.user.is_authenticated and - Role.objects.filter(name__in=("chair", "secr"), - group__acronym=doc.group.acronym, - person__user=request.user))) + if doc.type_id in ("draft", "rfc") and doc.group is not None: + can_add_comment = bool( + has_role( + request.user, + ("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor"), + ) + or ( + request.user.is_authenticated + and Role.objects.filter( + name__in=("chair", "secr"), + group__acronym=doc.group.acronym, + person__user=request.user, + ) + ) + ) else: - can_add_comment = has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair")) - return render(request, "doc/document_history.html", - dict(doc=doc, - top=top, - diff_revisions=diff_revisions, - events=events, - can_add_comment=can_add_comment, - )) + can_add_comment = has_role( + request.user, ("Area Director", "Secretariat", "IRTF Chair") + ) + + + return render( + request, + "doc/document_history.html", + { + "doc": doc, + "top": top, + "diff_revisions": diff_revisions, + "events": events, + "can_add_comment": can_add_comment, + }, + ) def document_bibtex(request, name, rev=None): @@ -1135,27 +1260,28 @@ def document_bibtex(request, name, rev=None): name = name+"-"+rev rev = None - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) - latest_revision = 
doc.latest_event(NewRevisionDocEvent, type="new_revision") - replaced_by = [d.name for d in doc.related_that("replaces")] - published = doc.latest_event(type="published_rfc") is not None - rfc = latest_revision.doc if latest_revision and latest_revision.doc.get_state_slug() == "rfc" else None + doi = None + draft_became_rfc = None + replaced_by = None + latest_revision = None + if doc.type_id == "draft": + latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision") + replaced_by = [d.name for d in doc.related_that("replaces")] + draft_became_rfc = doc.became_rfc() - if rev != None and rev != doc.rev: - # find the entry in the history - for h in doc.history_set.order_by("-time"): - if rev == h.rev: - doc = h - break + if rev != None and rev != doc.rev: + # find the entry in the history + for h in doc.history_set.order_by("-time"): + if rev == h.rev: + doc = h + break - if doc.is_rfc(): + elif doc.type_id == "rfc": # This needs to be replaced with a lookup, as the mapping may change - # over time. Probably by updating ietf/sync/rfceditor.py to add the - # as a DocAlias, and use a method on Document to retrieve it. - doi = "10.17487/RFC%04d" % int(doc.rfc_number()) - else: - doi = None + # over time. + doi = f"10.17487/RFC{doc.rfc_number:04d}" if doc.is_dochistory(): latest_event = doc.latest_event(type='new_revision', rev=rev) @@ -1165,8 +1291,7 @@ def document_bibtex(request, name, rev=None): return render(request, "doc/document_bibtex.bib", dict(doc=doc, replaced_by=replaced_by, - published=published, - rfc=rfc, + published_as=draft_became_rfc, latest_revision=latest_revision, doi=doi, ), @@ -1203,7 +1328,7 @@ def document_bibxml(request, name, rev=None): def document_writeup(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) top = render_document_top(request, doc, "writeup", name) def text_from_writeup(event_type): @@ -1267,7 +1392,7 @@ def document_writeup(request, name): )) def document_shepherd_writeup(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) lastwriteup = doc.latest_event(WriteupDocEvent,type="changed_protocol_writeup") if lastwriteup: writeup_text = lastwriteup.text @@ -1304,22 +1429,28 @@ def document_shepherd_writeup_template(request, type): def document_references(request, name): - doc = get_object_or_404(Document,docalias__name=name) + doc = get_object_or_404(Document,name=name) refs = doc.references() + if doc.type_id in ["bcp","std","fyi"]: + for rfc in doc.contains(): + refs |= rfc.references() return render(request, "doc/document_references.html",dict(doc=doc,refs=sorted(refs,key=lambda x:x.target.name),)) def document_referenced_by(request, name): - doc = get_object_or_404(Document,docalias__name=name) + doc = get_object_or_404(Document,name=name) refs = doc.referenced_by() + if doc.type_id in ["bcp","std","fyi"]: + for rfc in doc.contains(): + refs |= rfc.referenced_by() full = ( request.GET.get('full') != None ) numdocs = refs.count() if not full and numdocs>250: refs=refs[:250] else: numdocs=None - refs=sorted(refs,key=lambda x:(['refnorm','refinfo','refunk','refold'].index(x.relationship.slug),x.source.canonical_name())) + refs=sorted(refs,key=lambda x:(['refnorm','refinfo','refunk','refold'].index(x.relationship.slug),x.source.name)) return render(request, "doc/document_referenced_by.html", - dict(alias_name=name, + dict(name=name, doc=doc, numdocs=numdocs, refs=refs, @@ -1393,7 +1524,7 @@ def 
document_ballot_content(request, doc, ballot_id, editable=True): request=request) def document_ballot(request, name, ballot_id=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) all_ballots = list(BallotDocEvent.objects.filter(doc=doc, type="created_ballot").order_by("time")) if not ballot_id: if all_ballots: @@ -1429,7 +1560,7 @@ def document_ballot(request, name, ballot_id=None): )) def document_irsg_ballot(request, name, ballot_id=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) top = render_document_top(request, doc, "irsgballot", name) if not ballot_id: ballot = doc.latest_event(BallotDocEvent, type="created_ballot", ballot_type__slug='irsg-approve') @@ -1448,7 +1579,7 @@ def document_irsg_ballot(request, name, ballot_id=None): )) def document_rsab_ballot(request, name, ballot_id=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) top = render_document_top(request, doc, "rsabballot", name) if not ballot_id: ballot = doc.latest_event(BallotDocEvent, type="created_ballot", ballot_type__slug='rsab-approve') @@ -1470,7 +1601,7 @@ def document_rsab_ballot(request, name, ballot_id=None): ) def ballot_popup(request, name, ballot_id): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) c = document_ballot_content(request, doc, ballot_id=ballot_id, editable=False) ballot = get_object_or_404(BallotDocEvent,id=ballot_id) return render(request, "doc/ballot_popup.html", @@ -1483,7 +1614,7 @@ def ballot_popup(request, name, ballot_id): def document_json(request, name, rev=None): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) def extract_name(s): return s.name if s else None @@ -1503,7 +1634,6 @@ def document_json(request, name, rev=None): data["expires"] = doc.expires.strftime("%Y-%m-%d %H:%M:%S") if doc.expires else None data["title"] = doc.title data["abstract"] = doc.abstract - data["aliases"] = list(doc.docalias.values_list("name", flat=True)) data["state"] = extract_name(doc.get_state()) data["intended_std_level"] = extract_name(doc.intended_std_level) data["std_level"] = extract_name(doc.std_level) @@ -1519,7 +1649,7 @@ def document_json(request, name, rev=None): latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision") data["rev_history"] = make_rev_history(latest_revision.doc if latest_revision else doc) - if doc.type_id == "draft": + if doc.type_id == "draft": # These live only on drafts data["iesg_state"] = extract_name(doc.get_state("draft-iesg")) data["rfceditor_state"] = extract_name(doc.get_state("draft-rfceditor")) data["iana_review_state"] = extract_name(doc.get_state("draft-iana-review")) @@ -1528,6 +1658,8 @@ def document_json(request, name, rev=None): if doc.stream_id in ("ietf", "irtf", "iab"): e = doc.latest_event(ConsensusDocEvent, type="changed_consensus") data["consensus"] = e.consensus if e else None + + if doc.type_id in ["draft", "rfc"]: data["stream"] = extract_name(doc.stream) return HttpResponse(json.dumps(data, indent=2), content_type='application/json') @@ -1538,7 +1670,7 @@ class AddCommentForm(forms.Form): @role_required('Area Director', 'Secretariat', 'IRTF Chair', 'WG Chair', 'RG Chair', 'WG Secretary', 'RG Secretary', 'IANA', 'RFC Editor') def add_comment(request, name): """Add comment to history of document.""" - doc = get_object_or_404(Document, 
docalias__name=name) + doc = get_object_or_404(Document, name=name) login = request.user.person @@ -1622,9 +1754,9 @@ def telechat_date(request, name): def doc_titletext(doc): if doc.type.slug=='conflrev': - conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document - return 'the conflict review of %s' % conflictdoc.canonical_name() - return doc.canonical_name() + conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target + return 'the conflict review of %s' % conflictdoc.name + return doc.name def edit_notify(request, name): @@ -1863,7 +1995,7 @@ def remind_action_holders(request, name): form = ReminderEmailForm(request.POST) if form.is_valid(): email_remind_action_holders(request, doc, form.cleaned_data['note']) - return redirect('ietf.doc.views_doc.document_main', name=doc.canonical_name()) + return redirect('ietf.doc.views_doc.document_main', name=doc.name) form = ReminderEmailForm() return render( @@ -2028,10 +2160,16 @@ def idnits2_rfc_status(request): def idnits2_state(request, name, rev=None): - doc = get_object_or_404(Document, docalias__name=name) - if doc.type_id!='draft': + doc = get_object_or_404(Document, name=name) + if doc.type_id not in ["draft", "rfc"]: raise Http404 - zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first() + zero_revision = None + if doc.type_id == "rfc": + draft = doc.came_from_draft() + if draft: + zero_revision = NewRevisionDocEvent.objects.filter(doc=draft,rev='00').first() + else: + zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first() if zero_revision: doc.created = zero_revision.time else: diff --git a/ietf/doc/views_downref.py b/ietf/doc/views_downref.py index 1b7b51edb..2668baae3 100644 --- a/ietf/doc/views_downref.py +++ b/ietf/doc/views_downref.py @@ -19,7 +19,7 @@ def downref_registry(request): downref_doc_pairs = [ ] downref_relations = RelatedDocument.objects.filter(relationship_id='downref-approval') for rel in downref_relations: - downref_doc_pairs.append((rel.target.document, rel.source)) + downref_doc_pairs.append((rel.target, rel.source)) return render(request, 'doc/downref.html', { "doc_pairs": downref_doc_pairs, @@ -38,18 +38,18 @@ def downref_registry_add(request): if form.is_valid(): drafts = form.cleaned_data['drafts'] rfc = form.cleaned_data['rfc'] - for da in drafts: - RelatedDocument.objects.create(source=da.document, + for d in drafts: + RelatedDocument.objects.create(source=d, target=rfc, relationship_id='downref-approval') - c = DocEvent(type="downref_approved", doc=da.document, - rev=da.document.rev, by=login) + c = DocEvent(type="downref_approved", doc=d, + rev=d.rev, by=login) c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % ( - rfc.document.rfc_number(), da.name, da.document.rev) + rfc.rfc_number, d.name, d.rev) c.save() - c = DocEvent(type="downref_approved", doc=rfc.document, - rev=rfc.document.rev, by=login) + c = DocEvent(type="downref_approved", doc=rfc, + rev=rfc.rev, by=login) c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % ( - rfc.document.rfc_number(), da.name, da.document.rev) + rfc.rfc_number, d.name, d.rev) c.save() return HttpResponseRedirect(urlreverse('ietf.doc.views_downref.downref_registry')) diff --git a/ietf/doc/views_draft.py b/ietf/doc/views_draft.py index b74042ac5..4f6659af9 100644 --- a/ietf/doc/views_draft.py +++ b/ietf/doc/views_draft.py @@ -23,7 +23,7 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocAlias, 
RelatedDocument, State, +from ietf.doc.models import ( Document, RelatedDocument, State, StateType, DocEvent, ConsensusDocEvent, TelechatDocEvent, WriteupDocEvent, StateDocEvent, IanaExpertDocEvent, IESG_SUBSTATE_TAGS) from ietf.doc.mails import ( email_pulled_from_rfc_queue, email_resurrect_requested, @@ -38,7 +38,7 @@ from ietf.doc.utils import ( add_state_change_event, can_adopt_draft, can_unadop set_replaces_for_document, default_consensus, tags_suffix, can_edit_docextresources, update_doc_extresources ) from ietf.doc.lastcall import request_last_call -from ietf.doc.fields import SearchableDocAliasesField +from ietf.doc.fields import SearchableDocumentsField from ietf.doc.forms import ExtResourceForm from ietf.group.models import Group, Role, GroupFeatures from ietf.iesg.models import TelechatDate @@ -72,7 +72,7 @@ class ChangeStateForm(forms.Form): state = self.cleaned_data.get('state', '(None)') tag = self.cleaned_data.get('substate','') comment = self.cleaned_data['comment'].strip() # pyflakes:ignore - doc = get_object_or_404(Document, docalias__name=self.docname) + doc = get_object_or_404(Document, name=self.docname) prev = doc.get_state("draft-iesg") # tag handling is a bit awkward since the UI still works @@ -92,7 +92,7 @@ class ChangeStateForm(forms.Form): def change_state(request, name): """Change IESG state of Internet-Draft, notifying parties as necessary and logging the change as a comment.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if (not doc.latest_event(type="started_iesg_process")) or doc.get_state_slug() == "expired": raise Http404 @@ -212,7 +212,7 @@ class AddIanaExpertsCommentForm(forms.Form): @role_required('Secretariat', 'IANA') def add_iana_experts_comment(request, name): - doc = get_object_or_404(Document, docalias__name = name) + doc = get_object_or_404(Document, name = name) if request.method == 'POST': form = AddIanaExpertsCommentForm(request.POST) if form.is_valid(): @@ -238,7 +238,7 @@ class ChangeIanaStateForm(forms.Form): def change_iana_state(request, name, state_type): """Change IANA review state of Internet-Draft. 
Normally, this is done via automatic sync, but this form allows one to set it manually.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) state_type = doc.type_id + "-" + state_type @@ -278,7 +278,7 @@ class ChangeStreamForm(forms.Form): def change_stream(request, name): """Change the stream of a Document of type 'draft', notifying parties as necessary and logging the change as a comment.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not doc.type_id=='draft': raise Http404 @@ -340,7 +340,7 @@ def change_stream(request, name): )) class ReplacesForm(forms.Form): - replaces = SearchableDocAliasesField(required=False) + replaces = SearchableDocumentsField(required=False) comment = forms.CharField(widget=forms.Textarea, required=False, strip=False) def __init__(self, *args, **kwargs): @@ -350,16 +350,16 @@ class ReplacesForm(forms.Form): def clean_replaces(self): for d in self.cleaned_data['replaces']: - if d.document == self.doc: + if d == self.doc: raise forms.ValidationError("An Internet-Draft can't replace itself") - if d.document.type_id == "draft" and d.document.get_state_slug() == "rfc": + if d.type_id == "draft" and d.get_state_slug() == "rfc": raise forms.ValidationError("An Internet-Draft can't replace an RFC") return self.cleaned_data['replaces'] def replaces(request, name): """Change 'replaces' set of a Document of type 'draft' , notifying parties as necessary and logging the change as a comment.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.type_id != 'draft': raise Http404 if not (has_role(request.user, ("Secretariat", "Area Director", "WG Chair", "RG Chair", "WG Secretary", "RG Secretary")) @@ -390,7 +390,7 @@ def replaces(request, name): )) class SuggestedReplacesForm(forms.Form): - replaces = forms.ModelMultipleChoiceField(queryset=DocAlias.objects.all(), + replaces = forms.ModelMultipleChoiceField(queryset=Document.objects.all(), label="Suggestions", required=False, widget=forms.CheckboxSelectMultiple, help_text="Select only the documents that are replaced by this document") comment = forms.CharField(label="Optional comment", widget=forms.Textarea, required=False, strip=False) @@ -403,7 +403,7 @@ class SuggestedReplacesForm(forms.Form): self.fields["replaces"].choices = [(d.pk, d.name) for d in suggested] def review_possibly_replaces(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.type_id != 'draft': raise Http404 if not (has_role(request.user, ("Secretariat", "Area Director")) @@ -458,7 +458,7 @@ class ChangeIntentionForm(forms.Form): def change_intention(request, name): """Change the intended publication status of a Document of type 'draft' , notifying parties as necessary and logging the change as a comment.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.type_id != 'draft': raise Http404 @@ -523,7 +523,7 @@ class EditInfoForm(forms.Form): def to_iesg(request,name): """ Submit an IETF stream document to the IESG for publication """ - doc = get_object_or_404(Document, docalias__name=name, stream='ietf') + doc = get_object_or_404(Document, name=name, stream='ietf') if doc.get_state_slug('draft') == "expired" or doc.get_state_slug('draft-iesg') == 'pub-req' : raise Http404 @@ -636,7 +636,7 @@ def to_iesg(request,name): def edit_info(request, name): 
"""Edit various Internet-Draft attributes, notifying parties as necessary and logging changes as document events.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.get_state_slug() == "expired": raise Http404 @@ -676,7 +676,7 @@ def edit_info(request, name): e.save() events.append(e) - replaces = Document.objects.filter(docalias__relateddocument__source=doc, docalias__relateddocument__relationship="replaces") + replaces = Document.objects.filter(targets_related__source=doc, targets_related__relationship="replaces") if replaces: # this should perhaps be somewhere else, e.g. the # place where the replace relationship is established? @@ -781,7 +781,7 @@ def edit_info(request, name): @role_required('Area Director','Secretariat') def request_resurrect(request, name): """Request resurrect of expired Internet-Draft.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.get_state_slug() != "expired": raise Http404 @@ -804,7 +804,7 @@ def request_resurrect(request, name): @role_required('Secretariat') def resurrect(request, name): """Resurrect expired Internet-Draft.""" - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if doc.get_state_slug() != "expired": raise Http404 diff --git a/ietf/doc/views_material.py b/ietf/doc/views_material.py index 21b93397a..5b16c247b 100644 --- a/ietf/doc/views_material.py +++ b/ietf/doc/views_material.py @@ -16,7 +16,7 @@ from django.urls import reverse as urlreverse import debug # pyflakes:ignore -from ietf.doc.models import Document, DocAlias, DocTypeName, DocEvent, State +from ietf.doc.models import Document, DocTypeName, DocEvent, State from ietf.doc.models import NewRevisionDocEvent from ietf.doc.utils import add_state_change_event, check_common_doc_name_rules from ietf.group.models import Group @@ -156,10 +156,6 @@ def edit_material(request, name=None, acronym=None, action=None, doc_type=None): for chunk in f.chunks(): dest.write(chunk) - if action == "new": - alias, __ = DocAlias.objects.get_or_create(name=doc.name) - alias.docs.add(doc) - if prev_rev != doc.rev: e = NewRevisionDocEvent(type="new_revision", doc=doc, rev=doc.rev) e.by = request.user.person diff --git a/ietf/doc/views_review.py b/ietf/doc/views_review.py index e0e6cb05b..f1f9d7be4 100644 --- a/ietf/doc/views_review.py +++ b/ietf/doc/views_review.py @@ -28,7 +28,7 @@ from django.core.exceptions import ValidationError from django.template.loader import render_to_string, TemplateDoesNotExist from django.urls import reverse as urlreverse -from ietf.doc.models import (Document, NewRevisionDocEvent, State, DocAlias, +from ietf.doc.models import (Document, NewRevisionDocEvent, State, LastCallDocEvent, ReviewRequestDocEvent, ReviewAssignmentDocEvent, DocumentAuthor) from ietf.name.models import (ReviewRequestStateName, ReviewAssignmentStateName, ReviewResultName, ReviewTypeName) @@ -117,7 +117,7 @@ class RequestReviewForm(forms.ModelForm): @login_required def request_review(request, name): - doc = get_object_or_404(Document, name=name) + doc = get_object_or_404(Document, type_id="draft", name=name) if not can_request_review_of_doc(request.user, doc): permission_denied(request, "You do not have permission to perform this action") @@ -753,9 +753,7 @@ def complete_review(request, name, assignment_id=None, acronym=None): name=review_name, defaults={'type_id': 'review', 'group': team}, ) - if created: - 
DocAlias.objects.create(name=review_name).docs.add(review) - else: + if not created: messages.warning(request, message='Attempt to save review failed: review document already exists. This most likely occurred because the review was submitted twice in quick succession. If you intended to submit a new review, rather than update an existing one, things are probably OK. Please verify that the shown review is what you expected.') return redirect("ietf.doc.views_doc.document_main", name=review_name) @@ -1093,7 +1091,7 @@ class ReviewWishAddForm(forms.Form): @login_required def review_wish_add(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if request.method == "POST": form = ReviewWishAddForm(request.user, doc, request.POST) @@ -1110,7 +1108,7 @@ def review_wish_add(request, name): @login_required def review_wishes_remove(request, name): - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) person = get_object_or_404(Person, user=request.user) if request.method == "POST": diff --git a/ietf/doc/views_search.py b/ietf/doc/views_search.py index 6ba8abe1e..eef57a29c 100644 --- a/ietf/doc/views_search.py +++ b/ietf/doc/views_search.py @@ -1,4 +1,4 @@ -# Copyright The IETF Trust 2009-2022, All Rights Reserved +# Copyright The IETF Trust 2009-2023, All Rights Reserved # -*- coding: utf-8 -*- # # Some parts Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies). @@ -37,7 +37,9 @@ import re import datetime import copy +import operator +from functools import reduce from django import forms from django.conf import settings from django.core.cache import cache, caches @@ -53,7 +55,7 @@ from django.utils.text import slugify import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocHistory, DocAlias, State, +from ietf.doc.models import ( Document, DocHistory, State, LastCallDocEvent, NewRevisionDocEvent, IESG_SUBSTATE_TAGS, IESG_BALLOT_ACTIVE_STATES, IESG_STATCHG_CONFLREV_ACTIVE_STATES, IESG_CHARTER_ACTIVE_STATES ) @@ -96,7 +98,7 @@ class SearchForm(forms.Form): ("ad", "AD"), ("-ad", "AD (desc)"), ), required=False, widget=forms.HiddenInput) - doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).order_by('name'), required=False) + doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft', 'rfc', 'bcp', 'std', 'fyi', 'liai-att')).order_by('name'), required=False) def __init__(self, *args, **kwargs): super(SearchForm, self).__init__(*args, **kwargs) @@ -155,8 +157,11 @@ def retrieve_search_results(form, all_types=False): else: types = [] - if query['activedrafts'] or query['olddrafts'] or query['rfcs']: + if query['activedrafts'] or query['olddrafts']: types.append('draft') + + if query['rfcs']: + types.append('rfc') types.extend(query["doctypes"]) @@ -167,13 +172,50 @@ def retrieve_search_results(form, all_types=False): # name if query["name"]: - docs = docs.filter(Q(docalias__name__icontains=query["name"]) | - Q(title__icontains=query["name"])).distinct() + look_for = query["name"] + queries = [ + Q(name__icontains=look_for), + Q(title__icontains=look_for) + ] + # Check to see if this is just a search for an rfc look for a few variants + if look_for.lower()[:3] == "rfc" and look_for[3:].strip().isdigit(): + spaceless = look_for.lower()[:3]+look_for[3:].strip() + if spaceless != look_for: + queries.extend([ + 
Q(name__icontains=spaceless), + Q(title__icontains=spaceless) + ]) + singlespace = look_for.lower()[:3]+" "+look_for[3:].strip() + if singlespace != look_for: + queries.extend([ + Q(name__icontains=singlespace), + Q(title__icontains=singlespace) + ]) + + # Do a similar thing if the search is just for a subseries doc, like a bcp. + if look_for.lower()[:3] in ["bcp", "fyi", "std"] and look_for[3:].strip().isdigit() and query["rfcs"]: # Also look for rfcs contained in the subseries. + queries.extend([ + Q(targets_related__source__name__icontains=look_for, targets_related__relationship_id="contains"), + Q(targets_related__source__title__icontains=look_for, targets_related__relationship_id="contains"), + ]) + spaceless = look_for.lower()[:3]+look_for[3:].strip() + if spaceless != look_for: + queries.extend([ + Q(targets_related__source__name__icontains=spaceless, targets_related__relationship_id="contains"), + Q(targets_related__source__title__icontains=spaceless, targets_related__relationship_id="contains"), + ]) + singlespace = look_for.lower()[:3]+" "+look_for[3:].strip() + if singlespace != look_for: + queries.extend([ + Q(targets_related__source__name__icontains=singlespace, targets_related__relationship_id="contains"), + Q(targets_related__source__title__icontains=singlespace, targets_related__relationship_id="contains"), + ]) + + combined_query = reduce(operator.or_, queries) + docs = docs.filter(combined_query).distinct() # rfc/active/old check buttons allowed_draft_states = [] - if query["rfcs"]: - allowed_draft_states.append("rfc") if query["activedrafts"]: allowed_draft_states.append("active") if query["olddrafts"]: @@ -249,17 +291,17 @@ def frontpage(request): def search_for_name(request, name): def find_unique(n): - exact = DocAlias.objects.filter(name__iexact=n).first() + exact = Document.objects.filter(name__iexact=n).first() if exact: return exact.name - aliases = DocAlias.objects.filter(name__istartswith=n)[:2] - if len(aliases) == 1: - return aliases[0].name + startswith = Document.objects.filter(name__istartswith=n)[:2] + if len(startswith) == 1: + return startswith[0].name - aliases = DocAlias.objects.filter(name__icontains=n)[:2] - if len(aliases) == 1: - return aliases[0].name + contains = Document.objects.filter(name__icontains=n)[:2] + if len(contains) == 1: + return contains[0].name return None @@ -292,13 +334,13 @@ def search_for_name(request, name): if redirect_to: rev = rev_split.group(2) # check if we can redirect directly to the rev if it's draft, if rfc - always redirect to main page - if not redirect_to.startswith('rfc') and DocHistory.objects.filter(doc__docalias__name=redirect_to, rev=rev).exists(): + if not redirect_to.startswith('rfc') and DocHistory.objects.filter(doc__name=redirect_to, rev=rev).exists(): return cached_redirect(cache_key, urlreverse("ietf.doc.views_doc.document_main", kwargs={ "name": redirect_to, "rev": rev })) else: return cached_redirect(cache_key, urlreverse("ietf.doc.views_doc.document_main", kwargs={ "name": redirect_to })) # build appropriate flags based on string prefix - doctypenames = DocTypeName.objects.filter(used=True) + doctypenames = DocTypeName.objects.filter(used=True).exclude(slug__in=["bcp","std","fyi"]) # This would have been more straightforward if document prefixes couldn't # contain a dash. Probably, document prefixes shouldn't contain a dash ... 
search_args = "?name=%s" % n @@ -317,9 +359,21 @@ def search_for_name(request, name): def state_name(doc_type, state, shorten=True): name = "" - if doc_type in ["draft", "rfc"] and state not in ["rfc", "expired"]: + # Note doc_type rfc here is _not_ necessarily Document.type - for some callers + # it is a type derived from draft... The ad_workload view needs more rework so that + # the code isn't having to shadow-box so much. + if doc_type == "rfc": + if state == "rfc": + name = "RFC" + if name == "": + s = State.objects.filter(type="rfc",slug=state).first() + if s: + name = s.name + if name == "": + name = State.objects.get(type__in=["draft", "draft-iesg"], slug=state).name + elif doc_type == "draft" and state not in ["rfc", "expired"]: name = State.objects.get(type__in=["draft", "draft-iesg"], slug=state).name - elif state == "rfc": + elif doc_type == "draft" and state == "rfc": name = "RFC" elif doc_type == "conflrev" and state.startswith("appr"): name = "Approved" @@ -403,7 +457,7 @@ def ad_workload(request): ) ad.buckets = copy.deepcopy(bucket_template) - for doc in Document.objects.filter(ad=ad): + for doc in Document.objects.exclude(type_id="rfc").filter(ad=ad): dt = doc_type(doc) state = doc_state(doc) @@ -696,47 +750,50 @@ def recent_drafts(request, days=7): }) -def index_all_drafts(request): +def index_all_drafts(request): # Should we rename this # try to be efficient since this view returns a lot of data categories = [] - for s in ("active", "rfc", "expired", "repl", "auth-rm", "ietf-rm"): + # Gather drafts + for s in ("active", "expired", "repl", "auth-rm", "ietf-rm"): state = State.objects.get(type="draft", slug=s) - if state.slug == "rfc": - heading = "RFCs" - elif state.slug in ("ietf-rm", "auth-rm"): + if state.slug in ("ietf-rm", "auth-rm"): heading = "Internet-Drafts %s" % state.name else: heading = "%s Internet-Drafts" % state.name - draft_names = DocAlias.objects.filter(docs__states=state).values_list("name", "docs__name") + drafts = Document.objects.filter(type_id="draft", states=state).order_by("name") - names = [] - names_to_skip = set() - for name, doc in draft_names: - sort_key = name - if name != doc: - if not name.startswith("rfc"): - name, doc = doc, name - names_to_skip.add(doc) - - if name.startswith("rfc"): - name = name.upper() - sort_key = '%09d' % (100000000-int(name[3:])) - - names.append((name, sort_key)) - - names.sort(key=lambda t: t[1]) - - names = [f'{n}' - for n, __ in names if n not in names_to_skip] + names = [ + f'{doc.name}' + for doc in drafts + ] categories.append((state, heading, len(names), "
".join(names) )) + + # gather RFCs + rfcs = Document.objects.filter(type_id="rfc").order_by('-rfc_number') + names = [ + f'{rfc.name.upper()}' + for rfc in rfcs + ] + + state = State.objects.get(type_id="rfc", slug="published") + + categories.append((state, + "RFCs", + len(names), + "
".join(names) + )) + + # Return to the previous section ordering + categories = categories[0:1]+categories[5:]+categories[1:5] + return render(request, 'doc/index_all_drafts.html', { "categories": categories }) def index_active_drafts(request): @@ -748,27 +805,42 @@ def index_active_drafts(request): slowcache.set(cache_key, groups, 15*60) return render(request, "doc/index_active_drafts.html", { 'groups': groups }) -def ajax_select2_search_docs(request, model_name, doc_type): - if model_name == "docalias": - model = DocAlias - else: - model = Document +def ajax_select2_search_docs(request, model_name, doc_type): # TODO - remove model_name argument... + """Get results for a select2 search field + + doc_type can be "draft", "rfc", or "all", to search for only docs of type "draft", only docs of + type "rfc", or docs of type "draft" or "rfc" or any of the subseries ("bcp", "std", ...). + + If a need arises for searching _only_ for draft or rfc, without including the subseries, then an + additional option or options will be needed. + """ + model = Document # Earlier versions allowed searching over DocAlias which no longer exists q = [w.strip() for w in request.GET.get('q', '').split() if w.strip()] if not q: objs = model.objects.none() else: - qs = model.objects.all() - - if model == Document: - qs = qs.filter(type=doc_type) - elif model == DocAlias: - qs = qs.filter(docs__type=doc_type) - + if doc_type == "draft": + types = ["draft"] + elif doc_type == "rfc": + types = ["rfc"] + elif doc_type == "all": + types = ("draft", "rfc", "bcp", "fyi", "std") + else: + return HttpResponseBadRequest("Invalid document type") + qs = model.objects.filter(type__in=[t.strip() for t in types]) for t in q: qs = qs.filter(name__icontains=t) objs = qs.distinct().order_by("name")[:20] return HttpResponse(select2_id_doc_name_json(model, objs), content_type='application/json') + +def index_subseries(request, type_id): + docs = sorted(Document.objects.filter(type_id=type_id),key=lambda o: int(o.name[3:])) + if len(docs)>0: + type = docs[0].type + else: + type = DocTypeName.objects.get(slug=type_id) + return render(request, "doc/index_subseries.html", {"type": type, "docs": docs}) diff --git a/ietf/doc/views_statement.py b/ietf/doc/views_statement.py index 7f10af356..04adb5d1d 100644 --- a/ietf/doc/views_statement.py +++ b/ietf/doc/views_statement.py @@ -13,7 +13,7 @@ from django.template.loader import render_to_string from ietf.utils import markdown from django.utils.html import escape -from ietf.doc.models import Document, DocAlias, DocEvent, NewRevisionDocEvent, State +from ietf.doc.models import Document, DocEvent, NewRevisionDocEvent, State from ietf.group.models import Group from ietf.ietfauth.utils import role_required from ietf.utils.text import xslugify @@ -242,8 +242,6 @@ def new_statement(request): time=statement.time, ) statement.save_with_history([e1, e2]) - alias = DocAlias.objects.create(name=name) - alias.docs.set([statement]) markdown_content = "" if statement_submission == "upload": if not writing_pdf: diff --git a/ietf/doc/views_status_change.py b/ietf/doc/views_status_change.py index 6db4338f4..9034971eb 100644 --- a/ietf/doc/views_status_change.py +++ b/ietf/doc/views_status_change.py @@ -21,7 +21,7 @@ from django.utils.html import escape import debug # pyflakes:ignore from ietf.doc.mails import email_ad_approved_status_change -from ietf.doc.models import ( Document, DocAlias, State, DocEvent, BallotDocEvent, +from ietf.doc.models import ( Document, State, DocEvent, BallotDocEvent, 
BallotPositionDocEvent, NewRevisionDocEvent, WriteupDocEvent, STATUSCHANGE_RELATIONS ) from ietf.doc.forms import AdForm from ietf.doc.lastcall import request_last_call @@ -104,8 +104,8 @@ def change_state(request, name, option=None): relationship__slug__in=STATUSCHANGE_RELATIONS ) related_doc_info = [ - dict(title=rel_doc.target.document.title, - canonical_name=rel_doc.target.document.canonical_name(), + dict(title=rel_doc.target.title, + name=rel_doc.target.name, newstatus=newstatus(rel_doc)) for rel_doc in related_docs ] @@ -154,7 +154,7 @@ class UploadForm(forms.Form): return get_cleaned_text_file_content(self.cleaned_data["txt"]) def save(self, doc): - filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev)) with io.open(filename, 'w', encoding='utf-8') as destination: if self.cleaned_data['txt']: destination.write(self.cleaned_data['txt']) @@ -168,7 +168,7 @@ def submit(request, name): login = request.user.person - path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev)) not_uploaded_yet = doc.rev == "00" and not os.path.exists(path) if not_uploaded_yet: @@ -185,7 +185,7 @@ def submit(request, name): events = [] e = NewRevisionDocEvent(doc=doc, by=login, type="new_revision") - e.desc = "New version available: %s-%s.txt" % (doc.canonical_name(), doc.rev) + e.desc = "New version available: %s-%s.txt" % (doc.name, doc.rev) e.rev = doc.rev e.save() events.append(e) @@ -217,7 +217,7 @@ def submit(request, name): dict(), ) else: - filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev)) + filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev)) try: with io.open(filename, 'r') as f: init["content"] = f.read() @@ -259,7 +259,7 @@ def edit_title(request, name): init = { "title" : status_change.title } form = ChangeTitleForm(initial=init) - titletext = '%s-%s.txt' % (status_change.canonical_name(),status_change.rev) + titletext = '%s-%s.txt' % (status_change.name,status_change.rev) return render(request, 'doc/change_title.html', {'form': form, 'doc': status_change, @@ -290,7 +290,7 @@ def edit_ad(request, name): init = { "ad" : status_change.ad_id } form = AdForm(initial=init) - titletext = '%s-%s.txt' % (status_change.canonical_name(),status_change.rev) + titletext = '%s-%s.txt' % (status_change.name,status_change.rev) return render(request, 'doc/change_ad.html', {'form': form, 'doc': status_change, @@ -315,7 +315,7 @@ def default_approval_text(status_change,relateddoc): current_text = status_change.text_or_error() # pyflakes:ignore - if relateddoc.target.document.std_level_id in ('std','ps','ds','bcp',): + if relateddoc.target.std_level_id in ('std','ps','ds','bcp',): action = "Protocol Action" else: action = "Document Action" @@ -326,7 +326,7 @@ def default_approval_text(status_change,relateddoc): dict(status_change=status_change, status_change_url = settings.IDTRACKER_BASE_URL+status_change.get_absolute_url(), relateddoc= relateddoc, - relateddoc_url = settings.IDTRACKER_BASE_URL+relateddoc.target.document.get_absolute_url(), + relateddoc_url = settings.IDTRACKER_BASE_URL+relateddoc.target.get_absolute_url(), approved_text = current_text, action=action, newstatus=newstatus(relateddoc), @@ -394,7 +394,7 @@ def approve(request, name): for rel in 
status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS): # Add a document event to each target - c = DocEvent(type="added_comment", doc=rel.target.document, rev=rel.target.document.rev, by=login) + c = DocEvent(type="added_comment", doc=rel.target, rev=rel.target.rev, by=login) c.desc = "New status of %s approved by the IESG\n%s%s" % (newstatus(rel), settings.IDTRACKER_BASE_URL,reverse('ietf.doc.views_doc.document_main', kwargs={'name': status_change.name})) c.save() @@ -405,7 +405,7 @@ def approve(request, name): init = [] for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS): init.append({"announcement_text" : escape(default_approval_text(status_change,rel)), - "label": "Announcement text for %s to %s"%(rel.target.document.canonical_name(),newstatus(rel)), + "label": "Announcement text for %s to %s"%(rel.target.name,newstatus(rel)), }) formset = AnnouncementFormSet(initial=init) for form in formset.forms: @@ -445,7 +445,7 @@ def clean_helper(form, formtype): if not re.match(r'(?i)rfc\d{1,4}',key): errors.append(key+" is not a valid RFC - please use the form RFCn\n") - elif not DocAlias.objects.filter(name=key): + elif not Document.objects.filter(name=key): errors.append(key+" does not exist\n") if new_relations[key] not in STATUSCHANGE_RELATIONS: @@ -543,7 +543,7 @@ def start_rfc_status_change(request, name=None): if name: if not re.match("(?i)rfc[0-9]{1,4}",name): raise Http404 - seed_rfc = get_object_or_404(Document, type="draft", docalias__name=name) + seed_rfc = get_object_or_404(Document, type="rfc", name=name) login = request.user.person @@ -566,14 +566,11 @@ def start_rfc_status_change(request, name=None): group=iesg_group, ) status_change.set_state(form.cleaned_data['create_in_state']) - - DocAlias.objects.create( name= 'status-change-'+form.cleaned_data['document_name']).docs.add(status_change) for key in form.cleaned_data['relations']: - status_change.relateddocument_set.create(target=DocAlias.objects.get(name=key), + status_change.relateddocument_set.create(target=Document.objects.get(name=key), relationship_id=form.cleaned_data['relations'][key]) - tc_date = form.cleaned_data['telechat_date'] if tc_date: update_telechat(request, status_change, login, tc_date) @@ -583,9 +580,9 @@ def start_rfc_status_change(request, name=None): init = {} if name: init['title'] = "%s to CHANGETHIS" % seed_rfc.title - init['document_name'] = "%s-to-CHANGETHIS" % seed_rfc.canonical_name() + init['document_name'] = "%s-to-CHANGETHIS" % seed_rfc.name relations={} - relations[seed_rfc.canonical_name()]=None + relations[seed_rfc.name]=None init['relations'] = relations form = StartStatusChangeForm(initial=init) @@ -611,11 +608,11 @@ def edit_relations(request, name): old_relations={} for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS): - old_relations[rel.target.document.canonical_name()]=rel.relationship.slug + old_relations[rel.target.name]=rel.relationship.slug new_relations=form.cleaned_data['relations'] status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS).delete() for key in new_relations: - status_change.relateddocument_set.create(target=DocAlias.objects.get(name=key), + status_change.relateddocument_set.create(target=Document.objects.get(name=key), relationship_id=new_relations[key]) c = DocEvent(type="added_comment", doc=status_change, rev=status_change.rev, by=login) c.desc = "Affected RFC list changed.\nOLD:" @@ -632,7 +629,7 @@ def 
edit_relations(request, name): else: relations={} for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS): - relations[rel.target.document.canonical_name()]=rel.relationship.slug + relations[rel.target.name]=rel.relationship.slug init = { "relations":relations, } form = EditStatusChangeForm(initial=init) @@ -659,8 +656,8 @@ def generate_last_call_text(request, doc): settings=settings, requester=requester, expiration_date=expiration_date.strftime("%Y-%m-%d"), - changes=['%s from %s to %s\n (%s)'%(rel.target.name.upper(),rel.target.document.std_level.name,newstatus(rel),rel.target.document.title) for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)], - urls=[rel.target.document.get_absolute_url() for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)], + changes=['%s from %s to %s\n (%s)'%(rel.target.name.upper(),rel.target.std_level.name,newstatus(rel),rel.target.title) for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)], + urls=[rel.target.get_absolute_url() for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)], cc=cc ) ) diff --git a/ietf/group/milestones.py b/ietf/group/milestones.py index 64ebb389e..039fdb44c 100644 --- a/ietf/group/milestones.py +++ b/ietf/group/milestones.py @@ -369,7 +369,7 @@ def edit_milestones(request, acronym, group_type=None, milestone_set="current"): email_milestones_changed(request, group, changes, states) if milestone_set == "charter": - return redirect('ietf.doc.views_doc.document_main', name=group.charter.canonical_name()) + return redirect('ietf.doc.views_doc.document_main', name=group.charter.name) else: return HttpResponseRedirect(group.about_url()) else: diff --git a/ietf/group/tests.py b/ietf/group/tests.py index 233cde55e..b11ed8e5f 100644 --- a/ietf/group/tests.py +++ b/ietf/group/tests.py @@ -69,7 +69,7 @@ class GroupStatsTests(TestCase): a = WgDraftFactory() b = WgDraftFactory() RelatedDocument.objects.create( - source=a, target=b.docalias.first(), relationship_id="refnorm" + source=a, target=b, relationship_id="refnorm" ) def test_group_stats(self): @@ -95,7 +95,7 @@ class GroupDocDependencyTests(TestCase): a = WgDraftFactory() b = WgDraftFactory() RelatedDocument.objects.create( - source=a, target=b.docalias.first(), relationship_id="refnorm" + source=a, target=b, relationship_id="refnorm" ) def test_group_document_dependencies(self): diff --git a/ietf/group/tests_info.py b/ietf/group/tests_info.py index 39a1e69e2..4c353f110 100644 --- a/ietf/group/tests_info.py +++ b/ietf/group/tests_info.py @@ -27,7 +27,7 @@ from django.utils.html import escape from ietf.community.models import CommunityList from ietf.community.utils import reset_name_contains_index_for_rule from ietf.doc.factories import WgDraftFactory, IndividualDraftFactory, CharterFactory, BallotDocEventFactory -from ietf.doc.models import Document, DocAlias, DocEvent, State +from ietf.doc.models import Document, DocEvent, State from ietf.doc.utils_charter import charter_name_for_group from ietf.group.admin import GroupForm as AdminGroupForm from ietf.group.factories import (GroupFactory, RoleFactory, GroupEventFactory, @@ -117,8 +117,9 @@ class GroupPagesTests(TestCase): chair = Email.objects.filter(role__group=group, role__name="chair")[0] - with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f: - f.write("This is a charter.") + ( + 
Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt" + ).write_text("This is a charter.") url = urlreverse('ietf.group.views.wg_summary_area', kwargs=dict(group_type="wg")) r = self.client.get(url) @@ -264,8 +265,9 @@ class GroupPagesTests(TestCase): group = CharterFactory().group draft = WgDraftFactory(group=group) - with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f: - f.write("This is a charter.") + ( + Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt" + ).write_text("This is a charter.") milestone = GroupMilestone.objects.create( group=group, @@ -385,7 +387,6 @@ class GroupPagesTests(TestCase): type_id="slides", ) doc.set_state(State.objects.get(type="slides", slug="active")) - DocAlias.objects.create(name=doc.name).docs.add(doc) for url in group_urlreverse_list(group, 'ietf.group.views.materials'): r = self.client.get(url) @@ -668,8 +669,9 @@ class GroupEditTests(TestCase): self.assertTrue(len(q('form .is-invalid')) > 0) # edit info - with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f: - f.write("This is a charter.") + ( + Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt" + ).write_text("This is a charter.") area = group.parent ad = Person.objects.get(name="Areað Irector") state = GroupStateName.objects.get(slug="bof") @@ -711,7 +713,9 @@ class GroupEditTests(TestCase): self.assertEqual(group.list_archive, "archive.mars") self.assertEqual(group.description, '') - self.assertTrue((Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).exists()) + self.assertTrue( + (Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt").exists() + ) self.assertEqual(len(outbox), 2) self.assertTrue('Personnel change' in outbox[0]['Subject']) for prefix in ['ad1','ad2','aread','marschairman','marsdelegate']: diff --git a/ietf/group/utils.py b/ietf/group/utils.py index f7e4b2f17..92b9ac1bd 100644 --- a/ietf/group/utils.py +++ b/ietf/group/utils.py @@ -2,8 +2,7 @@ # -*- coding: utf-8 -*- -import io -import os +from pathlib import Path from django.db.models import Q from django.shortcuts import get_object_or_404 @@ -55,15 +54,14 @@ def get_charter_text(group): if (h.rev > c.rev and not (c_appr and not h_appr)) or (h_appr and not c_appr): c = h - filename = os.path.join(c.get_file_path(), "%s-%s.txt" % (c.canonical_name(), c.rev)) + filename = Path(c.get_file_path()) / f"{c.name}-{c.rev}.txt" try: - with io.open(filename, 'rb') as f: - text = f.read() - try: - text = text.decode('utf8') - except UnicodeDecodeError: - text = text.decode('latin1') - return text + text = filename.read_bytes() + try: + text = text.decode('utf8') + except UnicodeDecodeError: + text = text.decode('latin1') + return text except IOError: return 'Error Loading Group Charter' @@ -191,7 +189,7 @@ def setup_default_community_list_for_group(group): community_list=clist, rule_type="group_rfc", group=group, - state=State.objects.get(slug="rfc", type="draft"), + state=State.objects.get(slug="published", type="rfc"), ) SearchRule.objects.create( community_list=clist, diff --git a/ietf/group/views.py b/ietf/group/views.py index 129247455..f2abe73ca 100644 --- a/ietf/group/views.py +++ b/ietf/group/views.py @@ -61,7 +61,7 @@ import debug # pyflakes:ignore from ietf.community.models import CommunityList, EmailSubscription from ietf.community.utils import 
docs_tracked_by_community_list -from ietf.doc.models import DocTagName, State, DocAlias, RelatedDocument, Document, DocEvent +from ietf.doc.models import DocTagName, State, RelatedDocument, Document, DocEvent from ietf.doc.templatetags.ietf_filters import clean_whitespace from ietf.doc.utils import get_chartering_type, get_tags_for_stream_id from ietf.doc.utils_charter import charter_name_for_group, replace_charter_of_replaced_group @@ -186,17 +186,12 @@ def fill_in_wg_roles(group): group.secretaries = get_roles("secr", []) def fill_in_wg_drafts(group): - aliases = DocAlias.objects.filter(docs__type="draft", docs__group=group).prefetch_related('docs').order_by("name") - group.drafts = [] - group.rfcs = [] - for a in aliases: - if a.name.startswith("draft"): - group.drafts.append(a) - else: - group.rfcs.append(a) - a.remote_field = RelatedDocument.objects.filter(source=a.document,relationship_id__in=['obs','updates']).distinct() - a.invrel = RelatedDocument.objects.filter(target=a,relationship_id__in=['obs','updates']).distinct() - + group.drafts = Document.objects.filter(type_id="draft", group=group).order_by("name") + group.rfcs = Document.objects.filter(type_id="rfc", group=group).order_by("rfc_number") + for rfc in group.rfcs: + # TODO: remote_field? + rfc.remote_field = RelatedDocument.objects.filter(source=rfc,relationship_id__in=['obs','updates']).distinct() + rfc.invrel = RelatedDocument.objects.filter(target=rfc,relationship_id__in=['obs','updates']).distinct() def check_group_email_aliases(): pattern = re.compile(r'expand-(.*?)(-\w+)@.*? +(.*)$') @@ -475,8 +470,8 @@ def prepare_group_documents(request, group, clist): # non-WG drafts and call for WG adoption are considered related if (d.group != group or (d.stream_id and d.get_state_slug("draft-stream-%s" % d.stream_id) in ("c-adopt", "wg-cand"))): - if d.get_state_slug() != "expired": - d.search_heading = "Related Internet-Draft" + if (d.type_id == "draft" and d.get_state_slug() not in ["expired","rfc"]) or d.type_id == "rfc": + d.search_heading = "Related Internet-Drafts and RFCs" docs_related.append(d) else: if not (d.get_state_slug('draft-iesg') == "dead" or (d.stream_id and d.get_state_slug("draft-stream-%s" % d.stream_id) == "dead")): @@ -535,9 +530,8 @@ def group_documents_txt(request, acronym, group_type=None): rows = [] for d in itertools.chain(docs, docs_related): - rfc_number = d.rfc_number() - if rfc_number != None: - name = rfc_number + if d.type_id == "rfc": + name = str(d.rfc_number) else: name = "%s-%s" % (d.name, d.rev) @@ -747,7 +741,7 @@ def dependencies(request, acronym, group_type=None): relationship__slug__startswith="ref", ) - both_rfcs = Q(source__states__slug="rfc", target__docs__states__slug="rfc") + both_rfcs = Q(source__type_id="rfc", target__type_id="rfc") inactive = Q(source__states__slug__in=["expired", "repl"]) attractor = Q(target__name__in=["rfc5000", "rfc5741"]) removed = Q(source__states__slug__in=["auth-rm", "ietf-rm"]) @@ -761,23 +755,23 @@ def dependencies(request, acronym, group_type=None): links = set() for x in relations: - target_state = x.target.document.get_state_slug("draft") + target_state = x.target.get_state_slug("draft") if target_state != "rfc" or x.is_downref(): links.add(x) replacements = RelatedDocument.objects.filter( relationship__slug="replaces", - target__docs__in=[x.target.document for x in links], + target__in=[x.target for x in links], ) for x in replacements: links.add(x) - nodes = set([x.source for x in links]).union([x.target.document for x in links]) + nodes = 
set([x.source for x in links]).union([x.target for x in links]) graph = { "nodes": [ { - "id": x.canonical_name(), + "id": x.name, "rfc": x.get_state("draft").slug == "rfc", "post-wg": not x.get_state("draft-iesg").slug in ["idexists", "watching", "dead"], @@ -795,8 +789,8 @@ def dependencies(request, acronym, group_type=None): ], "links": [ { - "source": x.source.canonical_name(), - "target": x.target.document.canonical_name(), + "source": x.source.name, + "target": x.target.name, "rel": "downref" if x.is_downref() else x.relationship.slug, } for x in links @@ -1283,7 +1277,10 @@ def stream_documents(request, acronym): editable = has_role(request.user, "Secretariat") or group.has_role(request.user, "chair") stream = StreamName.objects.get(slug=acronym) - qs = Document.objects.filter(states__type="draft", states__slug__in=["active", "rfc"], stream=acronym) + qs = Document.objects.filter(stream=acronym).filter( + Q(type_id="draft", states__type="draft", states__slug="active") + | Q(type_id="rfc") + ) docs, meta = prepare_document_table(request, qs, max_results=1000) return render(request, 'group/stream_documents.html', {'stream':stream, 'docs':docs, 'meta':meta, 'editable':editable } ) diff --git a/ietf/idindex/index.py b/ietf/idindex/index.py index cda8310b4..11c452148 100644 --- a/ietf/idindex/index.py +++ b/ietf/idindex/index.py @@ -14,7 +14,7 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.models import Document, DocEvent, DocumentAuthor, RelatedDocument, DocAlias, State +from ietf.doc.models import Document, DocEvent, DocumentAuthor, RelatedDocument, State from ietf.doc.models import LastCallDocEvent, NewRevisionDocEvent from ietf.doc.models import IESG_SUBSTATE_TAGS from ietf.doc.templatetags.ietf_filters import clean_whitespace @@ -31,15 +31,18 @@ def all_id_txt(): t = revision_time.get(name) return t.strftime("%Y-%m-%d") if t else "" - rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc", - docs__states=State.objects.get(type="draft", slug="rfc")).values_list("docs__name", "name")) + rfcs = dict() + for rfc in Document.objects.filter(type_id="rfc"): + draft = rfc.came_from_draft() + if draft is not None: + rfcs[draft.name] = rfc.name - replacements = dict(RelatedDocument.objects.filter(target__docs__states=State.objects.get(type="draft", slug="repl"), + replacements = dict(RelatedDocument.objects.filter(target__states=State.objects.get(type="draft", slug="repl"), relationship="replaces").values_list("target__name", "source__name")) # we need a distinct to prevent the queries below from multiplying the result - all_ids = Document.objects.filter(type="draft").order_by('name').exclude(name__startswith="rfc").distinct() + all_ids = Document.objects.filter(type="draft").order_by('name').distinct() res = ["\nInternet-Drafts Status Summary\n"] @@ -77,9 +80,9 @@ def all_id_txt(): last_field = "" if s.slug == "rfc": - a = rfc_aliases.get(name) - if a: - last_field = a[3:] + rfc = rfcs.get(name) + if rfc: + last_field = rfc[3:] # Rework this to take advantage of having the number at hand already. 
elif s.slug == "repl": state += " replaced by " + replacements.get(name, "0") @@ -108,14 +111,17 @@ def file_types_for_drafts(): def all_id2_txt(): # this returns a lot of data so try to be efficient - drafts = Document.objects.filter(type="draft").exclude(name__startswith="rfc").order_by('name') + drafts = Document.objects.filter(type="draft").order_by('name') drafts = drafts.select_related('group', 'group__parent', 'ad', 'intended_std_level', 'shepherd', ) drafts = drafts.prefetch_related("states") - rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc", - docs__states=State.objects.get(type="draft", slug="rfc")).values_list("docs__name", "name")) + rfcs = dict() + for rfc in Document.objects.filter(type_id="rfc"): + draft = rfc.came_from_draft() + if draft is not None: + rfcs[draft.name] = rfc.name - replacements = dict(RelatedDocument.objects.filter(target__docs__states=State.objects.get(type="draft", slug="repl"), + replacements = dict(RelatedDocument.objects.filter(target__states=State.objects.get(type="draft", slug="repl"), relationship="replaces").values_list("target__name", "source__name")) revision_time = dict(DocEvent.objects.filter(type="new_revision", doc__name__startswith="draft-").order_by('time').values_list("doc__name", "time")) @@ -164,9 +170,9 @@ def all_id2_txt(): # 4 rfc_number = "" if state == "rfc": - a = rfc_aliases.get(d.name) - if a: - rfc_number = a[3:] + rfc = rfcs.get(d.name) + if rfc: + rfc_number = rfc[3:] fields.append(rfc_number) # 5 repl = "" diff --git a/ietf/idindex/tests.py b/ietf/idindex/tests.py index f207fa562..c55878378 100644 --- a/ietf/idindex/tests.py +++ b/ietf/idindex/tests.py @@ -11,8 +11,8 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.factories import WgDraftFactory -from ietf.doc.models import Document, DocAlias, RelatedDocument, State, LastCallDocEvent, NewRevisionDocEvent +from ietf.doc.factories import WgDraftFactory, RfcFactory +from ietf.doc.models import Document, RelatedDocument, State, LastCallDocEvent, NewRevisionDocEvent from ietf.group.factories import GroupFactory from ietf.name.models import DocRelationshipName from ietf.idindex.index import all_id_txt, all_id2_txt, id_index_txt @@ -41,7 +41,8 @@ class IndexTests(TestCase): # published draft.set_state(State.objects.get(type="draft", slug="rfc")) - DocAlias.objects.create(name="rfc1234").docs.add(draft) + rfc = RfcFactory(rfc_number=1234) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) txt = all_id_txt() self.assertTrue(draft.name + "-" + draft.rev in txt) @@ -52,8 +53,13 @@ class IndexTests(TestCase): RelatedDocument.objects.create( relationship=DocRelationshipName.objects.get(slug="replaces"), - source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"), - target=draft.docalias.get(name__startswith="draft")) + source=Document.objects.create( + type_id="draft", + rev="00", + name="draft-test-replacement" + ), + target=draft + ) txt = all_id_txt() self.assertTrue(draft.name + "-" + draft.rev in txt) @@ -103,7 +109,8 @@ class IndexTests(TestCase): # test RFC draft.set_state(State.objects.get(type="draft", slug="rfc")) - DocAlias.objects.create(name="rfc1234").docs.add(draft) + rfc = RfcFactory(rfc_number=1234) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) t = get_fields(all_id2_txt()) self.assertEqual(t[4], "1234") @@ -111,8 +118,12 @@ class IndexTests(TestCase): draft.set_state(State.objects.get(type="draft", slug="repl")) 
RelatedDocument.objects.create( relationship=DocRelationshipName.objects.get(slug="replaces"), - source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"), - target=draft.docalias.get(name__startswith="draft")) + source=Document.objects.create( + type_id="draft", + rev="00", + name="draft-test-replacement" + ), + target=draft) t = get_fields(all_id2_txt()) self.assertEqual(t[5], "draft-test-replacement") diff --git a/ietf/iesg/agenda.py b/ietf/iesg/agenda.py index 0abc5e6cd..587713089 100644 --- a/ietf/iesg/agenda.py +++ b/ietf/iesg/agenda.py @@ -66,7 +66,7 @@ def get_doc_section(doc): elif doc.type_id == 'statchg': protocol_action = False for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')): - if relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'): + if relation.relationship_id in ('tops','tois') or relation.target.std_level_id in ('std','ds','ps'): protocol_action = True if protocol_action: s = "2.3" @@ -186,7 +186,7 @@ def fill_in_agenda_docs(date, sections, docs=None): doc.review_assignments = review_assignments_for_docs.get(doc.name, []) elif doc.type_id == "conflrev": - doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document + doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target elif doc.type_id == "charter": pass @@ -219,4 +219,4 @@ def agenda_data(date=None): fill_in_agenda_docs(date, sections) fill_in_agenda_management_issues(date, sections) - return { 'date': date.isoformat(), 'sections': sections } \ No newline at end of file + return { 'date': date.isoformat(), 'sections': sections } diff --git a/ietf/iesg/tests.py b/ietf/iesg/tests.py index ceda918bb..7211a6bc0 100644 --- a/ietf/iesg/tests.py +++ b/ietf/iesg/tests.py @@ -17,7 +17,7 @@ from django.utils.html import escape import debug # pyflakes:ignore from ietf.doc.models import DocEvent, BallotPositionDocEvent, TelechatDocEvent -from ietf.doc.models import Document, DocAlias, State, RelatedDocument +from ietf.doc.models import Document, State, RelatedDocument from ietf.doc.factories import WgDraftFactory, IndividualDraftFactory, ConflictReviewFactory, BaseDocumentFactory, CharterFactory, WgRfcFactory, IndividualRfcFactory from ietf.doc.utils import create_ballot_if_not_open from ietf.group.factories import RoleFactory, GroupFactory, DatedGroupMilestoneFactory, DatelessGroupMilestoneFactory @@ -150,8 +150,8 @@ class IESGAgendaTests(TestCase): super().setUp() mars = GroupFactory(acronym='mars',parent=Group.objects.get(acronym='farfut')) wgdraft = WgDraftFactory(name='draft-ietf-mars-test', group=mars, intended_std_level_id='ps') - rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',], states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', ) - wgdraft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), relationship_id='refnorm') + rfc = IndividualRfcFactory.create(stream_id='irtf', rfc_number=6666, std_level_id='inf', ) + wgdraft.relateddocument_set.create(target=rfc, relationship_id='refnorm') ise_draft = IndividualDraftFactory(name='draft-imaginary-independent-submission') ise_draft.stream = StreamName.objects.get(slug="ise") ise_draft.save_with_history([DocEvent(doc=ise_draft, rev=ise_draft.rev, type="changed_stream", by=Person.objects.get(user__username="secretary"), desc="Test")]) @@ -281,7 +281,7 @@ class IESGAgendaTests(TestCase): relation = 
RelatedDocument.objects.create( source=statchg, - target=DocAlias.objects.filter(name__startswith='rfc', docs__std_level="ps")[0], + target=Document.objects.filter(type_id="rfc", std_level="ps").first(), relationship_id="tohist") statchg.group = Group.objects.get(acronym="mars") @@ -299,7 +299,7 @@ class IESGAgendaTests(TestCase): self.assertTrue(statchg in agenda_data(date_str)["sections"]["2.3.3"]["docs"]) # 3.3 document status changes - relation.target = DocAlias.objects.filter(name__startswith='rfc', docs__std_level="inf")[0] + relation.target = Document.objects.filter(type_id="rfc", std_level="inf").first() relation.save() statchg.group = Group.objects.get(acronym="mars") diff --git a/ietf/iesg/utils.py b/ietf/iesg/utils.py index 4ddc9cb40..3f4883798 100644 --- a/ietf/iesg/utils.py +++ b/ietf/iesg/utils.py @@ -32,10 +32,10 @@ def telechat_page_count(date=None, docs=None): pages_for_action += d.pages or 0 elif d.type_id == 'statchg': for rel in d.related_that_doc(STATUSCHANGE_RELATIONS): - pages_for_action += rel.document.pages or 0 + pages_for_action += rel.pages or 0 elif d.type_id == 'conflrev': for rel in d.related_that_doc('conflrev'): - pages_for_action += rel.document.pages or 0 + pages_for_action += rel.pages or 0 else: pass @@ -43,10 +43,10 @@ def telechat_page_count(date=None, docs=None): for d in for_approval-set(drafts): if d.type_id == 'statchg': for rel in d.related_that_doc(STATUSCHANGE_RELATIONS): - related_pages += rel.document.pages or 0 + related_pages += rel.pages or 0 elif d.type_id == 'conflrev': for rel in d.related_that_doc('conflrev'): - related_pages += rel.document.pages or 0 + related_pages += rel.pages or 0 else: # There's really nothing to rely on to give a reading load estimate for charters pass diff --git a/ietf/iesg/views.py b/ietf/iesg/views.py index 0830db261..a219a6b5d 100644 --- a/ietf/iesg/views.py +++ b/ietf/iesg/views.py @@ -122,7 +122,7 @@ def agenda_json(request, date=None): for doc in docs: wginfo = { - 'docname': doc.canonical_name(), + 'docname': doc.name, 'rev': doc.rev, 'wgname': doc.group.name, 'acronym': doc.group.acronym, @@ -137,7 +137,7 @@ def agenda_json(request, date=None): for doc in docs: docinfo = { - 'docname':doc.canonical_name(), + 'docname':doc.name, 'title':doc.title, 'ad':doc.ad.name if doc.ad else None, } @@ -149,8 +149,8 @@ def agenda_json(request, date=None): if doc.type_id == "draft": docinfo['rev'] = doc.rev docinfo['intended-std-level'] = str(doc.intended_std_level) - if doc.rfc_number(): - docinfo['rfc-number'] = doc.rfc_number() + if doc.type_id == "rfc": + docinfo['rfc-number'] = doc.rfc_number iana_state = doc.get_state("draft-iana-review") if iana_state and iana_state.slug in ("not-ok", "changed", "need-rev"): @@ -170,8 +170,8 @@ def agenda_json(request, date=None): elif doc.type_id == 'conflrev': docinfo['rev'] = doc.rev - td = doc.relateddocument_set.get(relationship__slug='conflrev').target.document - docinfo['target-docname'] = td.canonical_name() + td = doc.relateddocument_set.get(relationship__slug='conflrev').target + docinfo['target-docname'] = td.name docinfo['target-title'] = td.title docinfo['target-rev'] = td.rev docinfo['intended-std-level'] = str(td.intended_std_level) diff --git a/ietf/ipr/admin.py b/ietf/ipr/admin.py index a0185f58c..afc1952d7 100644 --- a/ietf/ipr/admin.py +++ b/ietf/ipr/admin.py @@ -94,7 +94,7 @@ admin.site.register(IprDocRel, IprDocRelAdmin) class RelatedIprAdmin(admin.ModelAdmin): list_display = ['source', 'target', 'relationship', ] - search_fields = ['source__name', 
'target__name', 'target__docs__name', ] + search_fields = ['source__name', 'target__name', ] raw_id_fields = ['source', 'target', ] admin.site.register(RelatedIpr, RelatedIprAdmin) diff --git a/ietf/ipr/factories.py b/ietf/ipr/factories.py index ca48b32d0..8a8a74015 100644 --- a/ietf/ipr/factories.py +++ b/ietf/ipr/factories.py @@ -42,7 +42,7 @@ class IprDisclosureBaseFactory(factory.django.DjangoModelFactory): return if extracted: for doc in extracted: - IprDocRel.objects.create(disclosure=self,document=doc.docalias.first()) + IprDocRel.objects.create(disclosure=self,document=doc) @factory.post_generation def updates(self, create, extracted, **kwargs): diff --git a/ietf/ipr/forms.py b/ietf/ipr/forms.py index fe4a70f8c..8ea179789 100644 --- a/ietf/ipr/forms.py +++ b/ietf/ipr/forms.py @@ -14,7 +14,7 @@ from django.utils.encoding import force_str import debug # pyflakes:ignore from ietf.group.models import Group -from ietf.doc.fields import SearchableDocAliasField +from ietf.doc.fields import SearchableDocumentField from ietf.ipr.mail import utc_from_string from ietf.ipr.fields import SearchableIprDisclosuresField from ietf.ipr.models import (IprDocRel, IprDisclosureBase, HolderIprDisclosure, @@ -95,7 +95,7 @@ class AddEmailForm(forms.Form): return self.cleaned_data class DraftForm(forms.ModelForm): - document = SearchableDocAliasField(label="I-D name/RFC number", required=True, doc_type="draft") + document = SearchableDocumentField(label="I-D name/RFC number", required=True, doc_type="all") class Meta: model = IprDocRel diff --git a/ietf/ipr/migrations/0002_iprdocrel_no_aliases.py b/ietf/ipr/migrations/0002_iprdocrel_no_aliases.py new file mode 100644 index 000000000..bcfc73a32 --- /dev/null +++ b/ietf/ipr/migrations/0002_iprdocrel_no_aliases.py @@ -0,0 +1,104 @@ +# Generated by Django 4.2.2 on 2023-06-16 13:40 + +from django.db import migrations +import django.db.models.deletion +from django.db.models import F, Subquery, OuterRef, ManyToManyField, CharField +import ietf.utils.models + +def forward(apps, schema_editor): + IprDocRel = apps.get_model("ipr", "IprDocRel") + DocAlias = apps.get_model("doc", "DocAlias") + document_subquery = Subquery( + DocAlias.objects.filter( + pk=OuterRef("deprecated_document") + ).values("docs")[:1] + ) + name_subquery = Subquery( + DocAlias.objects.filter( + pk=OuterRef("deprecated_document") + ).values("name")[:1] + ) + IprDocRel.objects.annotate( + firstdoc=document_subquery, + aliasname=name_subquery, + ).update( + document=F("firstdoc"), + originaldocumentaliasname=F("aliasname"), + ) + # This might not be right - we may need here (and in the relateddocument migrations) to pay attention to + # whether the name being pointed to is an rfc name or a draft name and point to the right object instead...
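# One way to make that check explicit (a sketch only, not applied by this migration;
# it assumes rfc-type Documents with the aliased names already exist at this point in
# the migration graph):
#
#     Document = apps.get_model("doc", "Document")
#     for rel in IprDocRel.objects.filter(originaldocumentaliasname__startswith="rfc"):
#         rfc = Document.objects.filter(type_id="rfc", name=rel.originaldocumentaliasname).first()
#         if rfc is not None and rel.document_id != rfc.pk:
#             rel.document = rfc
#             rel.save()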
+ +def reverse(apps, schema_editor): + pass + +class Migration(migrations.Migration): + dependencies = [ + ("ipr", "0001_initial"), + ("doc", "0016_relate_hist_no_aliases") + ] + + operations = [ + migrations.AlterField( + model_name='iprdocrel', + name='document', + field=ietf.utils.models.ForeignKey( + db_index=False, + on_delete=django.db.models.deletion.CASCADE, + to='doc.docalias', + ), + ), + migrations.RenameField( + model_name="iprdocrel", + old_name="document", + new_name="deprecated_document" + ), + migrations.AlterField( + model_name='iprdocrel', + name='deprecated_document', + field=ietf.utils.models.ForeignKey( + db_index=True, + on_delete=django.db.models.deletion.CASCADE, + to='doc.docalias', + ), + ), + migrations.AddField( + model_name="iprdocrel", + name="document", + field=ietf.utils.models.ForeignKey( + default=1, # A lie, but a convenient one - no iprdocrel objects point here. + on_delete=django.db.models.deletion.CASCADE, + to="doc.document", + db_index=False, + ), + preserve_default=False, + ), + migrations.AddField( + model_name="iprdocrel", + name="originaldocumentaliasname", + field=CharField(max_length=255,null=True,blank=True), + preserve_default=True, + ), + migrations.RunPython(forward, reverse), + migrations.AlterField( + model_name="iprdocrel", + name="document", + field=ietf.utils.models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="doc.document", + db_index=True, + ), + ), + migrations.AlterField( + model_name='iprdisclosurebase', + name='docs', + field=ManyToManyField(through='ipr.IprDocRel', to='doc.Document'), + ), + migrations.RemoveField( + model_name="iprdocrel", + name="deprecated_document", + field=ietf.utils.models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to='doc.DocAlias', + ), + ), + ] diff --git a/ietf/ipr/migrations/0003_alter_iprdisclosurebase_docs.py b/ietf/ipr/migrations/0003_alter_iprdisclosurebase_docs.py new file mode 100644 index 000000000..23b349f56 --- /dev/null +++ b/ietf/ipr/migrations/0003_alter_iprdisclosurebase_docs.py @@ -0,0 +1,18 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("doc", "0017_delete_docalias"), + ("ipr", "0002_iprdocrel_no_aliases"), + ] + + operations = [ + migrations.AlterField( + model_name="iprdisclosurebase", + name="docs", + field=models.ManyToManyField(through="ipr.IprDocRel", to="doc.document"), + ), + ] diff --git a/ietf/ipr/models.py b/ietf/ipr/models.py index b3add079d..a3c917747 100644 --- a/ietf/ipr/models.py +++ b/ietf/ipr/models.py @@ -7,7 +7,7 @@ from django.db import models from django.urls import reverse from django.utils import timezone -from ietf.doc.models import DocAlias, DocEvent +from ietf.doc.models import Document, DocEvent from ietf.name.models import DocRelationshipName,IprDisclosureStateName,IprLicenseTypeName,IprEventTypeName from ietf.person.models import Person from ietf.message.models import Message @@ -16,7 +16,7 @@ from ietf.utils.models import ForeignKey class IprDisclosureBase(models.Model): by = ForeignKey(Person) # who was logged in, or System if nobody was logged in compliant = models.BooleanField("Complies to RFC3979", default=True) - docs = models.ManyToManyField(DocAlias, through='IprDocRel') + docs = models.ManyToManyField(Document, through='IprDocRel') holder_legal_name = models.CharField(max_length=255) notes = models.TextField("Additional notes", blank=True) other_designations = models.CharField("Designations 
for other contributions", blank=True, max_length=255) @@ -160,9 +160,10 @@ class GenericIprDisclosure(IprDisclosureBase): class IprDocRel(models.Model): disclosure = ForeignKey(IprDisclosureBase) - document = ForeignKey(DocAlias) + document = ForeignKey(Document) sections = models.TextField(blank=True) revisions = models.CharField(max_length=16,blank=True) # allows strings like 01-07 + originaldocumentaliasname = models.CharField(max_length=255, null=True, blank=True) def doc_type(self): name = self.document.name @@ -175,7 +176,7 @@ class IprDocRel(models.Model): def formatted_name(self): name = self.document.name - if name.startswith("rfc"): + if len(name) >= 3 and name[:3] in ("rfc", "bcp", "fyi", "std"): return name.upper() #elif self.revisions: # return "%s-%s" % (name, self.revisions) @@ -234,10 +235,7 @@ class IprEvent(models.Model): 'removed_objfalse': 'removed_objfalse_related_ipr', } if self.type_id in event_type_map: - related_docs = set() # related docs, no duplicates - for alias in self.disclosure.docs.all(): - related_docs.update(alias.docs.all()) - for doc in related_docs: + for doc in self.disclosure.docs.distinct(): DocEvent.objects.create( type=event_type_map[self.type_id], time=self.time, diff --git a/ietf/ipr/resources.py b/ietf/ipr/resources.py index 665b0ab02..0d8421cde 100644 --- a/ietf/ipr/resources.py +++ b/ietf/ipr/resources.py @@ -16,11 +16,11 @@ from ietf.ipr.models import ( IprDisclosureBase, IprDocRel, HolderIprDisclosure, from ietf.person.resources import PersonResource from ietf.name.resources import IprDisclosureStateNameResource -from ietf.doc.resources import DocAliasResource +from ietf.doc.resources import DocumentResource class IprDisclosureBaseResource(ModelResource): by = ToOneField(PersonResource, 'by') state = ToOneField(IprDisclosureStateNameResource, 'state') - docs = ToManyField(DocAliasResource, 'docs', null=True) + docs = ToManyField(DocumentResource, 'docs', null=True) rel = ToManyField('ietf.ipr.resources.IprDisclosureBaseResource', 'rel', null=True) class Meta: queryset = IprDisclosureBase.objects.all() @@ -45,10 +45,9 @@ class IprDisclosureBaseResource(ModelResource): } api.ipr.register(IprDisclosureBaseResource()) -from ietf.doc.resources import DocAliasResource class IprDocRelResource(ModelResource): disclosure = ToOneField(IprDisclosureBaseResource, 'disclosure') - document = ToOneField(DocAliasResource, 'document') + document = ToOneField(DocumentResource, 'document') class Meta: cache = SimpleCache() queryset = IprDocRel.objects.all() @@ -66,13 +65,12 @@ api.ipr.register(IprDocRelResource()) from ietf.person.resources import PersonResource from ietf.name.resources import IprDisclosureStateNameResource, IprLicenseTypeNameResource -from ietf.doc.resources import DocAliasResource class HolderIprDisclosureResource(ModelResource): by = ToOneField(PersonResource, 'by') state = ToOneField(IprDisclosureStateNameResource, 'state') iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr') licensing = ToOneField(IprLicenseTypeNameResource, 'licensing') - docs = ToManyField(DocAliasResource, 'docs', null=True) + docs = ToManyField(DocumentResource, 'docs', null=True) rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True) class Meta: cache = SimpleCache() @@ -111,12 +109,11 @@ api.ipr.register(HolderIprDisclosureResource()) from ietf.person.resources import PersonResource from ietf.name.resources import IprDisclosureStateNameResource -from ietf.doc.resources import DocAliasResource class 
ThirdPartyIprDisclosureResource(ModelResource): by = ToOneField(PersonResource, 'by') state = ToOneField(IprDisclosureStateNameResource, 'state') iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr') - docs = ToManyField(DocAliasResource, 'docs', null=True) + docs = ToManyField(DocumentResource, 'docs', null=True) rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True) class Meta: cache = SimpleCache() @@ -168,12 +165,11 @@ api.ipr.register(RelatedIprResource()) from ietf.person.resources import PersonResource from ietf.name.resources import IprDisclosureStateNameResource -from ietf.doc.resources import DocAliasResource class NonDocSpecificIprDisclosureResource(ModelResource): by = ToOneField(PersonResource, 'by') state = ToOneField(IprDisclosureStateNameResource, 'state') iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr') - docs = ToManyField(DocAliasResource, 'docs', null=True) + docs = ToManyField(DocumentResource, 'docs', null=True) rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True) class Meta: cache = SimpleCache() @@ -207,12 +203,11 @@ api.ipr.register(NonDocSpecificIprDisclosureResource()) from ietf.person.resources import PersonResource from ietf.name.resources import IprDisclosureStateNameResource -from ietf.doc.resources import DocAliasResource class GenericIprDisclosureResource(ModelResource): by = ToOneField(PersonResource, 'by') state = ToOneField(IprDisclosureStateNameResource, 'state') iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr') - docs = ToManyField(DocAliasResource, 'docs', null=True) + docs = ToManyField(DocumentResource, 'docs', null=True) rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True) class Meta: cache = SimpleCache() diff --git a/ietf/ipr/templatetags/ipr_filters.py b/ietf/ipr/templatetags/ipr_filters.py index 013038750..8b3b420c4 100644 --- a/ietf/ipr/templatetags/ipr_filters.py +++ b/ietf/ipr/templatetags/ipr_filters.py @@ -32,7 +32,7 @@ def to_class_name(value): return value.__class__.__name__ def draft_rev_at_time(iprdocrel): - draft = iprdocrel.document.document + draft = iprdocrel.document event = iprdocrel.disclosure.get_latest_event_posted() if event is None: return ("","The Internet-Draft's revision at the time this disclosure was posted could not be determined.") @@ -47,7 +47,7 @@ def draft_rev_at_time(iprdocrel): @register.filter def no_revisions_message(iprdocrel): - draft = iprdocrel.document.document + draft = iprdocrel.document if draft.type_id != "draft" or iprdocrel.revisions.strip() != "": return "" rev_at_time, exception = draft_rev_at_time(iprdocrel) diff --git a/ietf/ipr/tests.py b/ietf/ipr/tests.py index 66337bff2..73b5d0dc5 100644 --- a/ietf/ipr/tests.py +++ b/ietf/ipr/tests.py @@ -15,11 +15,11 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.models import DocAlias from ietf.doc.factories import ( DocumentFactory, WgDraftFactory, WgRfcFactory, + RfcFactory, NewRevisionDocEventFactory ) from ietf.group.factories import RoleFactory @@ -180,7 +180,8 @@ class IprTests(TestCase): self.assertContains(r, draft.name) self.assertNotContains(r, ipr.title) - DocAlias.objects.create(name="rfc321").docs.add(draft) + rfc = RfcFactory(rfc_number=321) + draft.relateddocument_set.create(relationship_id="became_rfc",target=rfc) # find RFC r = self.client.get(url + "?submit=rfc&rfc=321") @@ -285,7 +286,7 @@ class IprTests(TestCase): """Add a new specific disclosure. 
Note: submitter does not need to be logged in. """ draft = WgDraftFactory() - WgRfcFactory() + rfc = WgRfcFactory() url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "specific" }) # successful post @@ -299,9 +300,9 @@ class IprTests(TestCase): "ietfer_contact_info": "555-555-0101", "iprdocrel_set-TOTAL_FORMS": 2, "iprdocrel_set-INITIAL_FORMS": 0, - "iprdocrel_set-0-document": draft.docalias.first().pk, + "iprdocrel_set-0-document": draft.pk, "iprdocrel_set-0-revisions": '00', - "iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk, + "iprdocrel_set-1-document": rfc.pk, "patent_number": "SE12345678901", "patent_inventor": "A. Nonymous", "patent_title": "A method of transferring bits", @@ -341,7 +342,7 @@ class IprTests(TestCase): def test_new_specific_no_revision(self): draft = WgDraftFactory() - WgRfcFactory() + rfc = WgRfcFactory() url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "specific" }) # successful post @@ -355,8 +356,8 @@ class IprTests(TestCase): "ietfer_contact_info": "555-555-0101", "iprdocrel_set-TOTAL_FORMS": 2, "iprdocrel_set-INITIAL_FORMS": 0, - "iprdocrel_set-0-document": draft.docalias.first().pk, - "iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk, + "iprdocrel_set-0-document": draft.pk, + "iprdocrel_set-1-document": rfc.pk, "patent_number": "SE12345678901", "patent_inventor": "A. Nonymous", "patent_title": "A method of transferring bits", @@ -375,7 +376,7 @@ class IprTests(TestCase): """Add a new third-party disclosure. Note: submitter does not need to be logged in. """ draft = WgDraftFactory() - WgRfcFactory() + rfc = WgRfcFactory() url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "third-party" }) # successful post @@ -387,9 +388,9 @@ class IprTests(TestCase): "ietfer_contact_info": "555-555-0101", "iprdocrel_set-TOTAL_FORMS": 2, "iprdocrel_set-INITIAL_FORMS": 0, - "iprdocrel_set-0-document": draft.docalias.first().pk, + "iprdocrel_set-0-document": draft.pk, "iprdocrel_set-0-revisions": '00', - "iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk, + "iprdocrel_set-1-document": rfc.pk, "patent_number": "SE12345678901", "patent_inventor": "A. Nonymous", "patent_title": "A method of transferring bits", @@ -434,7 +435,7 @@ class IprTests(TestCase): "holder_legal_name": "Test Legal", "ietfer_contact_info": "555-555-0101", "ietfer_name": "Test Participant", - "iprdocrel_set-0-document": draft.docalias.first().pk, + "iprdocrel_set-0-document": draft.pk, "iprdocrel_set-0-revisions": '00', "iprdocrel_set-INITIAL_FORMS": 0, "iprdocrel_set-TOTAL_FORMS": 1, @@ -462,7 +463,7 @@ class IprTests(TestCase): def test_update(self): draft = WgDraftFactory() - WgRfcFactory() + rfc = WgRfcFactory() original_ipr = HolderIprDisclosureFactory(docs=[draft,]) # get @@ -483,9 +484,9 @@ class IprTests(TestCase): "ietfer_contact_info": "555-555-0101", "iprdocrel_set-TOTAL_FORMS": 2, "iprdocrel_set-INITIAL_FORMS": 0, - "iprdocrel_set-0-document": draft.docalias.first().pk, + "iprdocrel_set-0-document": draft.pk, "iprdocrel_set-0-revisions": '00', - "iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk, + "iprdocrel_set-1-document": rfc.pk, "patent_number": "SE12345678901", "patent_inventor": "A. 
Nonymous", "patent_title": "A method of transferring bits", @@ -520,7 +521,7 @@ class IprTests(TestCase): "holder_contact_email": "test@holder.com", "iprdocrel_set-TOTAL_FORMS": 1, "iprdocrel_set-INITIAL_FORMS": 0, - "iprdocrel_set-0-document": draft.docalias.first().pk, + "iprdocrel_set-0-document": draft.pk, "iprdocrel_set-0-revisions": '00', "patent_number": "SE12345678901", "patent_inventor": "A. Nonymous", @@ -786,7 +787,7 @@ Subject: test 'iprdocrel_set-INITIAL_FORMS' : 0, 'iprdocrel_set-0-id': '', "iprdocrel_set-0-document": disclosure.docs.first().pk, - "iprdocrel_set-0-revisions": disclosure.docs.first().document.rev, + "iprdocrel_set-0-revisions": disclosure.docs.first().rev, 'holder_legal_name': disclosure.holder_legal_name, 'patent_number': patent_dict['Number'], 'patent_title': patent_dict['Title'], @@ -848,7 +849,7 @@ Subject: test NewRevisionDocEventFactory(doc=draft, rev=f"{rev:02d}", time=now-datetime.timedelta(days=30*(2-rev))) # Disclosure has non-empty revisions field on its related draft - iprdocrel = IprDocRelFactory(document=draft.docalias.first()) + iprdocrel = IprDocRelFactory(document=draft) IprEventFactory(type_id="posted",time=now,disclosure=iprdocrel.disclosure) self.assertEqual( no_revisions_message(iprdocrel), @@ -856,7 +857,7 @@ Subject: test ) # Disclosure has more than one revision, none called out, disclosure after submissions - iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="") + iprdocrel = IprDocRelFactory(document=draft, revisions="") IprEventFactory(type_id="posted",time=now,disclosure=iprdocrel.disclosure) self.assertEqual( no_revisions_message(iprdocrel), @@ -864,7 +865,7 @@ Subject: test ) # Disclosure has more than one revision, none called out, disclosure after 01 - iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="") + iprdocrel = IprDocRelFactory(document=draft, revisions="") e = IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure) e.time = now-datetime.timedelta(days=15) e.save() @@ -874,7 +875,7 @@ Subject: test ) # Disclosure has more than one revision, none called out, disclosure was before the 00 - iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="") + iprdocrel = IprDocRelFactory(document=draft, revisions="") e = IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure) e.time = now-datetime.timedelta(days=180) e.save() @@ -886,7 +887,7 @@ Subject: test # disclosed draft has no NewRevisionDocEvents draft = WgDraftFactory(rev="20") draft.docevent_set.all().delete() - iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="") + iprdocrel = IprDocRelFactory(document=draft, revisions="") IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure) self.assertEqual( no_revisions_message(iprdocrel), @@ -895,7 +896,7 @@ Subject: test # disclosed draft has only one revision draft = WgDraftFactory(rev="00") - iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="") + iprdocrel = IprDocRelFactory(document=draft, revisions="") IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure) self.assertEqual( no_revisions_message(iprdocrel), diff --git a/ietf/ipr/utils.py b/ietf/ipr/utils.py index f288803de..c4f17c482 100644 --- a/ietf/ipr/utils.py +++ b/ietf/ipr/utils.py @@ -32,33 +32,31 @@ def get_ipr_summary(disclosure): return summary if len(summary) <= 128 else summary[:125]+'...' 
-def iprs_from_docs(aliases,**kwargs): - """Returns a list of IPRs related to doc aliases""" +def iprs_from_docs(docs,**kwargs): + """Returns a list of IPRs related to docs""" iprdocrels = [] - for alias in aliases: - for document in alias.docs.all(): - if document.ipr(**kwargs): - iprdocrels += document.ipr(**kwargs) + for document in docs: + if document.ipr(**kwargs): + iprdocrels += document.ipr(**kwargs) return list(set([i.disclosure for i in iprdocrels])) -def related_docs(alias, relationship=('replaces', 'obs')): +def related_docs(doc, relationship=('replaces', 'obs'), reverse_relationship=("became_rfc",)): """Returns list of related documents""" - results = [] - for doc in alias.docs.all(): - results += list(doc.docalias.all()) - - rels = [] - for doc in alias.docs.all(): - rels += list(doc.all_relations_that_doc(relationship)) + results = [doc] + + rels = doc.all_relations_that_doc(relationship) for rel in rels: - rel_aliases = list(rel.target.document.docalias.all()) - - for x in rel_aliases: - x.related = rel - x.relation = rel.relationship.revname - results += rel_aliases + rel.target.related = rel + rel.target.relation = rel.relationship.revname + results += [x.target for x in rels] + + rev_rels = doc.all_relations_that(reverse_relationship) + for rel in rev_rels: + rel.source.related = rel + rel.source.relation = rel.relationship.name + results += [x.source for x in rev_rels] return list(set(results)) @@ -67,17 +65,16 @@ def generate_draft_recursive_txt(): docipr = {} for o in IprDocRel.objects.filter(disclosure__state='posted').select_related('document'): - alias = o.document - name = alias.name - for document in alias.docs.all(): - related = set(document.docalias.all()) | set(document.all_related_that_doc(('obs', 'replaces'))) - for alias in related: - name = alias.name - if name.startswith("rfc"): - name = name.upper() - if not name in docipr: - docipr[name] = [] - docipr[name].append(o.disclosure_id) + doc = o.document + name = doc.name + related_set = set([doc]) | set(doc.all_related_that_doc(('obs', 'replaces'))) + for related in related_set: + name = related.name + if name.startswith("rfc"): + name = name.upper() + if not name in docipr: + docipr[name] = [] + docipr[name].append(o.disclosure_id) lines = [ "# Machine-readable list of IPR disclosures by Internet-Draft name" ] for name, iprs in docipr.items(): diff --git a/ietf/ipr/views.py b/ietf/ipr/views.py index e2ddb3bcc..a061232b8 100644 --- a/ietf/ipr/views.py +++ b/ietf/ipr/views.py @@ -18,7 +18,7 @@ from django.utils.html import escape import debug # pyflakes:ignore -from ietf.doc.models import DocAlias +from ietf.doc.models import Document from ietf.group.models import Role, Group from ietf.ietfauth.utils import role_required, has_role from ietf.ipr.mail import (message_from_message, get_reply_to, get_update_submitter_emails) @@ -38,7 +38,7 @@ from ietf.message.models import Message from ietf.message.utils import infer_message from ietf.name.models import IprLicenseTypeName from ietf.person.models import Person -from ietf.secr.utils.document import get_rfc_num, is_draft +from ietf.utils import log from ietf.utils.draft_search import normalize_draftname from ietf.utils.mail import send_mail, send_mail_message from ietf.utils.response import permission_denied @@ -69,12 +69,15 @@ def get_document_emails(ipr): has been posted""" messages = [] for rel in ipr.iprdocrel_set.all(): - doc = rel.document.document + doc = rel.document - if is_draft(doc): + if doc.type_id=="draft": doc_info = 'Internet-Draft entitled "{}"
({})'.format(doc.title,doc.name) + elif doc.type_id=="rfc": + doc_info = 'RFC entitled "{}" (RFC{})'.format(doc.title, doc.rfc_number) else: - doc_info = 'RFC entitled "{}" (RFC{})'.format(doc.title,get_rfc_num(doc)) + log.unreachable("2023-08-15") + return "" addrs = gather_address_lists('ipr_posted_on_doc',doc=doc).as_strings(compact=False) @@ -674,17 +677,18 @@ def search(request): doc = q if docid: - start = DocAlias.objects.filter(name__iexact=docid) - elif search_type == "draft": - q = normalize_draftname(q) - start = DocAlias.objects.filter(name__icontains=q, name__startswith="draft") - else: # search_type == "rfc" - start = DocAlias.objects.filter(name="rfc%s" % q.lstrip("0")) - + start = Document.objects.filter(name__iexact=docid) + else: + if search_type == "draft": + q = normalize_draftname(q) + start = Document.objects.filter(name__icontains=q, name__startswith="draft") + elif search_type == "rfc": + start = Document.objects.filter(name="rfc%s" % q.lstrip("0")) + # one match if len(start) == 1: first = start[0] - doc = first.document + doc = first docs = related_docs(first) iprs = iprs_from_docs(docs,states=states) template = "ipr/search_doc_result.html" @@ -716,27 +720,27 @@ def search(request): # Search by wg acronym # Document list with IPRs elif search_type == "group": - docs = list(DocAlias.objects.filter(docs__group=q)) + docs = list(Document.objects.filter(group=q)) related = [] for doc in docs: doc.product_of_this_wg = True related += related_docs(doc) iprs = iprs_from_docs(list(set(docs+related)),states=states) - docs = [ doc for doc in docs if doc.document.ipr() ] - docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True) + docs = [ doc for doc in docs if doc.ipr() ] + docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.ipr()]), reverse=True) template = "ipr/search_wg_result.html" q = Group.objects.get(id=q).acronym # make acronym for use in template # Search by rfc and id title # Document list with IPRs elif search_type == "doctitle": - docs = list(DocAlias.objects.filter(docs__title__icontains=q)) + docs = list(Document.objects.filter(title__icontains=q)) related = [] for doc in docs: related += related_docs(doc) iprs = iprs_from_docs(list(set(docs+related)),states=states) - docs = [ doc for doc in docs if doc.document.ipr() ] - docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True) + docs = [ doc for doc in docs if doc.ipr() ] + docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.ipr()]), reverse=True) template = "ipr/search_doctitle_result.html" # Search by title of IPR disclosure diff --git a/ietf/liaisons/forms.py b/ietf/liaisons/forms.py index 605c19902..0a6974e5b 100644 --- a/ietf/liaisons/forms.py +++ b/ietf/liaisons/forms.py @@ -31,7 +31,7 @@ from ietf.liaisons.fields import SearchableLiaisonStatementsField from ietf.group.models import Group from ietf.person.models import Email from ietf.person.fields import SearchableEmailField -from ietf.doc.models import Document, DocAlias +from ietf.doc.models import Document from ietf.utils.fields import DatepickerDateField from ietf.utils.timezone import date_today, datetime_from_date, DEADLINE_TZINFO from functools import reduce @@ -375,8 +375,6 @@ class LiaisonModelForm(forms.ModelForm): uploaded_filename = name + extension, ) ) - if created: - DocAlias.objects.create(name=attach.name).docs.add(attach) 
LiaisonStatementAttachment.objects.create(statement=self.instance,document=attach) attach_file = io.open(os.path.join(settings.LIAISON_ATTACH_PATH, attach.name + extension), 'wb') attach_file.write(attached_file.read()) diff --git a/ietf/mailtrigger/migrations/0005_rfc_recipients.py b/ietf/mailtrigger/migrations/0005_rfc_recipients.py new file mode 100644 index 000000000..dee49d913 --- /dev/null +++ b/ietf/mailtrigger/migrations/0005_rfc_recipients.py @@ -0,0 +1,25 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations + + +def forward(apps, schema_editor): + Recipient = apps.get_model("mailtrigger", "Recipient") + Recipient.objects.filter(slug="doc_authors").update( + template='{% if doc.type_id == "draft" or doc.type_id == "rfc" %}<{{doc.name}}@ietf.org>{% endif %}' + ) + + +def reverse(apps, schema_editor): + Recipient = apps.get_model("mailtrigger", "Recipient") + Recipient.objects.filter(slug="doc_authors").update( + template='{% if doc.type_id == "draft" %}<{{doc.name}}@ietf.org>{% endif %}' + ) + + +class Migration(migrations.Migration): + dependencies = [ + ("mailtrigger", "0004_slides_approved"), + ] + + operations = [migrations.RunPython(forward, reverse)] diff --git a/ietf/mailtrigger/models.py b/ietf/mailtrigger/models.py index 171dbd85e..1cf84033b 100644 --- a/ietf/mailtrigger/models.py +++ b/ietf/mailtrigger/models.py @@ -96,35 +96,35 @@ class Recipient(models.Model): addrs = [] if 'doc' in kwargs: for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')): - addrs.extend(Recipient.objects.get(slug='doc_authors').gather(**{'doc':reldoc.document})) + addrs.extend(Recipient.objects.get(slug='doc_authors').gather(**{'doc':reldoc})) return addrs def gather_doc_affecteddoc_group_chairs(self, **kwargs): addrs = [] if 'doc' in kwargs: for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')): - addrs.extend(Recipient.objects.get(slug='doc_group_chairs').gather(**{'doc':reldoc.document})) + addrs.extend(Recipient.objects.get(slug='doc_group_chairs').gather(**{'doc':reldoc})) return addrs def gather_doc_affecteddoc_notify(self, **kwargs): addrs = [] if 'doc' in kwargs: for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')): - addrs.extend(Recipient.objects.get(slug='doc_notify').gather(**{'doc':reldoc.document})) + addrs.extend(Recipient.objects.get(slug='doc_notify').gather(**{'doc':reldoc})) return addrs def gather_conflict_review_stream_manager(self, **kwargs): addrs = [] if 'doc' in kwargs: for reldoc in kwargs['doc'].related_that_doc(('conflrev',)): - addrs.extend(Recipient.objects.get(slug='doc_stream_manager').gather(**{'doc':reldoc.document})) + addrs.extend(Recipient.objects.get(slug='doc_stream_manager').gather(**{'doc':reldoc})) return addrs def gather_conflict_review_steering_group(self,**kwargs): addrs = [] if 'doc' in kwargs: for reldoc in kwargs['doc'].related_that_doc(('conflrev',)): - if reldoc.document.stream_id=='irtf': + if reldoc.stream_id=='irtf': addrs.append('"Internet Research Steering Group" ') return addrs diff --git a/ietf/mailtrigger/utils.py b/ietf/mailtrigger/utils.py index 496f20dc2..d8b23ff05 100644 --- a/ietf/mailtrigger/utils.py +++ b/ietf/mailtrigger/utils.py @@ -2,44 +2,53 @@ from collections import namedtuple -import debug # pyflakes:ignore +import debug # pyflakes:ignore from ietf.mailtrigger.models import MailTrigger, Recipient from ietf.submit.models import Submission from ietf.utils.mail import excludeaddrs -class 
AddrLists(namedtuple('AddrLists',['to','cc'])): +class AddrLists(namedtuple("AddrLists", ["to", "cc"])): __slots__ = () - def as_strings(self,compact=True): - + def as_strings(self, compact=True): separator = ", " if compact else ",\n " to_string = separator.join(self.to) cc_string = separator.join(self.cc) - return namedtuple('AddrListsAsStrings',['to','cc'])(to=to_string,cc=cc_string) + return namedtuple("AddrListsAsStrings", ["to", "cc"])( + to=to_string, cc=cc_string + ) -def gather_address_lists(slug, skipped_recipients=None, create_from_slug_if_not_exists=None, - desc_if_not_exists=None, **kwargs): - mailtrigger = get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists) +def gather_address_lists( + slug, + skipped_recipients=None, + create_from_slug_if_not_exists=None, + desc_if_not_exists=None, + **kwargs +): + mailtrigger = get_mailtrigger( + slug, create_from_slug_if_not_exists, desc_if_not_exists + ) to = set() for recipient in mailtrigger.to.all(): to.update(recipient.gather(**kwargs)) - to.discard('') + to.discard("") if skipped_recipients: to = excludeaddrs(to, skipped_recipients) cc = set() for recipient in mailtrigger.cc.all(): cc.update(recipient.gather(**kwargs)) - cc.discard('') + cc.discard("") if skipped_recipients: cc = excludeaddrs(cc, skipped_recipients) - return AddrLists(to=sorted(list(to)),cc=sorted(list(cc))) + return AddrLists(to=sorted(list(to)), cc=sorted(list(cc))) + def get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists): try: @@ -50,77 +59,99 @@ def get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists): mailtrigger = MailTrigger.objects.create(slug=slug, desc=desc_if_not_exists) mailtrigger.to.set(template.to.all()) mailtrigger.cc.set(template.cc.all()) - if slug.startswith('review_completed') and slug.endswith('early'): - mailtrigger.cc.remove('ietf_last_call') + if slug.startswith("review_completed") and slug.endswith("early"): + mailtrigger.cc.remove("ietf_last_call") else: raise return mailtrigger def gather_relevant_expansions(**kwargs): - def starts_with(prefix): - return MailTrigger.objects.filter(slug__startswith=prefix).values_list('slug',flat=True) + return MailTrigger.objects.filter(slug__startswith=prefix).values_list( + "slug", flat=True + ) - relevant = set() - - if 'doc' in kwargs: + relevant = set() - doc = kwargs['doc'] + if "doc" in kwargs: + doc = kwargs["doc"] - relevant.add('doc_state_edited') - - if not doc.type_id in ['bofreq', 'statement']: - relevant.update(['doc_telechat_details_changed','ballot_deferred','iesg_ballot_saved']) + relevant.add("doc_state_edited") - if doc.type_id in ['draft','statchg']: - relevant.update(starts_with('last_call_')) + if not doc.type_id in ["bofreq", "statement", "rfc"]: + relevant.update( + ["doc_telechat_details_changed", "ballot_deferred", "iesg_ballot_saved"] + ) - if doc.type_id == 'draft': - relevant.update(starts_with('doc_')) - relevant.update(starts_with('resurrection_')) - relevant.update(['ipr_posted_on_doc',]) - if doc.stream_id == 'ietf': - relevant.update(['ballot_approved_ietf_stream','pubreq_iesg']) + if doc.type_id in ["draft", "statchg"]: + relevant.update(starts_with("last_call_")) + + if doc.type_id == "rfc": + relevant.update( + [ + "doc_added_comment", + "doc_external_resource_change_requested", + "doc_state_edited", + "ipr_posted_on_doc", + ] + ) + + if doc.type_id == "draft": + relevant.update(starts_with("doc_")) + relevant.update(starts_with("resurrection_")) + relevant.update( + [ + "ipr_posted_on_doc", + ] + ) + if 
doc.stream_id == "ietf": + relevant.update(["ballot_approved_ietf_stream", "pubreq_iesg"]) else: - relevant.update(['pubreq_rfced']) - last_submission = Submission.objects.filter(name=doc.name,state='posted').order_by('-rev').first() - if last_submission and 'submission' not in kwargs: - kwargs['submission'] = last_submission + relevant.update(["pubreq_rfced"]) + last_submission = ( + Submission.objects.filter(name=doc.name, state="posted") + .order_by("-rev") + .first() + ) + if last_submission and "submission" not in kwargs: + kwargs["submission"] = last_submission - if doc.type_id == 'conflrev': - relevant.update(['conflrev_requested','ballot_approved_conflrev']) - if doc.type_id == 'charter': - relevant.update(['charter_external_review','ballot_approved_charter']) + if doc.type_id == "conflrev": + relevant.update(["conflrev_requested", "ballot_approved_conflrev"]) + if doc.type_id == "charter": + relevant.update(["charter_external_review", "ballot_approved_charter"]) - if doc.type_id == 'bofreq': - relevant.update(starts_with('bofreq')) + if doc.type_id == "bofreq": + relevant.update(starts_with("bofreq")) - if 'group' in kwargs: - - relevant.update(starts_with('group_')) - relevant.update(starts_with('milestones_')) - group = kwargs['group'] + if "group" in kwargs: + relevant.update(starts_with("group_")) + relevant.update(starts_with("milestones_")) + group = kwargs["group"] if group.features.acts_like_wg: - relevant.update(starts_with('session_')) + relevant.update(starts_with("session_")) if group.features.has_chartering_process: - relevant.update(['charter_external_review',]) + relevant.update( + [ + "charter_external_review", + ] + ) - if 'submission' in kwargs: - - relevant.update(starts_with('sub_')) + if "submission" in kwargs: + relevant.update(starts_with("sub_")) rule_list = [] for mailtrigger in MailTrigger.objects.filter(slug__in=relevant): - addrs = gather_address_lists(mailtrigger.slug,**kwargs) + addrs = gather_address_lists(mailtrigger.slug, **kwargs) if addrs.to or addrs.cc: - rule_list.append((mailtrigger.slug,mailtrigger.desc,addrs.to,addrs.cc)) + rule_list.append((mailtrigger.slug, mailtrigger.desc, addrs.to, addrs.cc)) return sorted(rule_list) + def get_base_submission_message_address(): - return Recipient.objects.get(slug='submission_manualpost_handling').gather()[0] + return Recipient.objects.get(slug="submission_manualpost_handling").gather()[0] + def get_base_ipr_request_address(): - return Recipient.objects.get(slug='ipr_requests').gather()[0] - - + return Recipient.objects.get(slug="ipr_requests").gather()[0] diff --git a/ietf/meeting/forms.py b/ietf/meeting/forms.py index 822f56b97..84853b83e 100644 --- a/ietf/meeting/forms.py +++ b/ietf/meeting/forms.py @@ -19,7 +19,7 @@ from django.utils.functional import cached_property import debug # pyflakes:ignore -from ietf.doc.models import Document, DocAlias, State, NewRevisionDocEvent +from ietf.doc.models import Document, State, NewRevisionDocEvent from ietf.group.models import Group from ietf.group.utils import groups_managed_by from ietf.meeting.models import Session, Meeting, Schedule, countries, timezones, TimeSlot, Room @@ -341,7 +341,6 @@ class InterimSessionModelForm(forms.ModelForm): # FIXME: What about agendas in html or markdown format? 
uploaded_filename='{}-00.txt'.format(filename)) doc.set_state(State.objects.get(type__slug=doc.type.slug, slug='active')) - DocAlias.objects.create(name=doc.name).docs.add(doc) self.instance.sessionpresentation_set.create(document=doc, rev=doc.rev) NewRevisionDocEvent.objects.create( type='new_revision', diff --git a/ietf/meeting/models.py b/ietf/meeting/models.py index 8fadf124d..20fa9cf1b 100644 --- a/ietf/meeting/models.py +++ b/ietf/meeting/models.py @@ -26,7 +26,6 @@ from django.conf import settings from django.urls import reverse as urlreverse from django.utils import timezone from django.utils.text import slugify -from django.utils.safestring import mark_safe from ietf.dbtemplate.models import DBTemplate from ietf.doc.models import Document @@ -582,19 +581,23 @@ class TimeSlot(models.Model): self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting else None]).first() return self._session_cache - def meeting_date(self): - return self.time.date() + # Unused + # + # def meeting_date(self): + # return self.time.date() - def registration(self): - # below implements a object local cache - # it tries to find a timeslot of type registration which starts at the same time as this slot - # so that it can be shown at the top of the agenda. - if not hasattr(self, '_reg_info'): - try: - self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg") - except TimeSlot.DoesNotExist: - self._reg_info = None - return self._reg_info + # Unused + # + # def registration(self): + # # below implements a object local cache + # # it tries to find a timeslot of type registration which starts at the same time as this slot + # # so that it can be shown at the top of the agenda. 
+ # if not hasattr(self, '_reg_info'): + # try: + # self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg") + # except TimeSlot.DoesNotExist: + # self._reg_info = None + # return self._reg_info def __str__(self): location = self.get_location() @@ -621,30 +624,33 @@ class TimeSlot(models.Model): def get_location(self): return self.get_hidden_location() if self.show_location else "" - def get_functional_location(self): - name_parts = [] - room = self.location - if room and room.functional_name: - name_parts.append(room.functional_name) - location = self.get_hidden_location() - if location: - name_parts.append(location) - return ' - '.join(name_parts) + # Unused + # + # def get_functional_location(self): + # name_parts = [] + # room = self.location + # if room and room.functional_name: + # name_parts.append(room.functional_name) + # location = self.get_hidden_location() + # if location: + # name_parts.append(location) + # return ' - '.join(name_parts) - def get_html_location(self): - if not hasattr(self, '_cached_html_location'): - self._cached_html_location = self.get_location() - if len(self._cached_html_location) > 8: - self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/')) - else: - self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', ' ')) - return self._cached_html_location + # def get_html_location(self): + # if not hasattr(self, '_cached_html_location'): + # self._cached_html_location = self.get_location() + # if len(self._cached_html_location) > 8: + # self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/')) + # else: + # self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', ' ')) + # return self._cached_html_location def tz(self): return self.meeting.tz() - def tzname(self): - return self.tz().tzname(self.time) + # Unused + # def tzname(self): + # return self.tz().tzname(self.time) def utc_start_time(self): return self.time.astimezone(pytz.utc) # USE_TZ is True, so time is aware @@ -658,30 +664,32 @@ class TimeSlot(models.Model): def local_end_time(self): return (self.time.astimezone(pytz.utc) + self.duration).astimezone(self.tz()) - @property - def js_identifier(self): - # this returns a unique identifier that is js happy. - # {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}" - # also must match: - # {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}} - dom_id="ts%u" % (self.pk) - if self.location is not None: - dom_id = self.location.dom_id() - return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M')) + # Unused + # + # @property + # def js_identifier(self): + # # this returns a unique identifier that is js happy. + # # {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}" + # # also must match: + # # {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}} + # dom_id="ts%u" % (self.pk) + # if self.location is not None: + # dom_id = self.location.dom_id() + # return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M')) - def delete_concurrent_timeslots(self): - """Delete all timeslots which are in the same time as this slot""" - # can not include duration in filter, because there is no support - # for having it a WHERE clause. - # below will delete self as well. 
- for ts in self.meeting.timeslot_set.filter(time=self.time).all(): - if ts.duration!=self.duration: - continue + # def delete_concurrent_timeslots(self): + # """Delete all timeslots which are in the same time as this slot""" + # # can not include duration in filter, because there is no support + # # for having it a WHERE clause. + # # below will delete self as well. + # for ts in self.meeting.timeslot_set.filter(time=self.time).all(): + # if ts.duration!=self.duration: + # continue - # now remove any schedule that might have been made to this - # timeslot. - ts.sessionassignments.all().delete() - ts.delete() + # # now remove any schedule that might have been made to this + # # timeslot. + # ts.sessionassignments.all().delete() + # ts.delete() """ Find a timeslot that comes next, in the same room. It must be on the same day, diff --git a/ietf/meeting/tests_views.py b/ietf/meeting/tests_views.py index 4db762213..47e2334f4 100644 --- a/ietf/meeting/tests_views.py +++ b/ietf/meeting/tests_views.py @@ -552,7 +552,7 @@ class MeetingTests(BaseMeetingTestCase): if material.type_id == 'draft': expected_url = urlreverse( 'ietf.doc.views_doc.document_main', - kwargs={'name': material.canonical_name()}, + kwargs={'name': material.name}, ) else: expected_url = material.get_href(meeting) @@ -563,7 +563,7 @@ class MeetingTests(BaseMeetingTestCase): if material.type_id == 'draft': expected_url = urlreverse( 'ietf.doc.views_doc.document_main', - kwargs={'name': material.canonical_name()}, + kwargs={'name': material.name}, ) else: expected_url = material.get_href(meeting) @@ -7773,7 +7773,7 @@ class ProceedingsTests(BaseMeetingTestCase): if material.type_id == 'draft': expected_url = urlreverse( 'ietf.doc.views_doc.document_main', - kwargs={'name': material.canonical_name()}, + kwargs={'name': material.name}, ) else: expected_url = material.get_href(meeting) @@ -7784,7 +7784,7 @@ class ProceedingsTests(BaseMeetingTestCase): if material.type_id == 'draft': expected_url = urlreverse( 'ietf.doc.views_doc.document_main', - kwargs={'name': material.canonical_name()}, + kwargs={'name': material.name}, ) else: expected_url = material.get_href(meeting) diff --git a/ietf/meeting/utils.py b/ietf/meeting/utils.py index b8bb08247..416e9c61f 100644 --- a/ietf/meeting/utils.py +++ b/ietf/meeting/utils.py @@ -20,7 +20,7 @@ import debug # pyflakes:ignore from ietf.dbtemplate.models import DBTemplate from ietf.meeting.models import (Session, SchedulingEvent, TimeSlot, Constraint, SchedTimeSessAssignment, SessionPresentation, Attended) -from ietf.doc.models import Document, DocAlias, State, NewRevisionDocEvent +from ietf.doc.models import Document, State, NewRevisionDocEvent from ietf.doc.models import DocEvent from ietf.group.models import Group from ietf.group.utils import can_manage_materials @@ -596,7 +596,6 @@ def save_session_minutes_revision(session, file, ext, request, encoding=None, ap group = session.group, rev = '00', ) - DocAlias.objects.create(name=doc.name).docs.add(doc) doc.states.add(State.objects.get(type_id='minutes',slug='active')) if session.sessionpresentation_set.filter(document=doc).exists(): sp = session.sessionpresentation_set.get(document=doc) @@ -720,7 +719,6 @@ def new_doc_for_session(type_id, session): rev = '00', ) doc.states.add(State.objects.get(type_id=type_id, slug='active')) - DocAlias.objects.create(name=doc.name).docs.add(doc) session.sessionpresentation_set.create(document=doc,rev='00') return doc @@ -753,8 +751,6 @@ def create_recording(session, url, title=None, user=None): rev='00', 
type_id='recording') doc.set_state(State.objects.get(type='recording', slug='active')) - - DocAlias.objects.create(name=doc.name).docs.add(doc) # create DocEvent NewRevisionDocEvent.objects.create(type='new_revision', @@ -773,11 +769,11 @@ def get_next_sequence(group, meeting, type): Returns the next sequence number to use for a document of type = type. Takes a group=Group object, meeting=Meeting object, type = string ''' - aliases = DocAlias.objects.filter(name__startswith='{}-{}-{}-'.format(type, meeting.number, group.acronym)) - if not aliases: + docs = Document.objects.filter(name__startswith='{}-{}-{}-'.format(type, meeting.number, group.acronym)) + if not docs: return 1 - aliases = aliases.order_by('name') - sequence = int(aliases.last().name.split('-')[-1]) + 1 + docs = docs.order_by('name') + sequence = int(docs.last().name.split('-')[-1]) + 1 return sequence def get_activity_stats(sdate, edate): diff --git a/ietf/meeting/views.py b/ietf/meeting/views.py index e97e8a7eb..ab3926639 100644 --- a/ietf/meeting/views.py +++ b/ietf/meeting/views.py @@ -48,7 +48,7 @@ from django.views.generic import RedirectView import debug # pyflakes:ignore from ietf.doc.fields import SearchableDocumentsField -from ietf.doc.models import Document, State, DocEvent, NewRevisionDocEvent, DocAlias +from ietf.doc.models import Document, State, DocEvent, NewRevisionDocEvent from ietf.group.models import Group from ietf.group.utils import can_manage_session_materials, can_manage_some_groups, can_manage_group from ietf.person.models import Person, User @@ -238,7 +238,7 @@ def _get_materials_doc(meeting, name): docname, rev = name.rsplit("-", 1) if len(rev) == 2 and rev.isdigit(): doc = Document.objects.get(name=docname) # may raise Document.DoesNotExist - if doc.get_related_meeting() == meeting and rev in doc.revisions(): + if doc.get_related_meeting() == meeting and rev in doc.revisions_by_newrevisionevent(): return doc, rev # give up raise Document.DoesNotExist @@ -248,7 +248,6 @@ def _get_materials_doc(meeting, name): def materials_document(request, document, num=None, ext=None): meeting=get_meeting(num,type_in=['ietf','interim']) num = meeting.number - # This view does not allow the use of DocAliases. Right now we are probably only creating one (identity) alias, but that may not hold in the future. 
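# Side note on get_next_sequence() above (a sketch only, not part of this change): the
# name ordering is lexical, so a "-9" suffix sorts after "-10" and last() may not hold
# the highest sequence once a series passes nine documents. A numeric variant, assuming
# the trailing name component is always an integer, would be:
#
#     sequence = max(int(d.name.split('-')[-1]) for d in docs) + 1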
try: doc, rev = _get_materials_doc(meeting=meeting, name=document) except Document.DoesNotExist: @@ -2595,7 +2594,6 @@ def save_bluesheet(request, session, file, encoding='utf-8'): rev = '00', ) doc.states.add(State.objects.get(type_id='bluesheets',slug='active')) - DocAlias.objects.create(name=doc.name).docs.add(doc) session.sessionpresentation_set.create(document=doc,rev='00') filename = '%s-%s%s'% ( doc.name, doc.rev, ext) doc.uploaded_filename = filename @@ -2724,7 +2722,6 @@ def upload_session_agenda(request, session_id, num): group = session.group, rev = '00', ) - DocAlias.objects.create(name=doc.name).docs.add(doc) doc.states.add(State.objects.get(type_id='agenda',slug='active')) if session.sessionpresentation_set.filter(document=doc).exists(): sp = session.sessionpresentation_set.get(document=doc) @@ -2817,7 +2814,6 @@ def upload_session_slides(request, session_id, num, name=None): group = session.group, rev = '00', ) - DocAlias.objects.create(name=doc.name).docs.add(doc) doc.states.add(State.objects.get(type_id='slides',slug='active')) doc.states.add(State.objects.get(type_id='reuse_policy',slug='single')) if session.sessionpresentation_set.filter(document=doc).exists(): @@ -4551,7 +4547,6 @@ def approve_proposed_slides(request, slidesubmission_id, num): group = submission.session.group, rev = '00', ) - DocAlias.objects.create(name=doc.name).docs.add(doc) doc.states.add(State.objects.get(type_id='slides',slug='active')) doc.states.add(State.objects.get(type_id='reuse_policy',slug='single')) if submission.session.sessionpresentation_set.filter(document=doc).exists(): diff --git a/ietf/meeting/views_proceedings.py b/ietf/meeting/views_proceedings.py index 87b7ffea3..d1169bff2 100644 --- a/ietf/meeting/views_proceedings.py +++ b/ietf/meeting/views_proceedings.py @@ -8,7 +8,7 @@ from django.shortcuts import render, redirect, get_object_or_404 import debug # pyflakes:ignore from ietf.doc.utils import add_state_change_event -from ietf.doc.models import DocAlias, DocEvent, Document, NewRevisionDocEvent, State +from ietf.doc.models import DocEvent, Document, NewRevisionDocEvent, State from ietf.ietfauth.utils import role_required from ietf.meeting.forms import FileUploadForm from ietf.meeting.models import Meeting, MeetingHost @@ -98,10 +98,6 @@ def save_proceedings_material_doc(meeting, material_type, title, request, file=N ) created = True - # do this even if we did not create the document, just to be sure the alias exists - alias, _ = DocAlias.objects.get_or_create(name=doc.name) - alias.docs.add(doc) - if file: if not created: doc.rev = '{:02}'.format(int(doc.rev) + 1) diff --git a/ietf/name/fixtures/names.json b/ietf/name/fixtures/names.json index a039af3c4..fc46970f9 100644 --- a/ietf/name/fixtures/names.json +++ b/ietf/name/fixtures/names.json @@ -2565,6 +2565,19 @@ "model": "doc.state", "pk": 176 }, + { + "fields": { + "desc": "", + "name": "Published", + "next_states": [], + "order": 1, + "slug": "published", + "type": "rfc", + "used": true + }, + "model": "doc.state", + "pk": 177 + }, { "fields": { "label": "State" @@ -2572,6 +2585,13 @@ "model": "doc.statetype", "pk": "agenda" }, + { + "fields": { + "label": "bcp state" + }, + "model": "doc.statetype", + "pk": "bcp" + }, { "fields": { "label": "State" @@ -2691,6 +2711,13 @@ "model": "doc.statetype", "pk": "draft-stream-ise" }, + { + "fields": { + "label": "fyi state" + }, + "model": "doc.statetype", + "pk": "fyi" + }, { "fields": { "label": "State" @@ -2747,6 +2774,13 @@ "model": "doc.statetype", "pk": "review" }, + { + 
"fields": { + "label": "State" + }, + "model": "doc.statetype", + "pk": "rfc" + }, { "fields": { "label": "Shepherd's Writeup State" @@ -2775,6 +2809,13 @@ "model": "doc.statetype", "pk": "statement" }, + { + "fields": { + "label": "std state" + }, + "model": "doc.statetype", + "pk": "std" + }, { "fields": { "about_page": "ietf.group.views.group_about", @@ -5903,7 +5944,7 @@ { "fields": { "desc": "The document's authors", - "template": "{% if doc.type_id == \"draft\" %}<{{doc.name}}@ietf.org>{% endif %}" + "template": "{% if doc.type_id == \"draft\" or doc.type_id == \"rfc\" %}<{{doc.name}}@ietf.org>{% endif %}" }, "model": "mailtrigger.recipient", "pk": "doc_authors" @@ -10052,6 +10093,17 @@ "model": "name.dbtemplatetypename", "pk": "rst" }, + { + "fields": { + "desc": "", + "name": "became RFC", + "order": 0, + "revname": "came from draft", + "used": true + }, + "model": "name.docrelationshipname", + "pk": "became_rfc" + }, { "fields": { "desc": "", @@ -10063,6 +10115,17 @@ "model": "name.docrelationshipname", "pk": "conflrev" }, + { + "fields": { + "desc": "This document contains other documents (e.g., STDs contain RFCs)", + "name": "Contains", + "order": 0, + "revname": "Is part of", + "used": true + }, + "model": "name.docrelationshipname", + "pk": "contains" + }, { "fields": { "desc": "Approval for downref", @@ -10579,6 +10642,17 @@ "model": "name.doctypename", "pk": "agenda" }, + { + "fields": { + "desc": "", + "name": "Best Current Practice", + "order": 0, + "prefix": "bcp", + "used": true + }, + "model": "name.doctypename", + "pk": "bcp" + }, { "fields": { "desc": "", @@ -10645,6 +10719,17 @@ "model": "name.doctypename", "pk": "draft" }, + { + "fields": { + "desc": "", + "name": "For Your Information", + "order": 0, + "prefix": "fyi", + "used": true + }, + "model": "name.doctypename", + "pk": "fyi" + }, { "fields": { "desc": "", @@ -10722,6 +10807,17 @@ "model": "name.doctypename", "pk": "review" }, + { + "fields": { + "desc": "", + "name": "RFC", + "order": 0, + "prefix": "rfc", + "used": true + }, + "model": "name.doctypename", + "pk": "rfc" + }, { "fields": { "desc": "", @@ -10766,6 +10862,17 @@ "model": "name.doctypename", "pk": "statement" }, + { + "fields": { + "desc": "", + "name": "Standard", + "order": 0, + "prefix": "std", + "used": true + }, + "model": "name.doctypename", + "pk": "std" + }, { "fields": { "desc": "", @@ -11796,7 +11903,7 @@ { "fields": { "default_offset_days": -57, - "desc": "Cut-off date for BOF proposal requests. To request a BOF, please see instructions at https://www.ietf.org/how/bofs/bof-procedures on Requesting a BOF", + "desc": "Cut-off date for BOF proposal requests. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).", "name": "Cut-off preliminary BOF requests", "order": 0, "used": true @@ -11807,7 +11914,7 @@ { "fields": { "default_offset_days": -57, - "desc": "Preliminary BOF proposals requested. To request a BOF, please see instructions on requesting a BOF at https://www.ietf.org/how/bofs/bof-procedures/", + "desc": "Preliminary BOF proposals requested. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).", "name": "Preliminary BOF proposals requested", "order": 0, "used": false @@ -11840,7 +11947,7 @@ { "fields": { "default_offset_days": -43, - "desc": "Cut-off date for BOF proposal requests to Area Directors at UTC 23:59", + "desc": "Cut-off date for BOF proposal requests to Area Directors at UTC 23:59. 
To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).", "name": "Cut-off BOF scheduling Requests", "order": 0, "used": false @@ -11884,7 +11991,7 @@ { "fields": { "default_offset_days": -43, - "desc": "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59", + "desc": "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59. To request a __Working Group__ session, use the [IETF Meeting Session Request Tool](/secr/sreq/).", "name": "Cut-off WG scheduling Requests", "order": 0, "used": true @@ -11939,7 +12046,7 @@ { "fields": { "default_offset_days": -12, - "desc": "Internet-Draft submission cut-off (for all Internet-Drafts, including -00) by UTC 23:59", + "desc": "Internet-Draft submission cut-off (for all Internet-Drafts, including -00) by UTC 23:59. Upload using the [I-D Submission Tool](/submit/).", "name": "I-D Cutoff", "order": 0, "used": true @@ -11972,7 +12079,7 @@ { "fields": { "default_offset_days": -82, - "desc": "IETF Online Registration Opens", + "desc": "IETF Online Registration Opens [Register Here](https://www.ietf.org/how/meetings/register/).", "name": "Registration Opens", "order": 0, "used": true @@ -11983,7 +12090,7 @@ { "fields": { "default_offset_days": -89, - "desc": "Working Group and BOF scheduling begins", + "desc": "Working Group and BOF scheduling begins. To request a Working Group session, use the [IETF Meeting Session Request Tool](/secr/sreq/). If you are working on a BOF request, it is highly recommended to tell the IESG now by sending an [email to iesg@ietf.org](mailto:iesg@ietf.org) to get advance help with the request.", "name": "Scheduling Opens", "order": 0, "used": true diff --git a/ietf/name/migrations/0010_rfc_doctype_names.py b/ietf/name/migrations/0010_rfc_doctype_names.py new file mode 100644 index 000000000..8d7a565f2 --- /dev/null +++ b/ietf/name/migrations/0010_rfc_doctype_names.py @@ -0,0 +1,30 @@ +# Generated by Django 4.2.2 on 2023-06-14 20:39 + +from django.db import migrations + + +def forward(apps, schema_editor): + DocTypeName = apps.get_model("name", "DocTypeName") + DocTypeName.objects.get_or_create( + slug="rfc", + name="RFC", + used=True, + prefix="rfc", + ) + + DocRelationshipName = apps.get_model("name", "DocRelationshipName") + DocRelationshipName.objects.get_or_create( + slug="became_rfc", + name="became RFC", + used=True, + revname="came from draft", + ) + +class Migration(migrations.Migration): + dependencies = [ + ("name", "0009_iabworkshops"), + ] + + operations = [ + migrations.RunPython(forward), + ] diff --git a/ietf/name/migrations/0011_subseries.py b/ietf/name/migrations/0011_subseries.py new file mode 100644 index 000000000..b3fe10792 --- /dev/null +++ b/ietf/name/migrations/0011_subseries.py @@ -0,0 +1,38 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations + + +def forward(apps, schema_editor): + DocTypeName = apps.get_model("name", "DocTypeName") + DocRelationshipName = apps.get_model("name", "DocRelationshipName") + for slug, name, prefix in [ + ("std", "Standard", "std"), + ("bcp", "Best Current Practice", "bcp"), + ("fyi", "For Your Information", "fyi"), + ]: + DocTypeName.objects.create( + slug=slug, name=name, prefix=prefix, desc="", used=True + ) + DocRelationshipName.objects.create( + slug="contains", + name="Contains", + revname="Is part of", + desc="This document contains other documents (e.g., STDs contain RFCs)", + used=True, + ) + + +def reverse(apps, schema_editor): + DocTypeName = apps.get_model("name",
"DocTypeName") + DocRelationshipName = apps.get_model("name", "DocRelationshipName") + DocTypeName.objects.filter(slug__in=["std", "bcp", "fyi"]).delete() + DocRelationshipName.objects.filter(slug="contains").delete() + + +class Migration(migrations.Migration): + dependencies = [ + ("name", "0010_rfc_doctype_names"), + ] + + operations = [migrations.RunPython(forward, reverse)] diff --git a/ietf/name/migrations/0012_adjust_important_dates.py b/ietf/name/migrations/0012_adjust_important_dates.py new file mode 100644 index 000000000..7a3252bb5 --- /dev/null +++ b/ietf/name/migrations/0012_adjust_important_dates.py @@ -0,0 +1,29 @@ +# Copyright The IETF Trust 2023, All Rights Reserved + +from django.db import migrations + +def markdown_names(apps, schema_editor): + ImportantDateName = apps.get_model("name", "ImportantDateName") + changes = [ + ('bofproposals', "Preliminary BOF proposals requested. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests)."), + ('openreg', "IETF Online Registration Opens [Register Here](https://www.ietf.org/how/meetings/register/)."), + ('opensched', "Working Group and BOF scheduling begins. To request a Working Group session, use the [IETF Meeting Session Request Tool](/secr/sreq/). If you are working on a BOF request, it is highly recommended to tell the IESG now by sending an [email to iesg@ietf.org](mailto:iesg@ietf.org) to get advance help with the request."), + ('cutoffwgreq', "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59. To request a __Working Group__ session, use the [IETF Meeting Session Request Tool](/secr/sreq/)."), + ('idcutoff', "Internet-Draft submission cut-off (for all Internet-Drafts, including -00) by UTC 23:59. Upload using the [I-D Submission Tool](/submit/)."), + ('cutoffwgreq', "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59. To request a __Working Group__ session, use the [IETF Meeting Session Request Tool](/secr/sreq/)."), + ('bofprelimcutoff', "Cut-off date for BOF proposal requests. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests)."), + ('cutoffbofreq', "Cut-off date for BOF proposal requests to Area Directors at UTC 23:59. 
To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests)."), + ] + for slug, newDescription in changes: + datename = ImportantDateName.objects.get(pk=slug) # If the slug does not exist, then Django will throw an exception :-) + datename.desc = newDescription + datename.save() + +class Migration(migrations.Migration): + dependencies = [ + ("name", "0011_subseries"), + ] + + operations = [ + migrations.RunPython(markdown_names), + ] diff --git a/ietf/name/models.py b/ietf/name/models.py index b5adeccc6..9bedd6cc2 100644 --- a/ietf/name/models.py +++ b/ietf/name/models.py @@ -42,7 +42,7 @@ class DocRelationshipName(NameModel): class DocTypeName(NameModel): """Draft, Agenda, Minutes, Charter, Discuss, Guideline, Email, - Review, Issue, Wiki""" + Review, Issue, Wiki, RFC""" prefix = models.CharField(max_length=16, default="") class DocTagName(NameModel): """Waiting for Reference, IANA Coordination, Revised ID Needed, diff --git a/ietf/nomcom/views.py b/ietf/nomcom/views.py index 71f76679c..ce7ca9a82 100644 --- a/ietf/nomcom/views.py +++ b/ietf/nomcom/views.py @@ -57,7 +57,7 @@ def index(request): for nomcom in nomcom_list: year = int(nomcom.acronym[6:]) nomcom.year = year - nomcom.label = "%s/%s" % (year, year+1) + nomcom.label = str(year) if year > 2012: nomcom.url = "/nomcom/%04d" % year else: diff --git a/ietf/person/models.py b/ietf/person/models.py index 22c63d4a0..0bb2b149e 100644 --- a/ietf/person/models.py +++ b/ietf/person/models.py @@ -190,8 +190,8 @@ class Person(models.Model): def rfcs(self): from ietf.doc.models import Document - rfcs = list(Document.objects.filter(documentauthor__person=self, type='draft', states__slug='rfc')) - rfcs.sort(key=lambda d: d.canonical_name() ) + rfcs = list(Document.objects.filter(documentauthor__person=self, type='rfc')) + rfcs.sort(key=lambda d: d.name ) return rfcs def active_drafts(self): diff --git a/ietf/review/policies.py b/ietf/review/policies.py index 2b97fda14..6738db95f 100644 --- a/ietf/review/policies.py +++ b/ietf/review/policies.py @@ -7,7 +7,7 @@ from django.db.models.aggregates import Max from django.utils import timezone from simple_history.utils import bulk_update_with_history -from ietf.doc.models import DocumentAuthor, DocAlias +from ietf.doc.models import DocumentAuthor from ietf.doc.utils import extract_complete_replaces_ancestor_mapping_for_docs from ietf.group.models import Role from ietf.name.models import ReviewAssignmentStateName @@ -293,8 +293,6 @@ class AssignmentOrderResolver: def _collect_context(self): """Collect all relevant data about this team, document and review request.""" - self.doc_aliases = DocAlias.objects.filter(docs=self.doc).values_list("name", flat=True) - # This data is collected as a dict, keys being person IDs, values being numbers/objects. 
self.rotation_index = {p.pk: i for i, p in enumerate(self.rotation_list)} self.reviewer_settings = self._reviewer_settings_for_person_ids(self.possible_person_ids) @@ -360,8 +358,7 @@ class AssignmentOrderResolver: add_boolean_score(+1, email.person_id in self.wish_to_review, "wishes to review document") add_boolean_score(-1, email.person_id in self.connections, self.connections.get(email.person_id)) # reviewer is somehow connected: bad - add_boolean_score(-1, settings.filter_re and any( - re.search(settings.filter_re, n) for n in self.doc_aliases), "filter regexp matches") + add_boolean_score(-1, settings.filter_re and re.search(settings.filter_re, self.doc.name), "filter regexp matches") # minimum interval between reviews days_needed = self.days_needed_for_reviewers.get(email.person_id, 0) diff --git a/ietf/review/utils.py b/ietf/review/utils.py index a91bcbd62..8869efaee 100644 --- a/ietf/review/utils.py +++ b/ietf/review/utils.py @@ -50,6 +50,8 @@ def can_request_review_of_doc(user, doc): if not user.is_authenticated: return False + # This is in a strange place as it has nothing to do with the user + # but this utility is used in too many places to move this quickly. if doc.type_id == 'draft' and doc.get_state_slug() != 'active': return False diff --git a/ietf/secr/telechat/tests.py b/ietf/secr/telechat/tests.py index e4661b767..39949b83a 100644 --- a/ietf/secr/telechat/tests.py +++ b/ietf/secr/telechat/tests.py @@ -67,10 +67,8 @@ class SecrTelechatTestCase(TestCase): def test_doc_detail_draft_with_downref(self): ad = Person.objects.get(user__username="ad") draft = WgDraftFactory(ad=ad, intended_std_level_id='ps', states=[('draft-iesg','pub-req'),]) - rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',], - states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', ) - draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), - relationship_id='refnorm') + rfc = IndividualRfcFactory.create(stream_id='irtf', rfc_number=6666, std_level_id='inf') + draft.relateddocument_set.create(target=rfc, relationship_id='refnorm') create_ballot_if_not_open(None, draft, ad, 'approve') d = get_next_telechat_date() date = d.strftime('%Y-%m-%d') diff --git a/ietf/secr/telechat/views.py b/ietf/secr/telechat/views.py index f13a082f2..356a9b934 100644 --- a/ietf/secr/telechat/views.py +++ b/ietf/secr/telechat/views.py @@ -175,7 +175,7 @@ def doc_detail(request, date, name): This view displays the ballot information for the document, and lets the user make changes to ballot positions and document state. ''' - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) if not is_doc_on_telechat(doc, date): messages.warning(request, 'Dcoument: {name} is not on the Telechat agenda for {date}'.format( name=doc.name, @@ -313,7 +313,7 @@ def doc_detail(request, date, name): # if this is a conflict review document add referenced document if doc.type_id == 'conflrev': - conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document + conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target else: conflictdoc = None @@ -342,7 +342,7 @@ def doc_navigate(request, date, name, nav): nav - [next|previous] which direction the user wants to navigate in the list of docs The view retrieves the appropriate document and redirects to the doc view. 
''' - doc = get_object_or_404(Document, docalias__name=name) + doc = get_object_or_404(Document, name=name) agenda = agenda_data(date=date) target = name diff --git a/ietf/secr/templates/telechat/doc.html b/ietf/secr/templates/telechat/doc.html index 9d37db4cb..7891c1b1e 100644 --- a/ietf/secr/templates/telechat/doc.html +++ b/ietf/secr/templates/telechat/doc.html @@ -85,13 +85,13 @@
 {% if downrefs %}
     Downward References
     {% for ref in downrefs %}
-    Add {{ref.target.document.canonical_name}}
-    ({{ref.target.document.std_level}} - {{ref.target.document.stream.desc}})
+    Add {{ref.target.name}}
+    ({{ref.target.std_level}} - {{ref.target.stream.desc}})
     to downref registry.
-    {% if not ref.target.document.std_level %}
+    {% if not ref.target.std_level %}
     +++ Warning: The standards level has not been set yet!!!
     {% endif %}
-    {% if not ref.target.document.stream %}
+    {% if not ref.target.stream %}
     +++ Warning: document stream has not been set yet!!!
     {% endif %}
     {% endfor %}
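The template edit above follows from the data-model change made throughout this diff: RelatedDocument.target now points at a Document rather than at a DocAlias, so the extra .document hop and the canonical_name() call disappear. A minimal before/after sketch, assuming the datatracker models are importable; the helper name downref_label is illustrative and not part of the patch:

    from ietf.doc.models import RelatedDocument

    def downref_label(ref: RelatedDocument) -> str:
        # Before this change the target was a DocAlias, so code and templates
        # reached through ref.target.document.canonical_name(); now the target
        # is the Document itself and its name is used directly.
        return f"{ref.target.name} ({ref.target.std_level} - {ref.target.stream.desc})"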
diff --git a/ietf/secr/utils/document.py b/ietf/secr/utils/document.py index 0a34512a1..361bf836d 100644 --- a/ietf/secr/utils/document.py +++ b/ietf/secr/utils/document.py @@ -13,15 +13,6 @@ def get_full_path(doc): return None return os.path.join(doc.get_file_path(), doc.uploaded_filename) -def get_rfc_num(doc): - qs = doc.docalias.filter(name__startswith='rfc') - return qs[0].name[3:] if qs else None - -def is_draft(doc): - if doc.docalias.filter(name__startswith='rfc'): - return False - else: - return True def get_start_date(doc): ''' diff --git a/ietf/secr/utils/group.py b/ietf/secr/utils/group.py index a4c1c0f98..40a9065ac 100644 --- a/ietf/secr/utils/group.py +++ b/ietf/secr/utils/group.py @@ -3,11 +3,8 @@ # Python imports -import io -import os # Django imports -from django.conf import settings from django.core.exceptions import ObjectDoesNotExist # Datatracker imports @@ -15,27 +12,6 @@ from ietf.group.models import Group from ietf.ietfauth.utils import has_role - - -def current_nomcom(): - qs = Group.objects.filter(acronym__startswith='nomcom',state__slug="active").order_by('-time') - if qs.count(): - return qs[0] - else: - return None - -def get_charter_text(group): - ''' - Takes a group object and returns the text or the group's charter as a string - ''' - charter = group.charter - path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.canonical_name(), charter.rev)) - f = io.open(path,'r') - text = f.read() - f.close() - - return text - def get_my_groups(user,conclude=False): ''' Takes a Django user object (from request) diff --git a/ietf/settings.py b/ietf/settings.py index 5487c0de9..eb5066e97 100644 --- a/ietf/settings.py +++ b/ietf/settings.py @@ -598,7 +598,6 @@ TEST_CODE_COVERAGE_EXCLUDE_FILES = [ "ietf/utils/test_runner.py", "ietf/name/generate_fixtures.py", "ietf/review/import_from_review_tool.py", - "ietf/stats/backfill_data.py", "ietf/utils/patch.py", "ietf/utils/test_data.py", ] @@ -703,7 +702,7 @@ LIAISON_ATTACH_URL = 'https://www.ietf.org/lib/dt/documents/LIAISON/' # should e DOC_HREFS = { "charter": "https://www.ietf.org/charter/{doc.name}-{doc.rev}.txt", "draft": "https://www.ietf.org/archive/id/{doc.name}-{doc.rev}.txt", - "rfc": "https://www.rfc-editor.org/rfc/rfc{doc.rfcnum}.txt", + "rfc": "https://www.rfc-editor.org/rfc/rfc{doc.rfc_number}.txt", "slides": "https://www.ietf.org/slides/{doc.name}-{doc.rev}", "procmaterials": "https://www.ietf.org/procmaterials/{doc.name}-{doc.rev}", "conflrev": "https://www.ietf.org/cr/{doc.name}-{doc.rev}.txt", diff --git a/ietf/stats/backfill_data.py b/ietf/stats/backfill_data.py deleted file mode 100755 index 176ee3335..000000000 --- a/ietf/stats/backfill_data.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python -# Copyright The IETF Trust 2017-2020, All Rights Reserved -# -*- coding: utf-8 -*- - - -import io -import sys -import os -import os.path -import argparse -import time - -from typing import Set, Optional # pyflakes:ignore - -basedir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) -sys.path = [ basedir ] + sys.path -os.environ["DJANGO_SETTINGS_MODULE"] = "ietf.settings" - -virtualenv_activation = os.path.join(basedir, "env", "bin", "activate_this.py") -if os.path.exists(virtualenv_activation): - exec(compile(io.open(virtualenv_activation, "rb").read(), virtualenv_activation, 'exec'), dict(__file__=virtualenv_activation)) - -import django -django.setup() - -from django.conf import settings - -import debug # pyflakes:ignore - -from ietf.doc.models import Document -from ietf.name.models 
import FormalLanguageName -from ietf.utils.draft import PlaintextDraft - -parser = argparse.ArgumentParser() -parser.add_argument("--document", help="specific document name") -parser.add_argument("--words", action="store_true", help="fill in word count") -parser.add_argument("--formlang", action="store_true", help="fill in formal languages") -parser.add_argument("--authors", action="store_true", help="fill in author info") -args = parser.parse_args() - -formal_language_dict = { l.pk: l for l in FormalLanguageName.objects.all() } - -docs_qs = Document.objects.filter(type="draft") - -if args.document: - docs_qs = docs_qs.filter(docalias__name=args.document) - -ts = time.strftime("%Y-%m-%d_%H:%M%z") -logfile = io.open('backfill-authorstats-%s.log'%ts, 'w') -print("Writing log to %s" % os.path.abspath(logfile.name)) - -def say(msg): - msg = msg.encode('utf8') - sys.stderr.write(msg) - sys.stderr.write('\n') - logfile.write(msg) - logfile.write('\n') - -def unicode(text): - if text is None: - return text - # order matters here: - for encoding in ['ascii', 'utf8', 'latin1', ]: - try: - utext = text.decode(encoding) -# if encoding == 'latin1': -# say("Warning: falling back to latin1 decoding for %s ..." % utext[:216]]) - return utext - except UnicodeDecodeError: - pass - -start = time.time() -say("Running query for documents to process ...") -for doc in docs_qs.prefetch_related("docalias", "formal_languages", "documentauthor_set", "documentauthor_set__person", "documentauthor_set__person__alias_set"): - canonical_name = doc.name - for n in doc.docalias.all(): - if n.name.startswith("rfc"): - canonical_name = n.name - - if canonical_name.startswith("rfc"): - path = os.path.join(settings.RFC_PATH, canonical_name + ".txt") - else: - path = os.path.join(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR, canonical_name + "-" + doc.rev + ".txt") - - if not os.path.exists(path): - say("Skipping %s, no txt file found at %s" % (doc.name, path)) - continue - - with io.open(path, 'rb') as f: - say("\nProcessing %s" % doc.name) - sys.stdout.flush() - d = PlaintextDraft(unicode(f.read()), path) - - updated = False - - updates = {} - - if args.words: - words = d.get_wordcount() - if words != doc.words: - updates["words"] = words - - if args.formlang: - langs = d.get_formal_languages() - - new_formal_languages = set(formal_language_dict[l] for l in langs) - old_formal_languages = set(doc.formal_languages.all()) - - if new_formal_languages != old_formal_languages: - for l in new_formal_languages - old_formal_languages: - doc.formal_languages.add(l) - updated = True - for l in old_formal_languages - new_formal_languages: - doc.formal_languages.remove(l) - updated = True - - if args.authors: - old_authors = doc.documentauthor_set.all() - old_authors_by_name = {} - old_authors_by_email = {} - for author in old_authors: - for alias in author.person.alias_set.all(): - old_authors_by_name[alias.name] = author - old_authors_by_name[author.person.plain_name()] = author - - if author.email_id: - old_authors_by_email[author.email_id] = author - - # the draft parser sometimes has a problem when - # affiliation isn't in the second line and it then thinks - # it's an extra author - skip those extra authors - seen = set() # type: Set[Optional[str]] - for full, _, _, _, _, email, country, company in d.get_author_list(): - assert full is None or isinstance(full, str) - assert email is None or isinstance(email, str) - assert country is None or isinstance(country, str) - assert isinstance(company, str) - #full, email, country, company = [ 
unicode(s) for s in [full, email, country, company, ] ] - if email in seen: - continue - seen.add(email) - - old_author = None - if email: - old_author = old_authors_by_email.get(email) - if not old_author: - old_author = old_authors_by_name.get(full) - - if not old_author: - say("UNKNOWN AUTHOR: %s, %s, %s, %s, %s" % (doc.name, full, email, country, company)) - continue - - if old_author.affiliation != company: - say("new affiliation: %s [ %s <%s> ] %s -> %s" % (canonical_name, full, email, old_author.affiliation, company)) - old_author.affiliation = company - old_author.save(update_fields=["affiliation"]) - updated = True - - if country is None: - country = "" - - if old_author.country != country: - say("new country: %s [ %s <%s> ] %s -> %s" % (canonical_name , full, email, old_author.country, country)) - old_author.country = country - old_author.save(update_fields=["country"]) - updated = True - - - if updates: - Document.objects.filter(pk=doc.pk).update(**updates) - updated = True - - if updated: - say("updated: %s" % canonical_name) - -stop = time.time() -dur = stop-start -sec = dur%60 -min = dur//60 -say("Processing time %d:%02d" % (min, sec)) - -print("\n\nWrote log to %s" % os.path.abspath(logfile.name)) -logfile.close() - diff --git a/ietf/stats/models.py b/ietf/stats/models.py index 0871804b0..699334392 100644 --- a/ietf/stats/models.py +++ b/ietf/stats/models.py @@ -11,6 +11,10 @@ from ietf.name.models import CountryName from ietf.person.models import Person from ietf.utils.models import ForeignKey +### NOTE WELL: These models are expected to be removed and the stats app reimplemented. +# A bare python file that should have been a management command was used to populate +# these models when the app was first installed - it has been removed from main, but +# can be seen at https://github.com/ietf-tools/datatracker/blob/f2b716fc052a0152c32b86b428ba6ebfdcdf5cd2/ietf/stats/backfill_data.py class AffiliationAlias(models.Model): """Records that alias should be treated as name for statistical diff --git a/ietf/stats/tests.py b/ietf/stats/tests.py index dae352752..5f23b1b0a 100644 --- a/ietf/stats/tests.py +++ b/ietf/stats/tests.py @@ -20,7 +20,7 @@ import ietf.stats.views from ietf.submit.models import Submission from ietf.doc.factories import WgDraftFactory, WgRfcFactory -from ietf.doc.models import Document, DocAlias, State, RelatedDocument, NewRevisionDocEvent, DocumentAuthor +from ietf.doc.models import Document, State, RelatedDocument, NewRevisionDocEvent, DocumentAuthor from ietf.group.factories import RoleFactory from ietf.meeting.factories import MeetingFactory, AttendedFactory from ietf.person.factories import PersonFactory @@ -79,10 +79,9 @@ class StatisticsTests(TestCase): words=100 ) referencing_draft.set_state(State.objects.get(used=True, type="draft", slug="active")) - DocAlias.objects.create(name=referencing_draft.name).docs.add(referencing_draft) RelatedDocument.objects.create( source=referencing_draft, - target=draft.docalias.first(), + target=draft, relationship=DocRelationshipName.objects.get(slug="refinfo") ) NewRevisionDocEvent.objects.create( @@ -273,3 +272,31 @@ class StatisticsTests(TestCase): self.assertEqual(query.count(), 1) self.assertEqual(query.filter(reg_type='onsite').count(), 1) self.assertEqual(query.filter(reg_type='hackathon').count(), 0) + + @patch('requests.get') + def test_get_meeting_registration_data_duplicates(self, mock_get): + '''Test that get_meeting_registration_data does not create duplicate + MeetingRegistration records + ''' + person = 
PersonFactory() + data = { + 'LastName': person.last_name() + ' ', + 'FirstName': person.first_name(), + 'Company': 'ABC', + 'Country': 'US', + 'Email': person.email().address, + 'RegType': 'onsite', + 'TicketType': 'week_pass', + 'CheckedIn': 'True', + } + data2 = data.copy() + data2['RegType'] = 'hackathon' + response = Response() + response.status_code = 200 + response._content = json.dumps([data, data2, data]).encode('utf8') + mock_get.return_value = response + meeting = MeetingFactory(type_id='ietf', date=datetime.date(2016, 7, 14), number="96") + self.assertEqual(MeetingRegistration.objects.count(), 0) + get_meeting_registration_data(meeting) + query = MeetingRegistration.objects.all() + self.assertEqual(query.count(), 2) diff --git a/ietf/stats/utils.py b/ietf/stats/utils.py index ca1163e07..1f9c0e3c3 100644 --- a/ietf/stats/utils.py +++ b/ietf/stats/utils.py @@ -270,11 +270,10 @@ def get_meeting_registration_data(meeting): object = meeting_registrations.pop((address, reg_type)) created = False else: - object = MeetingRegistration.objects.create( + object, created = MeetingRegistration.objects.get_or_create( meeting_id=meeting.pk, email=address, reg_type=reg_type) - created = True if (object.first_name != first_name[:200] or object.last_name != last_name[:200] or diff --git a/ietf/stats/views.py b/ietf/stats/views.py index 44fbfb717..e2b7706a2 100644 --- a/ietf/stats/views.py +++ b/ietf/stats/views.py @@ -34,7 +34,7 @@ from ietf.group.models import Role, Group from ietf.person.models import Person from ietf.name.models import ReviewResultName, CountryName, DocRelationshipName, ReviewAssignmentStateName from ietf.person.name import plain_name -from ietf.doc.models import DocAlias, Document, State, DocEvent +from ietf.doc.models import Document, State, DocEvent from ietf.meeting.models import Meeting from ietf.stats.models import MeetingRegistration, CountryAlias from ietf.stats.utils import get_aliased_affiliations, get_aliased_countries, compute_hirsch_index @@ -214,13 +214,13 @@ def document_stats(request, stats_type=None): if any(stats_type == t[0] for t in possible_document_stats_types): # filter documents - docalias_filters = Q(docs__type="draft") + document_filters = Q(type__in=["draft","rfc"]) # TODO - review lots of "rfc is a draft" assumptions below - rfc_state = State.objects.get(type="draft", slug="rfc") + rfc_state = State.objects.get(type="rfc", slug="published") if document_type == "rfc": - docalias_filters &= Q(docs__states=rfc_state) + document_filters &= Q(states=rfc_state) elif document_type == "draft": - docalias_filters &= ~Q(docs__states=rfc_state) + document_filters &= ~Q(states=rfc_state) if from_time: # this is actually faster than joining in the database, @@ -229,11 +229,11 @@ def document_stats(request, stats_type=None): type="draft", docevent__time__gte=from_time, docevent__type__in=["published_rfc", "new_revision"], - ).values_list("pk")) + ).values_list("pk",flat=True)) - docalias_filters &= Q(docs__in=docs_within_time_constraint) + document_filters &= Q(pk__in=docs_within_time_constraint) - docalias_qs = DocAlias.objects.filter(docalias_filters) + document_qs = Document.objects.filter(document_filters) if document_type == "rfc": doc_label = "RFC" @@ -242,28 +242,15 @@ def document_stats(request, stats_type=None): else: doc_label = "document" - total_docs = docalias_qs.values_list("docs__name").distinct().count() - - def generate_canonical_names(values): - for doc_id, ts in itertools.groupby(values.order_by("docs__name"), lambda a: a[0]): - chosen = None 
- for t in ts: - if chosen is None: - chosen = t - else: - if t[1].startswith("rfc"): - chosen = t - elif t[1].startswith("draft") and not chosen[1].startswith("rfc"): - chosen = t - yield chosen + total_docs = document_qs.values_list("name").distinct().count() if stats_type == "authors": stats_title = "Number of authors for each {}".format(doc_label) bins = defaultdict(set) - for name, canonical_name, author_count in generate_canonical_names(docalias_qs.values_list("docs__name", "name").annotate(Count("docs__documentauthor"))): - bins[author_count or 0].add(canonical_name) + for name, author_count in document_qs.values_list("name").annotate(Count("documentauthor")).values_list("name","documentauthor__count"): + bins[author_count or 0].add(name) series_data = [] for author_count, names in sorted(bins.items(), key=lambda t: t[0]): @@ -278,8 +265,8 @@ def document_stats(request, stats_type=None): bins = defaultdict(set) - for name, canonical_name, pages in generate_canonical_names(docalias_qs.values_list("docs__name", "name", "docs__pages")): - bins[pages or 0].add(canonical_name) + for name, pages in document_qs.values_list("name", "pages"): + bins[pages or 0].add(name) series_data = [] for pages, names in sorted(bins.items(), key=lambda t: t[0]): @@ -297,8 +284,8 @@ def document_stats(request, stats_type=None): bins = defaultdict(set) - for name, canonical_name, words in generate_canonical_names(docalias_qs.values_list("docs__name", "name", "docs__words")): - bins[put_into_bin(words, bin_size)].add(canonical_name) + for name, words in document_qs.values_list("name", "words"): + bins[put_into_bin(words, bin_size)].add(name) series_data = [] for (value, words), names in sorted(bins.items(), key=lambda t: t[0][0]): @@ -322,20 +309,20 @@ def document_stats(request, stats_type=None): submission_types[doc_name] = file_types doc_names_with_missing_types = {} - for doc_name, canonical_name, rev in generate_canonical_names(docalias_qs.values_list("docs__name", "name", "docs__rev")): + for doc_name, doc_type, rev in document_qs.values_list("name", "type_id", "rev"): types = submission_types.get(doc_name) if types: for dot_ext in types.split(","): - bins[dot_ext.lstrip(".").upper()].add(canonical_name) + bins[dot_ext.lstrip(".").upper()].add(doc_name) else: - if canonical_name.startswith("rfc"): - filename = canonical_name + if doc_type == "rfc": + filename = doc_name else: - filename = canonical_name + "-" + rev + filename = doc_name + "-" + rev - doc_names_with_missing_types[filename] = canonical_name + doc_names_with_missing_types[filename] = doc_name # look up the remaining documents on disk for filename in itertools.chain(os.listdir(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR), os.listdir(settings.RFC_PATH)): @@ -348,10 +335,10 @@ def document_stats(request, stats_type=None): if not any(ext==allowlisted_ext for allowlisted_ext in settings.DOCUMENT_FORMAT_ALLOWLIST): continue - canonical_name = doc_names_with_missing_types.get(basename) + name = doc_names_with_missing_types.get(basename) - if canonical_name: - bins[ext.upper()].add(canonical_name) + if name: + bins[ext.upper()].add(name) series_data = [] for fmt, names in sorted(bins.items(), key=lambda t: t[0]): @@ -367,8 +354,8 @@ def document_stats(request, stats_type=None): bins = defaultdict(set) - for name, canonical_name, formal_language_name in generate_canonical_names(docalias_qs.values_list("docs__name", "name", "docs__formal_languages__name")): - bins[formal_language_name or ""].add(canonical_name) + for name, formal_language_name in 
document_qs.values_list("name", "formal_languages__name"): + bins[formal_language_name or ""].add(name) series_data = [] for formal_language, names in sorted(bins.items(), key=lambda t: t[0]): @@ -383,7 +370,7 @@ def document_stats(request, stats_type=None): person_filters = Q(documentauthor__document__type="draft") # filter persons - rfc_state = State.objects.get(type="draft", slug="rfc") + rfc_state = State.objects.get(type="rfc", slug="published") if document_type == "rfc": person_filters &= Q(documentauthor__document__states=rfc_state) elif document_type == "draft": @@ -563,11 +550,11 @@ def document_stats(request, stats_type=None): bins = defaultdict(set) cite_relationships = list(DocRelationshipName.objects.filter(slug__in=['refnorm', 'refinfo', 'refunk', 'refold'])) - person_filters &= Q(documentauthor__document__docalias__relateddocument__relationship__in=cite_relationships) + person_filters &= Q(documentauthor__document__relateddocument__relationship__in=cite_relationships) person_qs = Person.objects.filter(person_filters) - for name, citations in person_qs.values_list("name").annotate(Count("documentauthor__document__docalias__relateddocument")): + for name, citations in person_qs.values_list("name").annotate(Count("documentauthor__document__relateddocument")): bins[citations or 0].add(name) total_persons = count_bins(bins) @@ -587,11 +574,11 @@ def document_stats(request, stats_type=None): bins = defaultdict(set) cite_relationships = list(DocRelationshipName.objects.filter(slug__in=['refnorm', 'refinfo', 'refunk', 'refold'])) - person_filters &= Q(documentauthor__document__docalias__relateddocument__relationship__in=cite_relationships) + person_filters &= Q(documentauthor__document__relateddocument__relationship__in=cite_relationships) person_qs = Person.objects.filter(person_filters) - values = person_qs.values_list("name", "documentauthor__document").annotate(Count("documentauthor__document__docalias__relateddocument")) + values = person_qs.values_list("name", "documentauthor__document").annotate(Count("documentauthor__document__relateddocument")) for name, ts in itertools.groupby(values.order_by("name"), key=lambda t: t[0]): h_index = compute_hirsch_index([citations for _, document, citations in ts]) bins[h_index or 0].add(name) @@ -612,7 +599,7 @@ def document_stats(request, stats_type=None): person_filters = Q(documentauthor__document__type="draft") # filter persons - rfc_state = State.objects.get(type="draft", slug="rfc") + rfc_state = State.objects.get(type="rfc", slug="published") if document_type == "rfc": person_filters &= Q(documentauthor__document__states=rfc_state) elif document_type == "draft": diff --git a/ietf/submit/forms.py b/ietf/submit/forms.py index 4a71d7beb..f857ac9fd 100644 --- a/ietf/submit/forms.py +++ b/ietf/submit/forms.py @@ -28,8 +28,7 @@ import debug # pyflakes:ignore from ietf.doc.models import Document from ietf.group.models import Group from ietf.ietfauth.utils import has_role -from ietf.doc.fields import SearchableDocAliasesField -from ietf.doc.models import DocAlias +from ietf.doc.fields import SearchableDocumentsField from ietf.ipr.mail import utc_from_string from ietf.meeting.models import Meeting from ietf.message.models import Message @@ -688,9 +687,9 @@ class SubmissionAutoUploadForm(SubmissionBaseUploadForm): if self.cleaned_data['replaces']: names_replaced = [s.strip() for s in self.cleaned_data['replaces'].split(',')] self.cleaned_data['replaces'] = ','.join(names_replaced) - aliases_replaced = 
DocAlias.objects.filter(name__in=names_replaced) - if len(names_replaced) != len(aliases_replaced): - known_names = aliases_replaced.values_list('name', flat=True) + documents_replaced = Document.objects.filter(name__in=names_replaced) + if len(names_replaced) != len(documents_replaced): + known_names = documents_replaced.values_list('name', flat=True) unknown_names = [n for n in names_replaced if n not in known_names] self.add_error( 'replaces', @@ -698,27 +697,27 @@ class SubmissionAutoUploadForm(SubmissionBaseUploadForm): 'Unknown Internet-Draft name(s): ' + ', '.join(unknown_names) ), ) - for alias in aliases_replaced: - if alias.document.name == self.filename: + for doc in documents_replaced: + if doc.name == self.filename: self.add_error( 'replaces', forms.ValidationError("An Internet-Draft cannot replace itself"), ) - elif alias.document.type_id != "draft": + elif doc.type_id != "draft": self.add_error( 'replaces', forms.ValidationError("An Internet-Draft can only replace another Internet-Draft"), ) - elif alias.document.get_state_slug() == "rfc": + elif doc.get_state_slug() == "rfc": self.add_error( 'replaces', - forms.ValidationError("An Internet-Draft cannot replace an RFC"), + forms.ValidationError("An Internet-Draft cannot replace another Internet-Draft that has become an RFC"), ) - elif alias.document.get_state_slug('draft-iesg') in ('approved', 'ann', 'rfcqueue'): + elif doc.get_state_slug('draft-iesg') in ('approved', 'ann', 'rfcqueue'): self.add_error( 'replaces', forms.ValidationError( - alias.name + " is approved by the IESG and cannot be replaced" + doc.name + " is approved by the IESG and cannot be replaced" ), ) return cleaned_data @@ -773,22 +772,20 @@ class SubmitterForm(NameEmailForm): return name class ReplacesForm(forms.Form): - replaces = SearchableDocAliasesField(required=False, help_text="Any Internet-Drafts that this document replaces (approval required for replacing an Internet-Draft you are not the author of)") + replaces = SearchableDocumentsField(required=False, help_text="Any Internet-Drafts that this document replaces (approval required for replacing an Internet-Draft you are not the author of)") def __init__(self, *args, **kwargs): self.name = kwargs.pop("name") super(ReplacesForm, self).__init__(*args, **kwargs) def clean_replaces(self): - for alias in self.cleaned_data['replaces']: - if alias.document.name == self.name: + for doc in self.cleaned_data['replaces']: + if doc.name == self.name: raise forms.ValidationError("An Internet-Draft cannot replace itself.") - if alias.document.type_id != "draft": + if doc.type_id != "draft": raise forms.ValidationError("An Internet-Draft can only replace another Internet-Draft") - if alias.document.get_state_slug() == "rfc": - raise forms.ValidationError("An Internet-Draft cannot replace an RFC") - if alias.document.get_state_slug('draft-iesg') in ('approved','ann','rfcqueue'): - raise forms.ValidationError(alias.name+" is approved by the IESG and cannot be replaced") + if doc.get_state_slug('draft-iesg') in ('approved','ann','rfcqueue'): + raise forms.ValidationError(doc.name+" is approved by the IESG and cannot be replaced") return self.cleaned_data['replaces'] class EditSubmissionForm(forms.ModelForm): diff --git a/ietf/submit/models.py b/ietf/submit/models.py index bb6d8b66b..51f7541e3 100644 --- a/ietf/submit/models.py +++ b/ietf/submit/models.py @@ -115,14 +115,14 @@ class Submission(models.Model): @property def active_wg_drafts_replaced(self): return Document.objects.filter( - 
docalias__name__in=self.replaces.split(','), + name__in=self.replaces.split(','), group__in=Group.objects.active_wgs() ) @property def closed_wg_drafts_replaced(self): return Document.objects.filter( - docalias__name__in=self.replaces.split(','), + name__in=self.replaces.split(','), group__in=Group.objects.closed_wgs() ) diff --git a/ietf/submit/tests.py b/ietf/submit/tests.py index 8b1551cc1..9871023e4 100644 --- a/ietf/submit/tests.py +++ b/ietf/submit/tests.py @@ -32,9 +32,9 @@ from ietf.submit.utils import (expirable_submissions, expire_submission, find_su process_and_accept_uploaded_submission, SubmissionError, process_submission_text, process_submission_xml, process_uploaded_submission, process_and_validate_submission) -from ietf.doc.factories import (DocumentFactory, WgDraftFactory, IndividualDraftFactory, IndividualRfcFactory, +from ietf.doc.factories import (DocumentFactory, WgDraftFactory, IndividualDraftFactory, ReviewFactory, WgRfcFactory) -from ietf.doc.models import ( Document, DocAlias, DocEvent, State, +from ietf.doc.models import ( Document, DocEvent, State, BallotPositionDocEvent, DocumentAuthor, SubmissionDocEvent ) from ietf.doc.utils import create_ballot_if_not_open, can_edit_docextresources, update_action_holders from ietf.group.factories import GroupFactory, RoleFactory @@ -302,7 +302,7 @@ class SubmitTests(BaseSubmitTestCase): submission = Submission.objects.get(name=name) self.assertEqual(submission.submitter, email.utils.formataddr((submitter_name, submitter_email))) self.assertEqual([] if submission.replaces == "" else submission.replaces.split(','), - [ d.name for d in DocAlias.objects.filter(pk__in=replaces) ]) + [ d.name for d in Document.objects.filter(pk__in=replaces) ]) self.assertCountEqual( [str(r) for r in submission.external_resources.all()], [str(r) for r in extresources] if extresources else [], @@ -357,8 +357,6 @@ class SubmitTests(BaseSubmitTestCase): notify="aliens@example.mars", ) sug_replaced_draft.set_state(State.objects.get(used=True, type="draft", slug="active")) - sug_replaced_alias = DocAlias.objects.create(name=sug_replaced_draft.name) - sug_replaced_alias.docs.add(sug_replaced_draft) name = "draft-ietf-mars-testing-tests" rev = "00" @@ -368,9 +366,8 @@ class SubmitTests(BaseSubmitTestCase): # supply submitter info, then draft should be in and ready for approval mailbox_before = len(outbox) - replaced_alias = draft.docalias.first() r = self.supply_extra_metadata(name, status_url, author.ascii, author.email().address.lower(), - replaces=[str(replaced_alias.pk), str(sug_replaced_alias.pk)]) + replaces=[str(draft.pk), str(sug_replaced_draft.pk)]) self.assertEqual(r.status_code, 302) status_url = r["Location"] @@ -400,7 +397,7 @@ class SubmitTests(BaseSubmitTestCase): r = self.client.post(status_url, dict(action=action)) self.assertEqual(r.status_code, 302) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) new_revision = draft.latest_event(type="new_revision") self.assertEqual(draft.group.acronym, "mars") @@ -418,9 +415,9 @@ class SubmitTests(BaseSubmitTestCase): self.assertEqual(authors[0].person, author) self.assertEqual(set(draft.formal_languages.all()), set(FormalLanguageName.objects.filter(slug="json"))) self.assertEqual(draft.relations_that_doc("replaces").count(), 1) - self.assertTrue(draft.relations_that_doc("replaces").first().target, replaced_alias) + self.assertTrue(draft.relations_that_doc("replaces").first().target, draft) 
self.assertEqual(draft.relations_that_doc("possibly-replaces").count(), 1) - self.assertTrue(draft.relations_that_doc("possibly-replaces").first().target, sug_replaced_alias) + self.assertTrue(draft.relations_that_doc("possibly-replaces").first().target, sug_replaced_draft) self.assertEqual(len(outbox), mailbox_before + 5) self.assertIn(("I-D Action: %s" % name), outbox[-4]["Subject"]) self.assertIn(author.ascii, get_payload_text(outbox[-4])) @@ -433,7 +430,7 @@ class SubmitTests(BaseSubmitTestCase): # Check "Review of suggested possible replacements for..." mail self.assertIn("review", outbox[-1]["Subject"].lower()) self.assertIn(name, get_payload_text(outbox[-1])) - self.assertIn(sug_replaced_alias.name, get_payload_text(outbox[-1])) + self.assertIn(sug_replaced_draft.name, get_payload_text(outbox[-1])) self.assertIn("ames-chairs@", outbox[-1]["To"].lower()) self.assertIn("mars-chairs@", outbox[-1]["To"].lower()) # Check submission settings @@ -703,7 +700,7 @@ class SubmitTests(BaseSubmitTestCase): self.assertTrue('New version approved' in edescs) self.assertTrue('Uploaded new revision' in edescs) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) self.assertEqual(draft.group.acronym, name.split("-")[2]) # @@ -930,7 +927,7 @@ class SubmitTests(BaseSubmitTestCase): r = self.client.post(confirmation_url, {'action':'confirm'}) self.assertEqual(r.status_code, 302) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) new_revision = draft.latest_event() self.assertEqual(new_revision.type, "new_revision") @@ -970,7 +967,7 @@ class SubmitTests(BaseSubmitTestCase): action = force_post_button.parents("form").find('input[type=hidden][name="action"]').val() r = self.client.post(status_url, dict(action=action)) - doc = Document.objects.get(docalias__name=name) + doc = Document.objects.get(name=name) self.assertEqual(doc.documentauthor_set.count(), 1) docauth = doc.documentauthor_set.first() self.assertEqual(docauth.person, author) @@ -1103,7 +1100,7 @@ class SubmitTests(BaseSubmitTestCase): self.assertIn("New Version Notification", notification_email["Subject"]) self.assertIn(author.email().address.lower(), notification_email["To"]) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) self.assertEqual(draft.docextresource_set.count(), 0) new_revision = draft.latest_event() @@ -1151,7 +1148,7 @@ class SubmitTests(BaseSubmitTestCase): self._assert_extresources_form_not_present(r) # Check that the draft itself got the resources - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertCountEqual( [str(r) for r in draft.docextresource_set.all()], [str(r) for r in resources], @@ -1160,7 +1157,7 @@ class SubmitTests(BaseSubmitTestCase): self.verify_bibxml_ids_creation(draft) def test_submit_update_individual(self): - IndividualDraftFactory(name='draft-ietf-random-thing', states=[('draft','rfc')], other_aliases=['rfc9999',], pages=5) + IndividualDraftFactory(name='draft-ietf-random-thing', states=[('draft','active'),('draft-iesg','approved')], pages=5) ad=Person.objects.get(user__username='ad') # Group of None here does not reflect real individual submissions draft = IndividualDraftFactory(group=None, ad = ad, authors=[ad,], notify='aliens@example.mars', pages=5) @@ -1170,23 +1167,14 @@ class SubmitTests(BaseSubmitTestCase): status_url, 
author = self.do_submission(name,rev) mailbox_before = len(outbox) - replaced_alias = draft.docalias.first() - r = self.supply_extra_metadata(name, status_url, "Submitter Name", "author@example.com", replaces=[str(replaced_alias.pk)]) + r = self.supply_extra_metadata(name, status_url, "Submitter Name", "author@example.com", replaces=[str(draft.pk)]) self.assertEqual(r.status_code, 200) self.assertContains(r, 'cannot replace itself') self._assert_extresources_in_table(r, []) self._assert_extresources_form(r, []) - replaced_alias = DocAlias.objects.get(name='draft-ietf-random-thing') - r = self.supply_extra_metadata(name, status_url, "Submitter Name", "author@example.com", replaces=[str(replaced_alias.pk)]) - self.assertEqual(r.status_code, 200) - self.assertContains(r, 'cannot replace an RFC') - self._assert_extresources_in_table(r, []) - self._assert_extresources_form(r, []) - - replaced_alias.document.set_state(State.objects.get(type='draft-iesg',slug='approved')) - replaced_alias.document.set_state(State.objects.get(type='draft',slug='active')) - r = self.supply_extra_metadata(name, status_url, "Submitter Name", "author@example.com", replaces=[str(replaced_alias.pk)]) + replaced = Document.objects.get(name='draft-ietf-random-thing') + r = self.supply_extra_metadata(name, status_url, "Submitter Name", "author@example.com", replaces=[str(replaced.pk)]) self.assertEqual(r.status_code, 200) self.assertContains(r, 'approved by the IESG and cannot') self._assert_extresources_in_table(r, []) @@ -1206,7 +1194,7 @@ class SubmitTests(BaseSubmitTestCase): r = self.client.post(confirmation_url, {'action':'confirm'}) self.assertEqual(r.status_code, 302) self.assertEqual(len(outbox), mailbox_before+3) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) self.assertEqual(draft.relateddocument_set.filter(relationship_id='replaces').count(), replaces_count) self.assertEqual(draft.docextresource_set.count(), 0) @@ -1280,7 +1268,7 @@ class SubmitTests(BaseSubmitTestCase): status_url, "Submitter Name", "submitter@example.com", - replaces=[str(replaced_draft.docalias.first().pk)], + replaces=[str(replaced_draft.pk)], ) submission = Submission.objects.get(name=name, rev=rev) @@ -1324,7 +1312,7 @@ class SubmitTests(BaseSubmitTestCase): r = self.client.post(confirmation_url, {'action':'cancel'}) self.assertEqual(r.status_code, 302) self.assertEqual(len(outbox), mailbox_before) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, old_rev) def test_submit_new_wg_with_dash(self): @@ -1430,7 +1418,7 @@ class SubmitTests(BaseSubmitTestCase): "edit-pages": "123", "submitter-name": "Some Random Test Person", "submitter-email": "random@example.com", - "replaces": [str(draft.docalias.first().pk)], + "replaces": [str(draft.pk)], "authors-0-name": "Person 1", "authors-0-email": "person1@example.com", "authors-1-name": "Person 2", @@ -1447,7 +1435,7 @@ class SubmitTests(BaseSubmitTestCase): self.assertEqual(submission.abstract, "some abstract") self.assertEqual(submission.pages, 123) self.assertEqual(submission.submitter, "Some Random Test Person ") - self.assertEqual(submission.replaces, draft.docalias.first().name) + self.assertEqual(submission.replaces, draft.name) self.assertEqual(submission.state_id, "manual") authors = submission.authors @@ -1479,7 +1467,7 @@ class SubmitTests(BaseSubmitTestCase): r = self.client.post(status_url, dict(action=action)) 
self.assertEqual(r.status_code, 302) - draft = Document.objects.get(docalias__name=name) + draft = Document.objects.get(name=name) self.assertEqual(draft.rev, rev) self.assertEqual(draft.docextresource_set.count(), 0) self.verify_bibxml_ids_creation(draft) @@ -3113,13 +3101,15 @@ class SubmissionUploadFormTests(BaseSubmitTestCase): # can't replace RFC rfc = WgRfcFactory() + draft = WgDraftFactory(states=[("draft", "rfc")]) + draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc) form = SubmissionAutoUploadForm( request_factory.get('/some/url'), - data={'user': auth.user.username, 'replaces': rfc.name}, + data={'user': auth.user.username, 'replaces': draft.name}, files=files_dict, ) self.assertFalse(form.is_valid()) - self.assertIn('An Internet-Draft cannot replace an RFC', form.errors['replaces']) + self.assertIn('An Internet-Draft cannot replace another Internet-Draft that has become an RFC', form.errors['replaces']) # can't replace draft approved by iesg existing_drafts[0].set_state(State.objects.get(type='draft-iesg', slug='approved')) @@ -3725,25 +3715,9 @@ class RefsTests(BaseSubmitTestCase): class PostSubmissionTests(BaseSubmitTestCase): - @override_settings(RFC_FILE_TYPES=('txt', 'xml'), IDSUBMIT_FILE_TYPES=('pdf', 'md')) - def test_find_submission_filenames_rfc(self): - """Posting an RFC submission should use RFC_FILE_TYPES""" - rfc = IndividualRfcFactory() - path = Path(self.staging_dir) - for ext in ['txt', 'xml', 'pdf', 'md']: - (path / f'{rfc.name}-{rfc.rev}.{ext}').touch() - files = find_submission_filenames(rfc) - self.assertCountEqual( - files, - { - 'txt': f'{path}/{rfc.name}-{rfc.rev}.txt', - 'xml': f'{path}/{rfc.name}-{rfc.rev}.xml', - # should NOT find the pdf or md - } - ) @override_settings(RFC_FILE_TYPES=('txt', 'xml'), IDSUBMIT_FILE_TYPES=('pdf', 'md')) - def test_find_submission_filenames_draft(self): + def test_find_submission_filenames(self): """Posting an I-D submission should use IDSUBMIT_FILE_TYPES""" draft = WgDraftFactory() path = Path(self.staging_dir) diff --git a/ietf/submit/utils.py b/ietf/submit/utils.py index ebe79e4c4..7253210f0 100644 --- a/ietf/submit/utils.py +++ b/ietf/submit/utils.py @@ -27,7 +27,7 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.models import ( Document, State, DocAlias, DocEvent, SubmissionDocEvent, +from ietf.doc.models import ( Document, State, DocEvent, SubmissionDocEvent, DocumentAuthor, AddedMessageEvent ) from ietf.doc.models import NewRevisionDocEvent from ietf.doc.models import RelatedDocument, DocRelationshipName, DocExtResource @@ -289,7 +289,7 @@ def find_submission_filenames(draft): """ path = pathlib.Path(settings.IDSUBMIT_STAGING_PATH) stem = f'{draft.name}-{draft.rev}' - allowed_types = settings.RFC_FILE_TYPES if draft.get_state_slug() == 'rfc' else settings.IDSUBMIT_FILE_TYPES + allowed_types = settings.IDSUBMIT_FILE_TYPES candidates = {ext: path / f'{stem}.{ext}' for ext in allowed_types} return {ext: str(filename) for ext, filename in candidates.items() if filename.exists()} @@ -377,10 +377,6 @@ def post_submission(request, submission, approved_doc_desc, approved_subm_desc): events.append(e) log.log(f"{submission.name}: created doc events") - # update related objects - alias, __ = DocAlias.objects.get_or_create(name=submission.name) - alias.docs.add(draft) - draft.set_state(State.objects.get(used=True, type="draft", slug="active")) update_authors(draft, submission) @@ -506,7 +502,7 @@ def update_replaces_from_submission(request, submission, draft): if 
request.user.is_authenticated: is_chair_of = list(Group.objects.filter(role__person__user=request.user, role__name="chair")) - replaces = DocAlias.objects.filter(name__in=submission.replaces.split(",")).prefetch_related("docs", "docs__group") + replaces = Document.objects.filter(name__in=submission.replaces.split(",")).prefetch_related("group") existing_replaces = list(draft.related_that_doc("replaces")) existing_suggested = set(draft.related_that_doc("possibly-replaces")) @@ -518,14 +514,12 @@ def update_replaces_from_submission(request, submission, draft): if r in existing_replaces: continue - rdoc = r.document - - if rdoc == draft: + if r == draft: continue if (is_secretariat - or (draft.group in is_chair_of and (rdoc.group.type_id == "individ" or rdoc.group in is_chair_of)) - or (submitter_email and rdoc.documentauthor_set.filter(email__address__iexact=submitter_email).exists())): + or (draft.group in is_chair_of and (r.group.type_id == "individ" or r.group in is_chair_of)) + or (submitter_email and r.documentauthor_set.filter(email__address__iexact=submitter_email).exists())): approved.append(r) else: if r not in existing_suggested: @@ -1007,7 +1001,7 @@ def accept_submission(submission: Submission, request: Optional[HttpRequest] = N docevent_from_submission(submission, desc="Uploaded new revision", who=requester if requester_is_author else None) - replaces = DocAlias.objects.filter(name__in=submission.replaces_names) + replaces = Document.objects.filter(name__in=submission.replaces_names) pretty_replaces = '(none)' if not replaces else ( ', '.join(prettify_std_name(r.name) for r in replaces) ) diff --git a/ietf/submit/views.py b/ietf/submit/views.py index edb977326..b583a53fc 100644 --- a/ietf/submit/views.py +++ b/ietf/submit/views.py @@ -22,7 +22,7 @@ from django.views.decorators.csrf import csrf_exempt import debug # pyflakes:ignore -from ietf.doc.models import Document, DocAlias, AddedMessageEvent +from ietf.doc.models import Document, AddedMessageEvent from ietf.doc.forms import ExtResourceForm from ietf.group.models import Group from ietf.group.utils import group_features_group_filter @@ -429,7 +429,7 @@ def submission_status(request, submission_id, access_token=None): ) submitter_form = SubmitterForm(initial=submission.submitter_parsed(), prefix="submitter") - replaces_form = ReplacesForm(name=submission.name,initial=DocAlias.objects.filter(name__in=submission.replaces.split(","))) + replaces_form = ReplacesForm(name=submission.name,initial=Document.objects.filter(name__in=submission.replaces.split(","))) extresources_form = ExtResourceForm( initial=dict(resources=[er['res'] for er in external_resources]), extresource_model=SubmissionExtResource, @@ -646,7 +646,7 @@ def edit_submission(request, submission_id, access_token=None): else: edit_form = EditSubmissionForm(instance=submission, prefix="edit") submitter_form = SubmitterForm(initial=submission.submitter_parsed(), prefix="submitter") - replaces_form = ReplacesForm(name=submission.name,initial=DocAlias.objects.filter(name__in=submission.replaces.split(","))) + replaces_form = ReplacesForm(name=submission.name, initial=Document.objects.filter(name__in=submission.replaces.split(","))) author_forms = [ AuthorForm(initial=author, prefix="authors-%s" % i) for i, author in enumerate(submission.authors) ] diff --git a/ietf/sync/iana.py b/ietf/sync/iana.py index dc61f9159..9ce54a687 100644 --- a/ietf/sync/iana.py +++ b/ietf/sync/iana.py @@ -45,7 +45,7 @@ def update_rfc_log_from_protocol_page(rfc_names, 
rfc_must_published_later_than): updated = [] - docs = Document.objects.filter(docalias__name__in=rfc_names).exclude( + docs = Document.objects.filter(name__in=rfc_names).exclude( docevent__type="rfc_in_iana_registry").filter( # only take those that were published after cutoff since we # have a big bunch of old RFCs that we unfortunately don't have data for @@ -189,7 +189,7 @@ def update_history_with_changes(changes, send_email=True): state_type=state_type, state=state) if not e: try: - doc = Document.objects.get(docalias__name=docname) + doc = Document.objects.get(name=docname) except Document.DoesNotExist: warnings.append("Document %s not found" % docname) continue diff --git a/ietf/sync/rfceditor.py b/ietf/sync/rfceditor.py index 784e7a2f0..a2f85f478 100644 --- a/ietf/sync/rfceditor.py +++ b/ietf/sync/rfceditor.py @@ -7,20 +7,23 @@ import datetime import re import requests +from typing import Iterator, Optional, Union from urllib.parse import urlencode from xml.dom import pulldom, Node from django.conf import settings +from django.db.models import Subquery, OuterRef, F, Q from django.utils import timezone from django.utils.encoding import smart_bytes, force_str import debug # pyflakes:ignore -from ietf.doc.models import ( Document, DocAlias, State, StateType, DocEvent, DocRelationshipName, - DocTagName, DocTypeName, RelatedDocument ) +from ietf.doc.models import ( Document, State, StateType, DocEvent, DocRelationshipName, + DocTagName, RelatedDocument, RelatedDocHistory ) from ietf.doc.expire import move_draft_files_to_archive from ietf.doc.utils import add_state_change_event, prettify_std_name, update_action_holders from ietf.group.models import Group +from ietf.ipr.models import IprDocRel from ietf.name.models import StdLevelName, StreamName from ietf.person.models import Person from ietf.utils.log import log @@ -155,7 +158,7 @@ def update_drafts_from_queue(drafts): names = [t[0] for t in drafts] drafts_in_db = dict((d.name, d) - for d in Document.objects.filter(type="draft", docalias__name__in=names)) + for d in Document.objects.filter(type="draft", name__in=names)) changed = set() @@ -228,7 +231,7 @@ def update_drafts_from_queue(drafts): # remove tags and states for those not in the queue anymore - for d in Document.objects.exclude(docalias__name__in=names).filter(states__type="draft-rfceditor").distinct(): + for d in Document.objects.exclude(name__in=names).filter(states__type="draft-rfceditor").distinct(): d.tags.remove(*list(tag_mapping.values())) d.unset_state("draft-rfceditor") # we do not add a history entry here - most likely we already @@ -326,23 +329,28 @@ def parse_index(response): log("node: %s" % node) raise for d in data: - k = "RFC%04d" % d[0] + k = "RFC%d" % d[0] if k in also_list: d[9].extend(also_list[k]) return data -def update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=None): +def update_docs_from_rfc_index( + index_data, errata_data, skip_older_than_date=None +) -> Iterator[tuple[int, list[str], Document, bool]]: """Given parsed data from the RFC Editor index, update the documents in the database - Yields a list of change descriptions for each document, if any. + Returns an iterator that yields (rfc_number, change_list, doc, rfc_published) for the + RFC document and, if applicable, the I-D that it came from. The skip_older_than_date is a bare date, not a datetime. 
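For illustration, a minimal sketch of how a caller might consume the iterator (the loop body is an assumption for illustration only; index_data and errata_data are assumed to come from parse_index() and the errata JSON fetch used elsewhere in ietf/sync/rfceditor.py):

    for rfc_number, changes, doc, rfc_published in update_docs_from_rfc_index(index_data, errata_data):
        # Each yield reports a document that actually changed during this sync pass:
        # either the RFC Document itself or the I-D it came from.
        log(f"{doc.name}: {', '.join(changes)}")
        if rfc_published:
            # True only on the RFC yield, when a published_rfc event was added in this pass.
            pass

Note that errata keys are normalized to drop leading zeros (for example "RFC0123" becomes "RFC123"), which matches the un-padded "RFC%d" keys now produced by parse_index().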
""" - - errata = {} + # Create dict mapping doc-id to list of errata records that apply to it + errata: dict[str, list[dict]] = {} for item in errata_data: - name = item['doc-id'] + name = item["doc-id"] + if name.upper().startswith("RFC"): + name = f"RFC{int(name[3:])}" # removes leading 0s on the rfc number if not name in errata: errata[name] = [] errata[name].append(item) @@ -357,7 +365,7 @@ def update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=Non "Best Current Practice": StdLevelName.objects.get(slug="bcp"), "Historic": StdLevelName.objects.get(slug="hist"), "Unknown": StdLevelName.objects.get(slug="unkn"), - } + } stream_mapping = { "IETF": StreamName.objects.get(slug="ietf"), @@ -367,15 +375,35 @@ def update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=Non "Legacy": StreamName.objects.get(slug="legacy"), } - tag_has_errata = DocTagName.objects.get(slug='errata') - tag_has_verified_errata = DocTagName.objects.get(slug='verified-errata') + tag_has_errata = DocTagName.objects.get(slug="errata") + tag_has_verified_errata = DocTagName.objects.get(slug="verified-errata") relationship_obsoletes = DocRelationshipName.objects.get(slug="obs") relationship_updates = DocRelationshipName.objects.get(slug="updates") + rfc_published_state = State.objects.get(type_id="rfc", slug="published") system = Person.objects.get(name="(System)") - for rfc_number, title, authors, rfc_published_date, current_status, updates, updated_by, obsoletes, obsoleted_by, also, draft, has_errata, stream, wg, file_formats, pages, abstract in index_data: + first_sync_creating_subseries = not Document.objects.filter(type_id__in=["bcp","std","fyi"]).exists() + for ( + rfc_number, + title, + authors, + rfc_published_date, + current_status, + updates, + updated_by, + obsoletes, + obsoleted_by, + also, + draft_name, + has_errata, + stream, + wg, + file_formats, + pages, + abstract, + ) in index_data: if skip_older_than_date and rfc_published_date < skip_older_than_date: # speed up the process by skipping old entries continue @@ -383,64 +411,180 @@ def update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=Non # we assume two things can happen: we get a new RFC, or an # attribute has been updated at the RFC Editor (RFC Editor # attributes take precedence over our local attributes) - events = [] - changes = [] + rfc_events = [] + rfc_changes = [] rfc_published = False - # make sure we got the document and alias - doc = None - name = "rfc%s" % rfc_number - a = DocAlias.objects.filter(name=name) - if a: - doc = a[0].document - else: + # Find the draft, if any + draft = None + if draft_name: + try: + draft = Document.objects.get(name=draft_name, type_id="draft") + except Document.DoesNotExist: + pass + # Logging below warning turns out to be unhelpful - there are many references + # to such things in the index: + # * all april-1 RFCs have an internal name that looks like a draft name, but there + # was never such a draft. More of these will exist in the future + # * Several documents were created with out-of-band input to the RFC-editor, for a + # variety of reasons. + # + # What this exposes is that the rfc-index needs to stop talking about these things. + # If there is no draft to point to, don't point to one, even if there was an RPC + # internal name in use (and in the RPC database). This will be a requirement on the + # reimplementation of the creation of the rfc-index. 
+ # + # log(f"Warning: RFC index for {rfc_number} referred to unknown draft {draft_name}") + + # Find or create the RFC document + creation_args: dict[str, Optional[Union[str, int]]] = {"name": f"rfc{rfc_number}"} + if draft: + creation_args.update( + { + "title": draft.title, + "stream": draft.stream, + "group": draft.group, + "abstract": draft.abstract, + "pages": draft.pages, + "words": draft.words, + "std_level": draft.std_level, + "ad": draft.ad, + "external_url": draft.external_url, + "uploaded_filename": draft.uploaded_filename, + "note": draft.note, + } + ) + doc, created_rfc = Document.objects.get_or_create( + rfc_number=rfc_number, type_id="rfc", defaults=creation_args + ) + if created_rfc: + rfc_changes.append(f"created document {prettify_std_name(doc.name)}") + doc.set_state(rfc_published_state) if draft: - try: - doc = Document.objects.get(name=draft) - except Document.DoesNotExist: - pass + doc.formal_languages.set(draft.formal_languages.all()) - if not doc: - changes.append("created document %s" % prettify_std_name(name)) - doc = Document.objects.create(name=name, type=DocTypeName.objects.get(slug="draft")) + if draft: + draft_events = [] + draft_changes = [] - # add alias - alias, __ = DocAlias.objects.get_or_create(name=name) - alias.docs.add(doc) - changes.append("created alias %s" % prettify_std_name(name)) + # Ensure the draft is in the "rfc" state and move its files to the archive + # if necessary. + if draft.get_state_slug() != "rfc": + draft.set_state( + State.objects.get(used=True, type="draft", slug="rfc") + ) + move_draft_files_to_archive(draft, draft.rev) + draft_changes.append(f"changed state to {draft.get_state()}") + + # Ensure the draft and rfc are linked with a "became_rfc" relationship + r, created_relateddoc = RelatedDocument.objects.get_or_create( + source=draft, target=doc, relationship_id="became_rfc" + ) + if created_relateddoc: + change = "created {rel_name} relationship between {pretty_draft_name} and {pretty_rfc_name}".format( + rel_name=r.relationship.name.lower(), + pretty_draft_name=prettify_std_name(draft_name), + pretty_rfc_name=prettify_std_name(doc.name), + ) + draft_changes.append(change) + rfc_changes.append(change) + + # Always set the "draft-iesg" state. This state should be set for all drafts, so + # log a warning if it is not set. What should happen here is that ietf stream + # RFCs come in as "rfcqueue" and are set to "pub" when they appear in the RFC index. + # Other stream documents should normally be "idexists" and be left that way. The + # code here *actually* leaves "draft-iesg" state alone if it is "idexists" or "pub", + # and changes any other state to "pub". If unset, it changes it to "idexists". + # This reflects historical behavior and should probably be updated, but a migration + # of existing drafts (and validation of the change) is needed before we change the + # handling. 
+ prev_iesg_state = draft.get_state("draft-iesg") + if prev_iesg_state is None: + log(f'Warning while processing {doc.name}: {draft.name} has no "draft-iesg" state') + new_iesg_state = State.objects.get(type_id="draft-iesg", slug="idexists") + elif prev_iesg_state.slug not in ("pub", "idexists"): + if prev_iesg_state.slug != "rfcqueue": + log( + 'Warning while processing {}: {} is in "draft-iesg" state {} (expected "rfcqueue")'.format( + doc.name, draft.name, prev_iesg_state.slug + ) + ) + new_iesg_state = State.objects.get(type_id="draft-iesg", slug="pub") + else: + new_iesg_state = prev_iesg_state + + if new_iesg_state != prev_iesg_state: + draft.set_state(new_iesg_state) + draft_changes.append(f"changed {new_iesg_state.type.label} to {new_iesg_state}") + e = update_action_holders(draft, prev_iesg_state, new_iesg_state) + if e: + draft_events.append(e) + + # If the draft and RFC streams agree, move draft to "pub" stream state. If not, complain. + if draft.stream != doc.stream: + log("Warning while processing {}: draft {} stream is {} but RFC stream is {}".format( + doc.name, draft.name, draft.stream, doc.stream + )) + elif draft.stream.slug in ["iab", "irtf", "ise"]: + stream_slug = f"draft-stream-{draft.stream.slug}" + prev_state = draft.get_state(stream_slug) + if prev_state is not None and prev_state.slug != "pub": + new_state = State.objects.select_related("type").get(used=True, type__slug=stream_slug, slug="pub") + draft.set_state(new_state) + draft_changes.append( + f"changed {new_state.type.label} to {new_state}" + ) + e = update_action_holders(draft, prev_state, new_state) + if e: + draft_events.append(e) + if draft_changes: + draft_events.append( + DocEvent.objects.create( + doc=draft, + rev=doc.rev, + by=system, + type="sync_from_rfc_editor", + desc=f"Received changes through RFC Editor sync ({', '.join(draft_changes)})", + ) + ) + draft.save_with_history(draft_events) + yield rfc_number, draft_changes, draft, False # yield changes to the draft # check attributes + verbed = "set" if created_rfc else "changed" if title != doc.title: doc.title = title - changes.append("changed title to '%s'" % doc.title) + rfc_changes.append(f"{verbed} title to '{doc.title}'") if abstract and abstract != doc.abstract: doc.abstract = abstract - changes.append("changed abstract to '%s'" % doc.abstract) + rfc_changes.append(f"{verbed} abstract to '{doc.abstract}'") if pages and int(pages) != doc.pages: doc.pages = int(pages) - changes.append("changed pages to %s" % doc.pages) + rfc_changes.append(f"{verbed} pages to {doc.pages}") if std_level_mapping[current_status] != doc.std_level: doc.std_level = std_level_mapping[current_status] - changes.append("changed standardization level to %s" % doc.std_level) - - if doc.get_state_slug() != "rfc": - doc.set_state(State.objects.get(used=True, type="draft", slug="rfc")) - move_draft_files_to_archive(doc, doc.rev) - changes.append("changed state to %s" % doc.get_state()) + rfc_changes.append(f"{verbed} standardization level to {doc.std_level}") if doc.stream != stream_mapping[stream]: doc.stream = stream_mapping[stream] - changes.append("changed stream to %s" % doc.stream) + rfc_changes.append(f"{verbed} stream to {doc.stream}") - if not doc.group: # if we have no group assigned, check if RFC Editor has a suggestion + if doc.get_state() != rfc_published_state: + doc.set_state(rfc_published_state) + rfc_changes.append(f"{verbed} {rfc_published_state.type.label} to {rfc_published_state}") + + # if we have no group assigned, check if RFC Editor has a suggestion 
+ if not doc.group: if wg: doc.group = Group.objects.get(acronym=wg) - changes.append("set group to %s" % doc.group) + rfc_changes.append(f"set group to {doc.group}") else: - doc.group = Group.objects.get(type="individ") # fallback for newly created doc + doc.group = Group.objects.get( + type="individ" + ) # fallback for newly created doc if not doc.latest_event(type="published_rfc"): e = DocEvent(doc=doc, rev=doc.rev, type="published_rfc") @@ -467,90 +611,181 @@ def update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=Non e.by = system e.desc = "RFC published" e.save() - events.append(e) + rfc_events.append(e) - changes.append("added RFC published event at %s" % e.time.strftime("%Y-%m-%d")) + rfc_changes.append( + f"added RFC published event at {e.time.strftime('%Y-%m-%d')}" + ) rfc_published = True - for t in ("draft-iesg", "draft-stream-iab", "draft-stream-irtf", "draft-stream-ise"): - prev_state = doc.get_state(t) - if prev_state is not None: - if prev_state.slug not in ("pub", "idexists"): - new_state = State.objects.select_related("type").get(used=True, type=t, slug="pub") - doc.set_state(new_state) - changes.append("changed %s to %s" % (new_state.type.label, new_state)) - e = update_action_holders(doc, prev_state, new_state) - if e: - events.append(e) - elif t == 'draft-iesg': - doc.set_state(State.objects.get(type_id='draft-iesg', slug='idexists')) - def parse_relation_list(l): res = [] for x in l: - if x[:3] in ("NIC", "IEN", "STD", "RTR"): - # try translating this to RFCs that we can handle - # sensibly; otherwise we'll have to ignore them - l = DocAlias.objects.filter(name__startswith="rfc", docs__docalias__name=x.lower()) - else: - l = DocAlias.objects.filter(name=x.lower()) - - for a in l: + for a in Document.objects.filter(name=x.lower(), type_id="rfc"): if a not in res: res.append(a) return res for x in parse_relation_list(obsoletes): - if not RelatedDocument.objects.filter(source=doc, target=x, relationship=relationship_obsoletes): - r = RelatedDocument.objects.create(source=doc, target=x, relationship=relationship_obsoletes) - changes.append("created %s relation between %s and %s" % (r.relationship.name.lower(), prettify_std_name(r.source.name), prettify_std_name(r.target.name))) + if not RelatedDocument.objects.filter( + source=doc, target=x, relationship=relationship_obsoletes + ): + r = RelatedDocument.objects.create( + source=doc, target=x, relationship=relationship_obsoletes + ) + rfc_changes.append( + "created {rel_name} relation between {src_name} and {tgt_name}".format( + rel_name=r.relationship.name.lower(), + src_name=prettify_std_name(r.source.name), + tgt_name=prettify_std_name(r.target.name), + ) + ) for x in parse_relation_list(updates): - if not RelatedDocument.objects.filter(source=doc, target=x, relationship=relationship_updates): - r = RelatedDocument.objects.create(source=doc, target=x, relationship=relationship_updates) - changes.append("created %s relation between %s and %s" % (r.relationship.name.lower(), prettify_std_name(r.source.name), prettify_std_name(r.target.name))) + if not RelatedDocument.objects.filter( + source=doc, target=x, relationship=relationship_updates + ): + r = RelatedDocument.objects.create( + source=doc, target=x, relationship=relationship_updates + ) + rfc_changes.append( + "created {rel_name} relation between {src_name} and {tgt_name}".format( + rel_name=r.relationship.name.lower(), + src_name=prettify_std_name(r.source.name), + tgt_name=prettify_std_name(r.target.name), + ) + ) if also: + # recondition also 
to have proper subseries document names: + conditioned_also = [] for a in also: a = a.lower() - if not DocAlias.objects.filter(name=a): - DocAlias.objects.create(name=a).docs.add(doc) - changes.append("created alias %s" % prettify_std_name(a)) + subseries_slug = a[:3] + if subseries_slug not in ["bcp", "std", "fyi"]: + log(f"Unexpected 'also' relationship of {a} encountered for {doc}") + next + maybe_number = a[3:].strip() + if not maybe_number.isdigit(): + log(f"Unexpected 'also' subseries element identifier {a} encountered for {doc}") + next + else: + subseries_number = int(maybe_number) + conditioned_also.append(f"{subseries_slug}{subseries_number}") # Note the lack of leading zeros + also = conditioned_also - doc_errata = errata.get('RFC%04d'%rfc_number, []) - all_rejected = doc_errata and all( er['errata_status_code']=='Rejected' for er in doc_errata ) + for a in also: + subseries_doc_name = a + subseries_slug=a[:3] + # Leaving most things to the default intentionally + # Of note, title and stream are left to the defaults of "" and none. + subseries_doc, created = Document.objects.get_or_create(type_id=subseries_slug, name=subseries_doc_name) + if created: + if first_sync_creating_subseries: + subseries_doc.docevent_set.create(type=f"{subseries_slug}_history_marker", by=system, desc=f"No history of this {subseries_slug.upper()} document is currently available in the datatracker before this point") + subseries_doc.docevent_set.create(type=f"{subseries_slug}_doc_created", by=system, desc=f"Imported {subseries_doc_name} into the datatracker via sync to the rfc-index") + else: + subseries_doc.docevent_set.create(type=f"{subseries_slug}_doc_created", by=system, desc=f"Created {subseries_doc_name} via sync to the rfc-index") + _, relationship_created = subseries_doc.relateddocument_set.get_or_create(relationship_id="contains", target=doc) + if relationship_created: + if first_sync_creating_subseries: + subseries_doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Imported membership of {doc.name} in {subseries_doc.name} via sync to the rfc-index") + rfc_events.append(doc.docevent_set.create(type=f"{subseries_slug}_history_marker", by=system, desc=f"No history of {subseries_doc.name.upper()} is currently available in the datatracker before this point")) + rfc_events.append(doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Imported membership of {doc.name} in {subseries_doc.name} via sync to the rfc-index")) + else: + subseries_doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Added {doc.name} to {subseries_doc.name}") + rfc_events.append(doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Added {doc.name} to {subseries_doc.name}")) + + for subdoc in doc.related_that("contains"): + if subdoc.name not in also: + assert(not first_sync_creating_subseries) + subseries_doc.relateddocument_set.filter(target=subdoc).delete() + rfc_events.append(doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Removed {doc.name} from {subseries_doc.name}")) + subseries_doc.docevent_set.create(type="sync_from_rfc_editor", by=system, desc=f"Removed {doc.name} from {subseries_doc.name}") + + doc_errata = errata.get(f"RFC{rfc_number}", []) + all_rejected = doc_errata and all( + er["errata_status_code"] == "Rejected" for er in doc_errata + ) if has_errata and not all_rejected: if not doc.tags.filter(pk=tag_has_errata.pk).exists(): doc.tags.add(tag_has_errata) - changes.append("added Errata tag") - has_verified_errata = 
any([ er['errata_status_code']=='Verified' for er in doc_errata ]) - if has_verified_errata and not doc.tags.filter(pk=tag_has_verified_errata.pk).exists(): + rfc_changes.append("added Errata tag") + has_verified_errata = any( + [er["errata_status_code"] == "Verified" for er in doc_errata] + ) + if ( + has_verified_errata + and not doc.tags.filter(pk=tag_has_verified_errata.pk).exists() + ): doc.tags.add(tag_has_verified_errata) - changes.append("added Verified Errata tag") + rfc_changes.append("added Verified Errata tag") else: if doc.tags.filter(pk=tag_has_errata.pk): doc.tags.remove(tag_has_errata) if all_rejected: - changes.append("removed Errata tag (all errata rejected)") + rfc_changes.append("removed Errata tag (all errata rejected)") else: - changes.append("removed Errata tag") + rfc_changes.append("removed Errata tag") if doc.tags.filter(pk=tag_has_verified_errata.pk): doc.tags.remove(tag_has_verified_errata) - changes.append("removed Verified Errata tag") + rfc_changes.append("removed Verified Errata tag") - if changes: - events.append(DocEvent.objects.create( - doc=doc, - rev=doc.rev, - by=system, - type="sync_from_rfc_editor", - desc="Received changes through RFC Editor sync (%s)" % ", ".join(changes), - )) + if rfc_changes: + rfc_events.append( + DocEvent.objects.create( + doc=doc, + rev=doc.rev, + by=system, + type="sync_from_rfc_editor", + desc=f"Received changes through RFC Editor sync ({', '.join(rfc_changes)})", + ) + ) + doc.save_with_history(rfc_events) + yield rfc_number, rfc_changes, doc, rfc_published # yield changes to the RFC + + if first_sync_creating_subseries: + # First - create the known subseries documents that have ghosted. + # The RFC editor (as of 31 Oct 2023) claims these subseries docs do not exist. + # The datatracker, on the other hand, will say that the series doc currently contains no RFCs. + for name in ["fyi17", "std1", "bcp12", "bcp113", "bcp66"]: + # Leaving most things to the default intentionally + # Of note, title and stream are left to the defaults of "" and none. 
+ subseries_doc, created = Document.objects.get_or_create(type_id=name[:3], name=name) + if not created: + log(f"Warning: {name} unexpectedly already exists") + else: + subseries_slug = name[:3] + subseries_doc.docevent_set.create(type=f"{subseries_slug}_history_marker", by=system, desc=f"No history of this {subseries_slug.upper()} document is currently available in the datatracker before this point") - doc.save_with_history(events) - if changes: - yield changes, doc, rfc_published + RelatedDocument.objects.filter( + Q(originaltargetaliasname__startswith="bcp") | + Q(originaltargetaliasname__startswith="std") | + Q(originaltargetaliasname__startswith="fyi") + ).annotate( + subseries_target=Subquery( + Document.objects.filter(name=OuterRef("originaltargetaliasname")).values_list("pk",flat=True)[:1] + ) + ).update(target=F("subseries_target")) + RelatedDocHistory.objects.filter( + Q(originaltargetaliasname__startswith="bcp") | + Q(originaltargetaliasname__startswith="std") | + Q(originaltargetaliasname__startswith="fyi") + ).annotate( + subseries_target=Subquery( + Document.objects.filter(name=OuterRef("originaltargetaliasname")).values_list("pk",flat=True)[:1] + ) + ).update(target=F("subseries_target")) + IprDocRel.objects.filter( + Q(originaldocumentaliasname__startswith="bcp") | + Q(originaldocumentaliasname__startswith="std") | + Q(originaldocumentaliasname__startswith="fyi") + ).annotate( + subseries_target=Subquery( + Document.objects.filter(name=OuterRef("originaldocumentaliasname")).values_list("pk",flat=True)[:1] + ) + ).update(document=F("subseries_target")) def post_approved_draft(url, name): diff --git a/ietf/sync/tests.py b/ietf/sync/tests.py index f245145d2..6ac8f4afb 100644 --- a/ietf/sync/tests.py +++ b/ietf/sync/tests.py @@ -6,6 +6,7 @@ import os import io import json import datetime +import mock import quopri from django.conf import settings @@ -14,8 +15,8 @@ from django.utils import timezone import debug # pyflakes:ignore -from ietf.doc.factories import WgDraftFactory -from ietf.doc.models import Document, DocAlias, DocEvent, DeletedEvent, DocTagName, RelatedDocument, State, StateDocEvent +from ietf.doc.factories import WgDraftFactory, RfcFactory +from ietf.doc.models import Document, DocEvent, DeletedEvent, DocTagName, RelatedDocument, State, StateDocEvent from ietf.doc.utils import add_state_change_event from ietf.group.factories import GroupFactory from ietf.person.models import Person @@ -29,19 +30,20 @@ from ietf.utils.timezone import date_today, RPC_TZINFO class IANASyncTests(TestCase): def test_protocol_page_sync(self): draft = WgDraftFactory() - DocAlias.objects.create(name="rfc1234").docs.add(draft) - DocEvent.objects.create(doc=draft, rev=draft.rev, type="published_rfc", by=Person.objects.get(name="(System)")) + rfc = RfcFactory(rfc_number=1234) + draft.relateddocument_set.create(relationship_id="became_rfc", target = rfc) + DocEvent.objects.create(doc=rfc, rev="", type="published_rfc", by=Person.objects.get(name="(System)")) rfc_names = iana.parse_protocol_page('RFC 1234') self.assertEqual(len(rfc_names), 1) self.assertEqual(rfc_names[0], "rfc1234") iana.update_rfc_log_from_protocol_page(rfc_names, timezone.now() - datetime.timedelta(days=1)) - self.assertEqual(DocEvent.objects.filter(doc=draft, type="rfc_in_iana_registry").count(), 1) + self.assertEqual(DocEvent.objects.filter(doc=rfc, type="rfc_in_iana_registry").count(), 1) # make sure it doesn't create duplicates iana.update_rfc_log_from_protocol_page(rfc_names, timezone.now() - datetime.timedelta(days=1)) 
- self.assertEqual(DocEvent.objects.filter(doc=draft, type="rfc_in_iana_registry").count(), 1) + self.assertEqual(DocEvent.objects.filter(doc=rfc, type="rfc_in_iana_registry").count(), 1) def test_changes_sync(self): draft = WgDraftFactory(ad=Person.objects.get(user__username='ad')) @@ -226,18 +228,16 @@ class RFCSyncTests(TestCase): def test_rfc_index(self): area = GroupFactory(type_id='area') - doc = WgDraftFactory( + draft_doc = WgDraftFactory( group__parent=area, - states=[('draft-iesg','rfcqueue'),('draft-stream-ise','rfc-edit')], + states=[('draft-iesg','rfcqueue')], ad=Person.objects.get(user__username='ad'), + external_url="http://my-external-url.example.com", + note="this is a note", ) - # it's a bit strange to have draft-stream-ise set when draft-iesg is set - # too, but for testing purposes ... - doc.action_holders.add(doc.ad) # not normally set, but add to be sure it's cleared + draft_doc.action_holders.add(draft_doc.ad) # not normally set, but add to be sure it's cleared - updated_doc = Document.objects.create(name="draft-ietf-something") - DocAlias.objects.create(name=updated_doc.name).docs.add(updated_doc) - DocAlias.objects.create(name="rfc123").docs.add(updated_doc) + RfcFactory(rfc_number=123) today = date_today() @@ -260,7 +260,7 @@ class RFCSyncTests(TestCase): - STD0001 + STD0002 Test RFC1234 @@ -300,14 +300,14 @@ class RFCSyncTests(TestCase): ''' % dict(year=today.strftime("%Y"), month=today.strftime("%B"), - name=doc.name, - rev=doc.rev, - area=doc.group.parent.acronym, - group=doc.group.acronym) + name=draft_doc.name, + rev=draft_doc.rev, + area=draft_doc.group.parent.acronym, + group=draft_doc.group.acronym) errata = [{ "errata_id":1, - "doc-id":"RFC123", + "doc-id":"RFC123", # n.b. this is not the same RFC as in the above index XML! 
"errata_status_code":"Verified", "errata_type_code":"Editorial", "section": "4.1", @@ -323,7 +323,6 @@ class RFCSyncTests(TestCase): data = rfceditor.parse_index(io.StringIO(t)) self.assertEqual(len(data), 1) - rfc_number, title, authors, rfc_published_date, current_status, updates, updated_by, obsoletes, obsoleted_by, also, draft, has_errata, stream, wg, file_formats, pages, abstract = data[0] # currently, we only check what we actually use @@ -333,45 +332,115 @@ class RFCSyncTests(TestCase): self.assertEqual(rfc_published_date.month, today.month) self.assertEqual(current_status, "Proposed Standard") self.assertEqual(updates, ["RFC123"]) - self.assertEqual(set(also), set(["BCP1", "FYI1", "STD1"])) - self.assertEqual(draft, doc.name) - self.assertEqual(wg, doc.group.acronym) + self.assertEqual(set(also), set(["BCP1", "FYI1", "STD2"])) + self.assertEqual(draft, draft_doc.name) + self.assertEqual(wg, draft_doc.group.acronym) self.assertEqual(has_errata, True) self.assertEqual(stream, "IETF") self.assertEqual(pages, "42") self.assertEqual(abstract, "This is some interesting text.") - draft_filename = "%s-%s.txt" % (doc.name, doc.rev) + draft_filename = "%s-%s.txt" % (draft_doc.name, draft_doc.rev) self.write_draft_file(draft_filename, 5000) + event_count_before = draft_doc.docevent_set.count() + draft_title_before = draft_doc.title + draft_abstract_before = draft_doc.abstract + draft_pages_before = draft_doc.pages + changes = [] - for cs, d, rfc_published in rfceditor.update_docs_from_rfc_index(data, errata, today - datetime.timedelta(days=30)): - changes.append(cs) + with mock.patch("ietf.sync.rfceditor.log") as mock_log: + for rfc_number, _, d, rfc_published in rfceditor.update_docs_from_rfc_index(data, errata, today - datetime.timedelta(days=30)): + changes.append({"doc_pk": d.pk, "rfc_published": rfc_published}) # we ignore the actual change list + self.assertEqual(rfc_number, 1234) + if rfc_published: + self.assertEqual(d.type_id, "rfc") + self.assertEqual(d.rfc_number, rfc_number) + else: + self.assertEqual(d.type_id, "draft") + self.assertIsNone(d.rfc_number) + + self.assertFalse(mock_log.called, "No log messages expected") - doc = Document.objects.get(name=doc.name) - - events = doc.docevent_set.all() - self.assertEqual(events[0].type, "sync_from_rfc_editor") - self.assertEqual(events[1].type, "changed_action_holders") - self.assertEqual(events[2].type, "published_rfc") - self.assertEqual(events[2].time.astimezone(RPC_TZINFO).date(), today) - self.assertTrue("errata" in doc.tags.all().values_list("slug", flat=True)) - self.assertTrue(DocAlias.objects.filter(name="rfc1234", docs=doc)) - self.assertTrue(DocAlias.objects.filter(name="bcp1", docs=doc)) - self.assertTrue(DocAlias.objects.filter(name="fyi1", docs=doc)) - self.assertTrue(DocAlias.objects.filter(name="std1", docs=doc)) - self.assertTrue(RelatedDocument.objects.filter(source=doc, target__name="rfc123", relationship="updates")) - self.assertEqual(doc.title, "A Testing RFC") - self.assertEqual(doc.abstract, "This is some interesting text.") - self.assertEqual(doc.get_state_slug(), "rfc") - self.assertEqual(doc.get_state_slug("draft-iesg"), "pub") - self.assertCountEqual(doc.action_holders.all(), []) - self.assertEqual(doc.get_state_slug("draft-stream-ise"), "pub") - self.assertEqual(doc.std_level_id, "ps") - self.assertEqual(doc.pages, 42) + draft_doc = Document.objects.get(name=draft_doc.name) + draft_events = draft_doc.docevent_set.all() + self.assertEqual(len(draft_events) - event_count_before, 2) + 
self.assertEqual(draft_events[0].type, "sync_from_rfc_editor") + self.assertEqual(draft_events[1].type, "changed_action_holders") + self.assertEqual(draft_doc.get_state_slug(), "rfc") + self.assertEqual(draft_doc.get_state_slug("draft-iesg"), "pub") + self.assertCountEqual(draft_doc.action_holders.all(), []) + self.assertEqual(draft_doc.title, draft_title_before) + self.assertEqual(draft_doc.abstract, draft_abstract_before) + self.assertEqual(draft_doc.pages, draft_pages_before) self.assertTrue(not os.path.exists(os.path.join(settings.INTERNET_DRAFT_PATH, draft_filename))) self.assertTrue(os.path.exists(os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR, draft_filename))) + rfc_doc = Document.objects.filter(rfc_number=1234, type_id="rfc").first() + self.assertIsNotNone(rfc_doc, "RFC document should have been created") + rfc_events = rfc_doc.docevent_set.all() + self.assertEqual(len(rfc_events), 8) + expected_events = [ + ["sync_from_rfc_editor", ""], # Not looking for exact desc match here - see detailed tests below + ["sync_from_rfc_editor", "Imported membership of rfc1234 in std2 via sync to the rfc-index"], + ["std_history_marker", "No history of STD2 is currently available in the datatracker before this point"], + ["sync_from_rfc_editor", "Imported membership of rfc1234 in fyi1 via sync to the rfc-index"], + ["fyi_history_marker", "No history of FYI1 is currently available in the datatracker before this point"], + ["sync_from_rfc_editor", "Imported membership of rfc1234 in bcp1 via sync to the rfc-index"], + ["bcp_history_marker", "No history of BCP1 is currently available in the datatracker before this point"], + ["published_rfc", "RFC published"] + ] + for index, [event_type, desc] in enumerate(expected_events): + self.assertEqual(rfc_events[index].type, event_type) + if index == 0: + self.assertIn("Received changes through RFC Editor sync (created document RFC 1234,", rfc_events[0].desc) + self.assertIn(f"created became rfc relationship between {rfc_doc.came_from_draft().name} and RFC 1234", rfc_events[0].desc) + self.assertIn("set title to 'A Testing RFC'", rfc_events[0].desc) + self.assertIn("set abstract to 'This is some interesting text.'", rfc_events[0].desc) + self.assertIn("set pages to 42", rfc_events[0].desc) + self.assertIn("set standardization level to Proposed Standard", rfc_events[0].desc) + self.assertIn(f"added RFC published event at {rfc_events[0].time.astimezone(RPC_TZINFO):%Y-%m-%d}", rfc_events[0].desc) + self.assertIn("created updates relation between RFC 1234 and RFC 123", rfc_events[0].desc) + self.assertIn("added Errata tag", rfc_events[0].desc) + else: + self.assertEqual(rfc_events[index].desc, desc) + self.assertEqual(rfc_events[7].time.astimezone(RPC_TZINFO).date(), today) + for subseries_name in ["bcp1", "fyi1", "std2"]: + sub = Document.objects.filter(type_id=subseries_name[:3],name=subseries_name).first() + self.assertIsNotNone(sub, f"{subseries_name} not created") + self.assertTrue(rfc_doc in sub.contains()) + self.assertTrue(sub in rfc_doc.part_of()) + self.assertEqual(rfc_doc.get_state_slug(), "published") + # Should have an "errata" tag because there is an errata-url in the index XML, but no "verified-errata" tag + # because there is no verified item in the errata JSON with doc-id matching the RFC document. 
+ tag_slugs = rfc_doc.tags.values_list("slug", flat=True) + self.assertTrue("errata" in tag_slugs) + self.assertFalse("verified-errata" in tag_slugs) + # TODO: adjust these when we have subseries document types + # self.assertTrue(DocAlias.objects.filter(name="rfc1234", docs=rfc_doc)) + # self.assertTrue(DocAlias.objects.filter(name="bcp1", docs=rfc_doc)) + # self.assertTrue(DocAlias.objects.filter(name="fyi1", docs=rfc_doc)) + # self.assertTrue(DocAlias.objects.filter(name="std1", docs=rfc_doc)) + self.assertTrue(RelatedDocument.objects.filter(source=rfc_doc, target__name="rfc123", relationship="updates").exists()) + self.assertTrue(RelatedDocument.objects.filter(source=draft_doc, target=rfc_doc, relationship="became_rfc").exists()) + self.assertEqual(rfc_doc.title, "A Testing RFC") + self.assertEqual(rfc_doc.abstract, "This is some interesting text.") + self.assertEqual(rfc_doc.std_level_id, "ps") + self.assertEqual(rfc_doc.pages, 42) + self.assertEqual(rfc_doc.stream, draft_doc.stream) + self.assertEqual(rfc_doc.group, draft_doc.group) + self.assertEqual(rfc_doc.words, draft_doc.words) + self.assertEqual(rfc_doc.ad, draft_doc.ad) + self.assertEqual(rfc_doc.external_url, draft_doc.external_url) + self.assertEqual(rfc_doc.note, draft_doc.note) + + # check that we got the expected changes + self.assertEqual(len(changes), 2) + self.assertEqual(changes[0]["doc_pk"], draft_doc.pk) + self.assertEqual(changes[0]["rfc_published"], False) + self.assertEqual(changes[1]["doc_pk"], rfc_doc.pk) + self.assertEqual(changes[1]["rfc_published"], True) + # make sure we can apply it again with no changes changed = list(rfceditor.update_docs_from_rfc_index(data, errata, today - datetime.timedelta(days=30))) self.assertEqual(len(changed), 0) @@ -602,4 +671,4 @@ class RFCEditorUndoTests(TestCase): e = DeletedEvent.objects.all().order_by("-time", "-id")[0] e.content_type.model_class().objects.create(**json.loads(e.json)) - self.assertTrue(StateDocEvent.objects.filter(desc="First", doc=draft)) \ No newline at end of file + self.assertTrue(StateDocEvent.objects.filter(desc="First", doc=draft)) diff --git a/ietf/templates/base.html b/ietf/templates/base.html index 7dc552268..ccecd8eb1 100644 --- a/ietf/templates/base.html +++ b/ietf/templates/base.html @@ -68,7 +68,7 @@ diff --git a/ietf/templates/base/menu.html b/ietf/templates/base/menu.html index 8d70fd481..714c98b57 100644 --- a/ietf/templates/base/menu.html +++ b/ietf/templates/base/menu.html @@ -211,6 +211,27 @@ Editorial + {% if flavor == 'top' %} +
  • +
  • + {% endif %} +
  • + Subseries +
  • +
  • + + STD + + + BCP + + + FYI + +
  • {% if flavor == 'top' %} diff --git a/ietf/templates/doc/ballot/approvaltext.html b/ietf/templates/doc/ballot/approvaltext.html index cc47c1715..3cb632b8f 100644 --- a/ietf/templates/doc/ballot/approvaltext.html +++ b/ietf/templates/doc/ballot/approvaltext.html @@ -29,7 +29,7 @@ href="{% url 'ietf.doc.views_ballot.approve_ballot' name=doc.name %}">Approve ballot {% endif %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/approve_ballot.html b/ietf/templates/doc/ballot/approve_ballot.html index ff8ab0894..30dd05fa4 100644 --- a/ietf/templates/doc/ballot/approve_ballot.html +++ b/ietf/templates/doc/ballot/approve_ballot.html @@ -21,7 +21,7 @@ {% endif %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/approve_downrefs.html b/ietf/templates/doc/ballot/approve_downrefs.html index 6dea10a33..ad528c67b 100644 --- a/ietf/templates/doc/ballot/approve_downrefs.html +++ b/ietf/templates/doc/ballot/approve_downrefs.html @@ -12,16 +12,16 @@

    The ballot for - {{ doc }} + {{ doc }} was just approved.

    {% if not downrefs_to_rfc %}

    No downward references for - {{ doc }} + {{ doc }}

    Back + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}">Back {% else %}

    Add downward references to RFCs to the DOWNREF registry, if they were identified in the IETF Last Call and approved by the Sponsoring Area Director. @@ -41,7 +41,7 @@ {% csrf_token %} {% bootstrap_form approve_downrefs_form %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Add no DOWNREF entries diff --git a/ietf/templates/doc/ballot/ballot_issued.html b/ietf/templates/doc/ballot/ballot_issued.html index baf428ee6..dfa03896e 100644 --- a/ietf/templates/doc/ballot/ballot_issued.html +++ b/ietf/templates/doc/ballot/ballot_issued.html @@ -11,7 +11,7 @@

    Ballot for - {{ doc }} + {{ doc }} has been sent out.

    {% if doc.telechat_date %} @@ -24,5 +24,5 @@

    {% endif %} Back + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}">Back {% endblock %} diff --git a/ietf/templates/doc/ballot/clear_ballot.html b/ietf/templates/doc/ballot/clear_ballot.html index 1d5bc4600..09e7dfef1 100644 --- a/ietf/templates/doc/ballot/clear_ballot.html +++ b/ietf/templates/doc/ballot/clear_ballot.html @@ -14,14 +14,14 @@ {% csrf_token %}

    Clear the ballot for - {{ doc }}? + {{ doc }}?
    This will clear all ballot positions and discuss entries.

    + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/defer_ballot.html b/ietf/templates/doc/ballot/defer_ballot.html index 43fdc82a5..ae7099e9e 100644 --- a/ietf/templates/doc/ballot/defer_ballot.html +++ b/ietf/templates/doc/ballot/defer_ballot.html @@ -14,14 +14,14 @@ {% csrf_token %}

    Defer the ballot for - {{ doc }}? + {{ doc }}?
    The ballot will then be put on the IESG agenda of {{ telechat_date }}.

    + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/edit_position.html b/ietf/templates/doc/ballot/edit_position.html index bf119236f..293c18611 100644 --- a/ietf/templates/doc/ballot/edit_position.html +++ b/ietf/templates/doc/ballot/edit_position.html @@ -54,7 +54,7 @@ {% endif %} {% endif %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/lastcalltext.html b/ietf/templates/doc/ballot/lastcalltext.html index c1d7bd70d..fe2b884c2 100644 --- a/ietf/templates/doc/ballot/lastcalltext.html +++ b/ietf/templates/doc/ballot/lastcalltext.html @@ -39,7 +39,7 @@ href="{% url 'ietf.doc.views_ballot.make_last_call' name=doc.name %}">Issue last call {% endif %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/rfceditornote.html b/ietf/templates/doc/ballot/rfceditornote.html index b4cc9dadc..8a6d57379 100644 --- a/ietf/templates/doc/ballot/rfceditornote.html +++ b/ietf/templates/doc/ballot/rfceditornote.html @@ -31,7 +31,7 @@ Clear + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/send_ballot_comment.html b/ietf/templates/doc/ballot/send_ballot_comment.html index ffd2d4119..1c5f52185 100644 --- a/ietf/templates/doc/ballot/send_ballot_comment.html +++ b/ietf/templates/doc/ballot/send_ballot_comment.html @@ -37,7 +37,7 @@ + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/undefer_ballot.html b/ietf/templates/doc/ballot/undefer_ballot.html index da74326a2..4e8669816 100644 --- a/ietf/templates/doc/ballot/undefer_ballot.html +++ b/ietf/templates/doc/ballot/undefer_ballot.html @@ -19,7 +19,7 @@

    + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/ballot/writeupnotes.html b/ietf/templates/doc/ballot/writeupnotes.html index 9c52493d8..925387d28 100644 --- a/ietf/templates/doc/ballot/writeupnotes.html +++ b/ietf/templates/doc/ballot/writeupnotes.html @@ -32,7 +32,7 @@ Save & {% if ballot_issued %}re-{% endif %}issue ballot + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/bofreq/change_editors.html b/ietf/templates/doc/bofreq/change_editors.html index 98c4ddb3a..0c30cdecb 100644 --- a/ietf/templates/doc/bofreq/change_editors.html +++ b/ietf/templates/doc/bofreq/change_editors.html @@ -16,7 +16,7 @@ {% bootstrap_form form %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/bofreq/change_responsible.html b/ietf/templates/doc/bofreq/change_responsible.html index aabb0048e..8c51c6e1f 100644 --- a/ietf/templates/doc/bofreq/change_responsible.html +++ b/ietf/templates/doc/bofreq/change_responsible.html @@ -16,7 +16,7 @@ {% bootstrap_form form %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/change_ad.html b/ietf/templates/doc/change_ad.html index 24e808951..9cc743757 100644 --- a/ietf/templates/doc/change_ad.html +++ b/ietf/templates/doc/change_ad.html @@ -15,7 +15,7 @@ {% bootstrap_form form %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/change_title.html b/ietf/templates/doc/change_title.html index 25026be35..14d7956cf 100644 --- a/ietf/templates/doc/change_title.html +++ b/ietf/templates/doc/change_title.html @@ -15,7 +15,7 @@ {% bootstrap_form form %} + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back diff --git a/ietf/templates/doc/charter/action_announcement_text.html b/ietf/templates/doc/charter/action_announcement_text.html index 88a1b6141..e087b175b 100644 --- a/ietf/templates/doc/charter/action_announcement_text.html +++ b/ietf/templates/doc/charter/action_announcement_text.html @@ -21,7 +21,7 @@ {% if user|has_role:"Secretariat" %} + href="{% url 'ietf.doc.views_charter.approve' name=charter.name %}"> Charter approval page {% endif %} diff --git a/ietf/templates/doc/charter/approve.html b/ietf/templates/doc/charter/approve.html index f109da687..2a8654482 100644 --- a/ietf/templates/doc/charter/approve.html +++ b/ietf/templates/doc/charter/approve.html @@ -2,16 +2,16 @@ {# Copyright The IETF Trust 2015, All Rights Reserved #} {% load origin %} {% load django_bootstrap5 %} -{% block title %}Approve {{ charter.canonical_name }}{% endblock %} +{% block title %}Approve {{ charter.name }}{% endblock %} {% block content %} {% origin %} -

    Approve {{ charter.canonical_name }}-{{ charter.rev }}

    +

    Approve {{ charter.name }}-{{ charter.rev }}

    {% csrf_token %}
    {{ announcement }}
    + href="{% url "ietf.doc.views_charter.action_announcement_text" name=charter.name %}?next=approve"> Edit/regenerate announcement Change responsible AD
    - {{ charter.canonical_name }}-{{ charter.rev }} + {{ charter.name }}-{{ charter.rev }} {% csrf_token %} {% bootstrap_form form %}
    + href="{% url "ietf.doc.views_doc.document_main" name=charter.name %}"> Back
    diff --git a/ietf/templates/doc/conflict_review/approval_text.txt b/ietf/templates/doc/conflict_review/approval_text.txt index 8e211e21c..a52ac11a7 100644 --- a/ietf/templates/doc/conflict_review/approval_text.txt +++ b/ietf/templates/doc/conflict_review/approval_text.txt @@ -1,9 +1,9 @@ {% load ietf_filters %}{% load mail_filters %}{% autoescape off %}From: The IESG To: {{ to }} Cc: {{ cc }} -Subject: Results of IETF-conflict review for {{conflictdoc.canonical_name}}-{{conflictdoc.rev}} +Subject: Results of IETF-conflict review for {{conflictdoc.name}}-{{conflictdoc.rev}} -{% filter wordwrap:78 %}The IESG has completed a review of {{conflictdoc.canonical_name}}-{{conflictdoc.rev}} consistent with RFC5742. +{% filter wordwrap:78 %}The IESG has completed a review of {{conflictdoc.name}}-{{conflictdoc.rev}} consistent with RFC5742. {% if review.get_state_slug == 'appr-reqnopub-pend' %} The IESG recommends that '{{ conflictdoc.title }}' {{ conflictdoc.file_tag|safe }} NOT be published as {{ conflictdoc|std_level_prompt_with_article }}. diff --git a/ietf/templates/doc/conflict_review/approve.html b/ietf/templates/doc/conflict_review/approve.html index 5283587f0..ccbac9c4c 100644 --- a/ietf/templates/doc/conflict_review/approve.html +++ b/ietf/templates/doc/conflict_review/approve.html @@ -2,10 +2,10 @@ {# Copyright The IETF Trust 2015, All Rights Reserved #} {% load origin %} {% load django_bootstrap5 %} -{% block title %}Approve {{ review.canonical_name }}{% endblock %} +{% block title %}Approve {{ review.name }}{% endblock %} {% block content %} {% origin %} -

    Approve {{ review.canonical_name }}

    +

    Approve {{ review.name }}

    {% csrf_token %} {% bootstrap_form form %} diff --git a/ietf/templates/doc/conflict_review/start.html b/ietf/templates/doc/conflict_review/start.html index 9b96ec617..d8abc2b81 100644 --- a/ietf/templates/doc/conflict_review/start.html +++ b/ietf/templates/doc/conflict_review/start.html @@ -3,13 +3,13 @@ {% load origin %} {% load django_bootstrap5 %} {% load ietf_filters %} -{% block title %}Begin IETF conflict review for {{ doc_to_review.canonical_name }}-{{ doc_to_review.rev }}{% endblock %} +{% block title %}Begin IETF conflict review for {{ doc_to_review.name }}-{{ doc_to_review.rev }}{% endblock %} {% block content %} {% origin %}

    Begin IETF conflict review
    - {{ doc_to_review.canonical_name }}-{{ doc_to_review.rev }} + {{ doc_to_review.name }}-{{ doc_to_review.rev }}

    {% if user|has_role:"Secretariat" %}

    diff --git a/ietf/templates/doc/conflict_review/submit.html b/ietf/templates/doc/conflict_review/submit.html index 586de9356..8259c6b12 100644 --- a/ietf/templates/doc/conflict_review/submit.html +++ b/ietf/templates/doc/conflict_review/submit.html @@ -2,16 +2,16 @@ {# Copyright The IETF Trust 2015, All Rights Reserved #} {% load origin %} {% load django_bootstrap5 %} -{% block title %}Edit conflict review for {{ conflictdoc.canonical_name }}-{{ conflictdoc.rev }}{% endblock %} +{% block title %}Edit conflict review for {{ conflictdoc.name }}-{{ conflictdoc.rev }}{% endblock %} {% block content %} {% origin %}

    Edit conflict review
    - {{ conflictdoc.canonical_name }}-{{ conflictdoc.rev }} + {{ conflictdoc.name }}-{{ conflictdoc.rev }}

    - The text will be submitted as {{ review.canonical_name }}-{{ next_rev }} + The text will be submitted as {{ review.name }}-{{ next_rev }}

    {% csrf_token %} @@ -27,7 +27,7 @@ Reset to template text + href="{% url "ietf.doc.views_doc.document_main" name=review.name %}"> Back
    diff --git a/ietf/templates/doc/document_bibtex.bib b/ietf/templates/doc/document_bibtex.bib index 360b73fd7..5e52ec3c5 100644 --- a/ietf/templates/doc/document_bibtex.bib +++ b/ietf/templates/doc/document_bibtex.bib @@ -3,7 +3,7 @@ {% load ietf_filters %} {% load textfilters %} -{% if doc.get_state_slug == "rfc" %} +{% if doc.type_id == "rfc" %} {% if doc.stream|slugify == "legacy" %} % Datatracker information for RFCs on the Legacy Stream is unfortunately often % incorrect. Please correct the bibtex below based on the information in the @@ -16,7 +16,7 @@ publisher = {RFC Editor}, doi = {% templatetag openbrace %}{{ doi }}{% templatetag closebrace %}, url = {% templatetag openbrace %}{{ doc.rfc_number|rfceditor_info_url }}{% templatetag closebrace %},{% else %} -{% if published %}%% You should probably cite rfc{{ latest_revision.doc.rfc_number }} instead of this I-D.{% else %}{% if replaced_by %}%% You should probably cite {{replaced_by|join:" or "}} instead of this I-D.{% else %} +{% if published_as %}%% You should probably cite rfc{{ published_as.rfc_number }} instead of this I-D.{% else %}{% if replaced_by %}%% You should probably cite {{replaced_by|join:" or "}} instead of this I-D.{% else %} {% if doc.rev != latest_revision.rev %}%% You should probably cite {{latest_revision.doc.name}}-{{latest_revision.rev}} instead of this revision.{%endif%}{% endif %}{% endif %} @techreport{% templatetag openbrace %}{{doc.name|slice:"6:"}}-{{doc.rev}}, number = {% templatetag openbrace %}{{doc.name}}-{{doc.rev}}{% templatetag closebrace %}, @@ -29,7 +29,7 @@ title = {% templatetag openbrace %}{% templatetag openbrace %}{{doc.title|texescape}}{% templatetag closebrace %}{% templatetag closebrace %}, pagetotal = {{ doc.pages }}, year = {{ doc.pub_date.year }}, - month = {{ doc.pub_date|date:"b" }},{% if not doc.rfc_number or doc.pub_date.day == 1 and doc.pub_date.month == 4 %} + month = {{ doc.pub_date|date:"b" }},{% if not doc.type_id == "rfc" or doc.pub_date.day == 1 and doc.pub_date.month == 4 %} day = {{ doc.pub_date.day }},{% endif %} abstract = {% templatetag openbrace %}{{ doc.abstract|clean_whitespace|texescape }}{% templatetag closebrace %}, {% templatetag closebrace %} diff --git a/ietf/templates/doc/document_charter.html b/ietf/templates/doc/document_charter.html index 64765653e..7564e1d21 100644 --- a/ietf/templates/doc/document_charter.html +++ b/ietf/templates/doc/document_charter.html @@ -227,7 +227,7 @@ {% if doc.rev != "" %}
    - {{ doc.canonical_name }}-{{ doc.rev }} + {{ doc.name }}-{{ doc.rev }}
    {{ content }} diff --git a/ietf/templates/doc/document_conflict_review.html b/ietf/templates/doc/document_conflict_review.html index f70f5925f..8a2361832 100644 --- a/ietf/templates/doc/document_conflict_review.html +++ b/ietf/templates/doc/document_conflict_review.html @@ -27,8 +27,8 @@ - - {% if conflictdoc.get_state_slug == 'rfc' %}{{ conflictdoc.canonical_name|prettystdname }}{% else %}{{ conflictdoc.canonical_name }}-{{ conflictdoc.rev }}{% endif %} + + {% if conflictdoc.type_id == 'rfc' %}{{ conflictdoc.name|prettystdname }}{% else %}{{ conflictdoc.name }}-{{ conflictdoc.rev }}{% endif %} {{ conflictdoc.stream }} stream {% if snapshot %}Snapshot{% endif %} diff --git a/ietf/templates/doc/document_draft.html b/ietf/templates/doc/document_draft.html index 09cab6920..6eeacfc38 100644 --- a/ietf/templates/doc/document_draft.html +++ b/ietf/templates/doc/document_draft.html @@ -13,11 +13,11 @@ title="Document changes" href="/feed/document-changes/{{ name }}/"> + content="{{ doc.title }} {% if doc.type_id == 'rfc' and not snapshot %}(RFC {{ rfc_number }}{% if published %}, {{ doc.pub_date|date:'F Y' }}{% endif %}{% if obsoleted_by %}; obsoleted by {% for rel in obsoleted_by %}{{ rel.source.name|prettystdname}}{% if not forloop.last%}, {% endif %}{% endfor %}{% endif %}){% endif %}"> {% endblock %} {% block morecss %}.inline { display: inline; }{% endblock %} {% block title %} - {% if doc.get_state_slug == "rfc" and not snapshot %} + {% if doc.type_id == "rfc" and not snapshot %} RFC {{ rfc_number }} - {{ doc.title }} {% else %} {{ name }}-{{ doc.rev }} - {{ doc.title }} @@ -31,7 +31,7 @@ {% if doc.rev != latest_rev %}
    The information below is for an old version of the document.
    {% else %} - {% if doc.get_state_slug == "rfc" and snapshot %} + {% if doc.type_id == "rfc" and snapshot %}
    The information below is for an old version of the document that is already published as an RFC.
    @@ -637,14 +637,14 @@ {% endif %} References @@ -751,10 +751,10 @@ {% endfor %} {% endif %}
    - {% if doc.get_state_slug == "active" or doc.get_state_slug == "rfc" %} + {% if doc.get_state_slug == "active" or doc.type_id == "rfc" %}
    - {% if doc.get_state_slug == "rfc" and not snapshot %} + {% if doc.type_id == "rfc" and not snapshot %} RFC {{ rfc_number }} {% else %} {{ name }}-{{ doc.rev }} diff --git a/ietf/templates/doc/document_format_buttons.html b/ietf/templates/doc/document_format_buttons.html index 2c79c9264..043b9a797 100644 --- a/ietf/templates/doc/document_format_buttons.html +++ b/ietf/templates/doc/document_format_buttons.html @@ -4,11 +4,11 @@ {% if label != skip_format %} - + {% endblock %} {% block content %} {% origin %} @@ -21,6 +21,27 @@ {% include "doc/document_history_form.html" with doc=doc diff_revisions=diff_revisions action=rfcdiff_base_url snapshot=snapshot only %} {% endif %}

    Document history

    + {% if doc.came_from_draft %} +
    + {% endif %} + {% if doc.became_rfc %} + + {% endif %} + {% if can_add_comment %}
    Date - Rev. + {% if doc.type_id not in "rfc,bcp,std,fyi" %}Rev.{% endif %} By Action @@ -45,7 +66,7 @@
    {{ e.time|date:"Y-m-d" }}
    - {{ e.rev }} + {% if doc.type_id not in "rfc,bcp,std,fyi" %}{{ e.rev }}{% endif %} {{ e.by|escape }} {{ e.desc|format_history_text }} diff --git a/ietf/templates/doc/document_history_form.html b/ietf/templates/doc/document_history_form.html index 30e0d2800..646da0038 100644 --- a/ietf/templates/doc/document_history_form.html +++ b/ietf/templates/doc/document_history_form.html @@ -11,7 +11,6 @@
    {% endif %} -{% with prev_rev=doc.rev|add:"-1"|stringformat:"02d" %} {{ doc.title }}

    - The text will be submitted as {{ doc.canonical_name }}-{{ next_rev }}. + The text will be submitted as {{ doc.name }}-{{ next_rev }}.

    {% csrf_token %} @@ -27,7 +27,7 @@ Reset to template text + href="{% url "ietf.doc.views_doc.document_main" name=doc.name %}"> Back
    diff --git a/ietf/templates/group/edit_milestones.html b/ietf/templates/group/edit_milestones.html index 297b92501..b576ace18 100644 --- a/ietf/templates/group/edit_milestones.html +++ b/ietf/templates/group/edit_milestones.html @@ -14,8 +14,8 @@ {{ group.acronym }} {{ group.type.name }} {% if group.charter %} - {{ group.charter.canonical_name }} + href="{% url "ietf.doc.views_doc.document_main" name=group.charter.name %}"> + {{ group.charter.name }} {% endif %} {% if can_change_uses_milestone_dates %} @@ -106,7 +106,7 @@
    + href="{% if milestone_set == "charter" %}{% url "ietf.doc.views_doc.document_main" name=group.charter.name %}{% else %}{{ group.about_url }}{% endif %}"> Cancel
    diff --git a/ietf/templates/iesg/agenda_conflict_doc.txt b/ietf/templates/iesg/agenda_conflict_doc.txt index c6be94f8b..e4fbcc32b 100644 --- a/ietf/templates/iesg/agenda_conflict_doc.txt +++ b/ietf/templates/iesg/agenda_conflict_doc.txt @@ -1,7 +1,7 @@ {% load ietf_filters %}{% with doc.conflictdoc as conflictdoc %} - o {{ doc.canonical_name }}-{{ doc.rev }} + o {{ doc.name }}-{{ doc.rev }} {% filter wordwrap:"68"|indent|indent %}{{ doc.title }}{% endfilter %} - {{ conflictdoc.canonical_name }}-{{ conflictdoc.rev }} + {{ conflictdoc.name }}-{{ conflictdoc.rev }} {% filter wordwrap:"66"|indent:"4" %}{{ conflictdoc.title }} ({{ conflictdoc.stream }}: {{ conflictdoc.intended_std_level }}){% endfilter %} Token: {{ doc.ad }} {% with doc.active_defer_event as defer %}{% if defer %} Was deferred by {{defer.by}} on {{defer.time|date:"Y-m-d"}}{% endif %}{% endwith %}{% endwith %} diff --git a/ietf/templates/iesg/agenda_doc.html b/ietf/templates/iesg/agenda_doc.html index 16b223186..ae78d96d6 100644 --- a/ietf/templates/iesg/agenda_doc.html +++ b/ietf/templates/iesg/agenda_doc.html @@ -19,9 +19,9 @@ {% with doc.rfc_number as rfc_number %} {% endwith %} - {{ doc.canonical_name }} + {{ doc.name }} {% if doc.has_rfc_editor_note %} - + (Has RFC Editor Note) {% endif %} diff --git a/ietf/templates/iesg/agenda_doc.txt b/ietf/templates/iesg/agenda_doc.txt index 7e90885d3..015af3240 100644 --- a/ietf/templates/iesg/agenda_doc.txt +++ b/ietf/templates/iesg/agenda_doc.txt @@ -1,5 +1,5 @@ {% load ietf_filters %}{% with doc.rfc_number as rfc_number %} - o {{doc.canonical_name}}{% if not rfc_number %}-{{doc.rev}}{% endif %}{% endwith %}{%if doc.has_rfc_editor_note %} (Has RFC Editor Note){% endif %}{% if doc.stream %} - {{ doc.stream }} stream{% endif %} + o {{doc.name}}{% if not rfc_number %}-{{doc.rev}}{% endif %}{% endwith %}{%if doc.has_rfc_editor_note %} (Has RFC Editor Note){% endif %}{% if doc.stream %} - {{ doc.stream }} stream{% endif %} {% filter wordwrap:"68"|indent|indent %}{{ doc.title }} ({{ doc.intended_std_level }}){% endfilter %} Token: {{ doc.ad }}{% if doc.iana_review_state %} IANA Review: {{ doc.iana_review_state }}{% endif %}{% if doc.consensus %} diff --git a/ietf/templates/iesg/moderator_doc.html b/ietf/templates/iesg/moderator_doc.html index 3c497858e..1a06ffa1d 100644 --- a/ietf/templates/iesg/moderator_doc.html +++ b/ietf/templates/iesg/moderator_doc.html @@ -164,13 +164,13 @@ Parts Copyright (c) 2009 The IETF Trust, all rights reserved. {% if downrefs %}

    If APPROVED - The Secretariat will add to the downref registry:
 {% for ref in downrefs %}
-    + Add {{ref.target.document.canonical_name}}
-    ({{ref.target.document.std_level}} - {{ref.target.document.stream.desc}})
+    + Add {{ref.target.name}}
+    ({{ref.target.std_level}} - {{ref.target.stream.desc}})
     to downref registry.
-    {% if not ref.target.document.std_level %}
+    {% if not ref.target.std_level %}
     +++ Warning: The standards level has not been set yet!!!
 {% endif %}
-    {% if not ref.target.document.stream %}
+    {% if not ref.target.stream %}
     +++ Warning: document stream has not been set yet!!!
    {% endif %} {% endfor %}

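The downref listing above now reads fields straight off ref.target. A minimal sketch of the flattened access, under the assumption that the downrefs in question are RelatedDocument rows with the "refnorm" relationship slug (the view may compute them differently):

    from ietf.doc.models import RelatedDocument

    for ref in RelatedDocument.objects.filter(relationship_id="refnorm")[:5]:
        # Before: ref.target.document.std_level (the Document reached through a DocAlias)
        # After:  ref.target is the Document itself
        print(ref.target.name, ref.target.std_level, ref.target.stream)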
diff --git a/ietf/templates/ipr/details_view.html b/ietf/templates/ipr/details_view.html
index b36efd414..9ba63114a 100644
--- a/ietf/templates/ipr/details_view.html
+++ b/ietf/templates/ipr/details_view.html
@@ -383,7 +383,8 @@
 {{ iprdocrel.doc_type }}:
-    {{ iprdocrel.formatted_name|urlize_ietf_docs }} ("{{ iprdocrel.document.document.title }}")
+    {{ iprdocrel.formatted_name|urlize_ietf_docs }}
+    {% if iprdocrel.document.title %}("{{ iprdocrel.document.title }}"){% endif %}
 {% if iprdocrel.revisions %}
@@ -429,7 +430,8 @@
 {{ iprdocrel.doc_type }}:
-    {{ iprdocrel.formatted_name|urlize_ietf_docs }} ("{{ iprdocrel.document.document.title }}")
+    {{ iprdocrel.formatted_name|urlize_ietf_docs }}
+    {% if iprdocrel.document.title %}("{{ iprdocrel.document.title }}"){% endif %}
 {% if iprdocrel.revisions %}
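A minimal sketch of the collapsed IprDocRel access used above, assuming at least one disclosure with related documents exists in the database:

    from ietf.ipr.models import IprDocRel

    rel = IprDocRel.objects.select_related("document").first()
    if rel is not None:
        # Formerly rel.document.document.title (the Document behind a DocAlias);
        # now rel.document is the Document itself and the title may be blank,
        # which is why the template adds the {% if iprdocrel.document.title %} guard.
        print(rel.document.name, rel.document.title or "(no title)")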
diff --git a/ietf/templates/ipr/search_doc_list.html b/ietf/templates/ipr/search_doc_list.html
index f67f1e472..e67f67fec 100644
--- a/ietf/templates/ipr/search_doc_list.html
+++ b/ietf/templates/ipr/search_doc_list.html
@@ -13,9 +13,9 @@
 Please select one of following I-Ds:

diff --git a/ietf/templates/ipr/search_doc_result.html b/ietf/templates/ipr/search_doc_result.html
index 3848fbe08..dc7d8b95b 100644
--- a/ietf/templates/ipr/search_doc_result.html
+++ b/ietf/templates/ipr/search_doc_result.html
@@ -58,7 +58,7 @@
-    Results for {{ doc.name|prettystdname|urlize_ietf_docs }} ("{{ doc.document.title }}"){% if not forloop.first %}{% if doc.related %}, which was {{ doc.relation|lower }} {{ doc.related.source|prettystdname|urlize_ietf_docs }} ("{{ doc.related.source.title }}"){% endif %}{% endif %}
+    Results for {{ doc.name|prettystdname|urlize_ietf_docs }} ("{{ doc.title }}"){% if not forloop.first %}{% if doc.related %}, which was {{ doc.relation|lower }} {{ doc.related.source|prettystdname|urlize_ietf_docs }} ("{{ doc.related.source.title }}"){% endif %}{% endif %}
diff --git a/ietf/templates/ipr/search_doctitle_result.html b/ietf/templates/ipr/search_doctitle_result.html
index b7a8a8b31..57fa17e91 100644
--- a/ietf/templates/ipr/search_doctitle_result.html
+++ b/ietf/templates/ipr/search_doctitle_result.html
@@ -24,20 +24,20 @@
 Statement
-    {% for alias in docs %}
+    {% for doc in docs %}
-    IPR that is related to {{ alias.name|prettystdname:""|urlize_ietf_docs }} ("{{ alias.document.title }}")
-    {% if alias.related %}
-    that was {{ alias.relation|lower }} {{ alias.related.source.name|prettystdname:""|urlize_ietf_docs }} ("{{ alias.related.source.title }}")
+    IPR that is related to {{ doc.name|prettystdname:""|urlize_ietf_docs }} ("{{ doc.title }}")
+    {% if doc.related %}
+    that was {{ doc.relation|lower }} {{ doc.related.source.name|prettystdname:""|urlize_ietf_docs }} ("{{ doc.related.source.title }}")
     {% endif %}
-    {% if alias.document.ipr %}
-    {% for ipr in alias.document.ipr %}
+    {% if doc.ipr %}
+    {% for ipr in doc.ipr %}
     {{ ipr.disclosure.time|date:"Y-m-d" }}
     {{ ipr.disclosure.id }}
@@ -58,7 +58,7 @@
-    No IPR disclosures related to {{ alias.name|prettystdname|urlize_ietf_docs }} have been submitted.
+    No IPR disclosures related to {{ doc.name|prettystdname|urlize_ietf_docs }} have been submitted.
 {% endif %}
diff --git a/ietf/templates/ipr/search_result.html b/ietf/templates/ipr/search_result.html
index fc93ec92a..449a6f7f8 100644
--- a/ietf/templates/ipr/search_result.html
+++ b/ietf/templates/ipr/search_result.html
@@ -54,7 +54,7 @@
 is related to
 {% for item in iprdocrels %}
 {% if forloop.last and forloop.counter > 1 %}and{% endif %}
-    {{ item.formatted_name|urlize_ietf_docs }} ("{{ item.document.document.title }}"){% if not forloop.last and forloop.counter > 1 %},{% endif %}
+    {{ item.formatted_name|urlize_ietf_docs }}{% if item.document.title %} ("{{ item.document.title }}"){% endif %}{% if not forloop.last and forloop.counter > 1 %},{% endif %}
 {% endfor %}
 {% endif %}
 {% endwith %}
diff --git a/ietf/templates/ipr/search_wg_result.html b/ietf/templates/ipr/search_wg_result.html
index ae477a269..1561e05ba 100644
--- a/ietf/templates/ipr/search_wg_result.html
+++ b/ietf/templates/ipr/search_wg_result.html
@@ -20,22 +20,22 @@
 Statement
-    {% for alias in docs %}
+    {% for doc in docs %}
-    IPR related to {{ alias.name|prettystdname|urlize_ietf_docs }} ("{{ alias.document.title }}")
-    {% if alias.related %}
-    that was {{ alias.relation|lower }} {{ alias.related.source|prettystdname|urlize_ietf_docs }} ("{{ alias.related.source.title|escape }}")
+    IPR related to {{ doc.name|prettystdname|urlize_ietf_docs }} ("{{ doc.title }}")
+    {% if doc.related %}
+    that was {{ doc.relation|lower }} {{ doc.related.source|prettystdname|urlize_ietf_docs }} ("{{ doc.related.source.title|escape }}")
     {% endif %}
-    {% if alias.product_of_this_wg %}, a product of the {{ q }} WG{% endif %}
+    {% if doc.product_of_this_wg %}, a product of the {{ q }} WG{% endif %}
     :
-    {% if alias.document.ipr %}
-    {% for ipr in alias.document.ipr %}
+    {% if doc.ipr %}
+    {% for ipr in doc.ipr %}
     {{ ipr.disclosure.time|date:"Y-m-d" }}
     {{ ipr.disclosure.id }}
@@ -57,7 +57,7 @@
-    No IPR disclosures related to {{ alias.name|prettystdname|urlize_ietf_docs }} have been submitted.
+    No IPR disclosures related to {{ doc.name|prettystdname|urlize_ietf_docs }} have been submitted.
 {% endif %}
diff --git a/ietf/templates/meeting/activity_report.html b/ietf/templates/meeting/activity_report.html
index 1552bb468..0de8b88c6 100644
--- a/ietf/templates/meeting/activity_report.html
+++ b/ietf/templates/meeting/activity_report.html
@@ -45,7 +45,7 @@
 {% for rfc in rfcs %}
-    {{ rfc.doc.canonical_name|prettystdname }}
+    {{ rfc.doc.name|prettystdname }}
 {{ rfc.doc.intended_std_level.name }}
diff --git a/ietf/templates/meeting/group_materials.html b/ietf/templates/meeting/group_materials.html
index 52e77d83a..aea20827d 100644
--- a/ietf/templates/meeting/group_materials.html
+++ b/ietf/templates/meeting/group_materials.html
@@ -76,8 +76,8 @@
 {% endif %}
-    {{ draft.material.canonical_name }}
+    {{ draft.material.name }}
 {% empty %}
diff --git a/ietf/templates/meeting/group_proceedings.html b/ietf/templates/meeting/group_proceedings.html
index 92fe04675..618c28164 100644
--- a/ietf/templates/meeting/group_proceedings.html
+++ b/ietf/templates/meeting/group_proceedings.html
@@ -77,8 +77,8 @@
 {# drafts #}
 {% for draft in entry.drafts %}
-    {{ draft.material.canonical_name }}
+    {{ draft.material.name }}
 {% empty %}
diff --git a/ietf/templates/meeting/important-dates.html b/ietf/templates/meeting/important-dates.html
index 1d786ebc8..1d41b4a7f 100644
--- a/ietf/templates/meeting/important-dates.html
+++ b/ietf/templates/meeting/important-dates.html
@@ -1,7 +1,7 @@
 {% extends "base.html" %}
 {# Copyright The IETF Trust 2017, All Rights Reserved #}
 {% load origin %}
-{% load ietf_filters static textfilters ietf_filters %}
+{% load ietf_filters static textfilters htmlfilters %}
 {% block pagehead %}
 {% endblock %}
@@ -45,30 +45,7 @@
 {% endif %}
-    {{ d.name.desc|urlize_ietf_docs|linkify }}{% if d.name.desc|slice:"-1:" != "." %}.{% endif %}
-    {% if first and d.name.slug == 'openreg' or first and d.name.slug == 'earlybird' %}
-        Register here.
-    {% endif %}
-    {% if d.name.slug == 'opensched' %}
-        To request a Working Group session, use the
-        IETF Meeting Session Request Tool.
-        If you are working on a BOF request, it is highly recommended
-        to tell the IESG now by sending an email to
-        iesg@ietf.org
-        to get advance help with the request.
-    {% endif %}
-    {% if d.name.slug == 'cutoffwgreq' %}
-        To request a Working Group session, use the
-        IETF Meeting Session Request Tool.
-    {% endif %}
-    {% if d.name.slug == 'cutoffbofreq' %}
-        To request a BOF, please see instructions on
-        Requesting a BOF.
-    {% endif %}
-    {% if d.name.slug == 'idcutoff' %}
-        Upload using the
-        I-D Submission Tool.
-    {% endif %}
+    {{ d.name.desc|urlize_ietf_docs|markdown|linkify }}{% if d.name.desc|slice:"-1:" != "." %}.{% endif %}
 {% if d.name.slug == 'draftwgagenda' or d.name.slug == 'revwgagenda' or d.name.slug == 'procsub' or d.name.slug == 'revslug' %}
     Upload using the
     Meeting Materials Management Tool.
diff --git a/ietf/templates/nomcom/announcements.html b/ietf/templates/nomcom/announcements.html
index aa67fbb68..771f2b4fb 100644
--- a/ietf/templates/nomcom/announcements.html
+++ b/ietf/templates/nomcom/announcements.html
@@ -15,7 +15,7 @@

    {% for regime in regimes %}

- Messages from {{ regime.group.start_year }}/{{ regime.group.end_year }}
+ Messages from {{ regime.group.start_year }} NomCom

    {# use person email address here rather than the generic nomcom-chair@ietf.org #}

    @@ -56,9 +56,14 @@

- IAB, IESG, IETF Trust, and IETF LLC Selection, Confirmation, and Recall Process: Operation of the IETF Nominating and Recall Committees (RFC 8713) (Also BCP10)
+ IAB, IESG, IETF Trust, and IETF LLC Selection, Confirmation, and Recall Process: Operation of the IETF Nominating and Recall Committees (RFC 8713)
+
+ Nominating Committee Eligibility (RFC 9389)
+
 Publicly Verifiable Nominations Committee (NomCom) Random Selection (RFC 3797)
diff --git a/ietf/templates/nomcom/history.html b/ietf/templates/nomcom/history.html
index 09cdeae98..8262876b1 100644
--- a/ietf/templates/nomcom/history.html
+++ b/ietf/templates/nomcom/history.html
@@ -11,7 +11,7 @@
 Note: The data for concluded NomComs is occasionally incorrect.

 {% for regime in regimes %}
- {{ regime.label }}
+ {{ regime.year }} NomCom
 {% for slug, label, roles in regime.nomcom.personnel %}
@@ -25,7 +25,7 @@
 {% endfor %}
 {% endfor %}
- 2012/2013
+ 2012 NomCom
 Chair
@@ -64,7 +64,7 @@
 Rudi Vansnick (ISOC Liaison)
- 2011/2012
+ 2011 NomCom
 Chair
@@ -104,7 +104,7 @@
 Jason Livingood (ISOC Liaison)
- 2010/2011
+ 2010 NomCom
 Chair
@@ -143,7 +143,7 @@
 Eric Burger (ISOC Liaison)
- 2009/2010
+ 2009 NomCom
 Chair
@@ -184,7 +184,7 @@

- 2008/2009
+ 2008 NomCom
@@ -224,7 +224,7 @@
- 2007/2008
+ 2007 NomCom
@@ -264,7 +264,7 @@
- 2006/2007
+ 2006 NomCom
@@ -304,7 +304,7 @@
- 2005/2006
+ 2005 NomCom
@@ -344,7 +344,7 @@
- 2004/2005
+ 2004 NomCom
@@ -385,7 +385,7 @@
- 2003/2004
+ 2003 NomCom
@@ -424,7 +424,7 @@
- 2002/2003
+ 2002 NomCom
@@ -463,7 +463,7 @@
- 2001/2002
+ 2001 NomCom
@@ -502,7 +502,7 @@
- 2000/2001
+ 2000 NomCom
@@ -541,7 +541,7 @@
- 1999/2000
+ 1999 NomCom
@@ -580,7 +580,7 @@
- 1998/1999
+ 1998 NomCom
@@ -620,7 +620,7 @@
- 1997/1998
+ 1997 NomCom
@@ -659,7 +659,7 @@
- 1996/1997
+ 1996 NomCom
@@ -700,7 +700,7 @@
- 1995/1996
+ 1995 NomCom
@@ -739,7 +739,7 @@
- 1994/1995
+ 1994 NomCom
@@ -771,7 +771,7 @@
- 1993/1994
+ 1993 NomCom
@@ -802,7 +802,7 @@
- 1992/1993
+ 1992 NomCom

diff --git a/ietf/templates/nomcom/index.html b/ietf/templates/nomcom/index.html
index 91f63f74e..e765dd7c2 100644
--- a/ietf/templates/nomcom/index.html
+++ b/ietf/templates/nomcom/index.html
@@ -12,7 +12,7 @@
diff --git a/ietf/templates/person/profile.html b/ietf/templates/person/profile.html
index c8b02bfae..cc504ebc8 100644
--- a/ietf/templates/person/profile.html
+++ b/ietf/templates/person/profile.html
@@ -112,7 +112,7 @@
 {% with doc.referenced_by_rfcs.count as refbycount %}
 {% if refbycount %}
 {{ refbycount }} RFC{{ refbycount|pluralize }}
@@ -133,7 +133,7 @@
@@ -149,7 +149,7 @@
 {% if not doc.replaced_by %}
-    {{ doc.canonical_name }}
+    {{ doc.name }}
 {% endif %}
diff --git a/ietf/utils/management/commands/check_draft_event_revision_integrity.py b/ietf/utils/management/commands/check_draft_event_revision_integrity.py
index c8d2cbd21..c2d427278 100644
--- a/ietf/utils/management/commands/check_draft_event_revision_integrity.py
+++ b/ietf/utils/management/commands/check_draft_event_revision_integrity.py
@@ -54,7 +54,7 @@ class Command(BaseCommand):
             doc = getattr(obj, docattr)
             time = getattr(obj, timeattr)
             if not obj.rev:
-                if not doc.is_rfc():
+                if doc.type_id != "rfc":
                     self.stdout.write("Bad revision number: %-52s: '%s'" % (doc.name, obj.rev))
                 continue
             rev = int(obj.rev.lstrip('0') or '0')
diff --git a/ietf/utils/management/commands/run_yang_model_checks.py b/ietf/utils/management/commands/run_yang_model_checks.py
index 13fb61c46..7e2f7165b 100644
--- a/ietf/utils/management/commands/run_yang_model_checks.py
+++ b/ietf/utils/management/commands/run_yang_model_checks.py
@@ -10,7 +10,7 @@ from django.core.management.base import BaseCommand
 
 import debug                            # pyflakes:ignore
 
-from ietf.doc.models import Document, State, DocAlias
+from ietf.doc.models import Document, State
 from ietf.submit.models import Submission
 from ietf.submit.checkers import DraftYangChecker
 
@@ -78,7 +78,7 @@ class Command(BaseCommand):
                 parts = name.rsplit('-',1)
                 if len(parts)==2 and len(parts[1])==2 and parts[1].isdigit():
                     name = parts[0]
-                draft = DocAlias.objects.get(name=name).document
+                draft = Document.objects.get(name=name)
                 self.check_yang(checker, draft, force=True)
         else:
             for draft in Document.objects.filter(states=active_state, type_id='draft'):
diff --git a/ietf/utils/templatetags/htmlfilters.py b/ietf/utils/templatetags/htmlfilters.py
index a0f9232c5..1e399e2d7 100644
--- a/ietf/utils/templatetags/htmlfilters.py
+++ b/ietf/utils/templatetags/htmlfilters.py
@@ -7,6 +7,7 @@ from django.template.library import Library
 from django.template.defaultfilters import stringfilter
 
 from ietf.utils.html import remove_tags
+from ietf.utils.markdown import markdown as utils_markdown
 
 register = Library()
 
@@ -16,3 +17,9 @@ register = Library()
 def removetags(value, tags):
     """Removes a comma-separated list of [X]HTML tags from the output."""
     return remove_tags(value, re.split(r"\s*,\s*", tags))
+
+@register.filter(name="markdown", is_safe=True)
+def markdown(string):
+    # One issue is that the string is enclosed in <p>...</p> ... Let's remove the leading/trailing ones...
+    return utils_markdown(string)[3:-4]
+
diff --git a/ietf/utils/test_data.py b/ietf/utils/test_data.py
index ce5a46995..7123af5c8 100644
--- a/ietf/utils/test_data.py
+++ b/ietf/utils/test_data.py
@@ -11,12 +11,13 @@ from django.utils.encoding import smart_str
 
 import debug                            # pyflakes:ignore
 
-from ietf.doc.models import Document, DocAlias, State, DocumentAuthor, DocEvent, RelatedDocument, NewRevisionDocEvent
+from ietf.doc.models import Document, State, DocumentAuthor, DocEvent, RelatedDocument, NewRevisionDocEvent
+from ietf.doc.factories import IndividualDraftFactory, ConflictReviewFactory, StatusChangeFactory, WgDraftFactory, WgRfcFactory
 from ietf.group.models import Group, GroupHistory, Role, RoleHistory
 from ietf.iesg.models import TelechatDate
 from ietf.ipr.models import HolderIprDisclosure, IprDocRel, IprDisclosureStateName, IprLicenseTypeName
 from ietf.meeting.models import Meeting, ResourceAssociation
-from ietf.name.models import StreamName, DocRelationshipName, RoomResourceName, ConstraintName
+from ietf.name.models import DocRelationshipName, RoomResourceName, ConstraintName
 from ietf.person.models import Person, Email
 from ietf.group.utils import setup_default_community_list_for_group
 from ietf.review.models import (ReviewRequest, ReviewerSettings, ReviewResultName, ReviewTypeName, ReviewTeamSettings )
@@ -176,7 +177,6 @@ def make_test_data():
     charter.set_state(State.objects.get(used=True, slug="approved", type="charter"))
     group.charter = charter
     group.save()
-    DocAlias.objects.create(name=charter.name).docs.add(charter)
     setup_default_community_list_for_group(group)
 
     # ames WG
@@ -198,7 +198,6 @@ def make_test_data():
         rev="00",
     )
     charter.set_state(State.objects.get(used=True, slug="infrev", type="charter"))
-    DocAlias.objects.create(name=charter.name).docs.add(charter)
     group.charter = charter
     group.save()
     setup_default_community_list_for_group(group)
@@ -243,7 +242,6 @@ def make_test_data():
     #    rev="00",
     #    )
     #charter.set_state(State.objects.get(used=True, slug="infrev", type="charter"))
-    #DocAlias.objects.create(name=charter.name).docs.add(charter)
     #group.charter = charter
     #group.save()
@@ -287,8 +285,6 @@ def make_test_data():
         expires=timezone.now(),
     )
     old_draft.set_state(State.objects.get(used=True, type="draft", slug="expired"))
-    old_alias = DocAlias.objects.create(name=old_draft.name)
-    old_alias.docs.add(old_draft)
 
     # draft
     draft = Document.objects.create(
@@ -312,10 +308,7 @@ def make_test_data():
     draft.set_state(State.objects.get(used=True, type="draft-iesg", slug="pub-req"))
     draft.set_state(State.objects.get(used=True, type="draft-stream-%s" % draft.stream_id, slug="wg-doc"))
 
-    doc_alias = DocAlias.objects.create(name=draft.name)
-    doc_alias.docs.add(draft)
-
-    RelatedDocument.objects.create(source=draft, target=old_alias, relationship=DocRelationshipName.objects.get(slug='replaces'))
+    RelatedDocument.objects.create(source=draft, target=old_draft, relationship=DocRelationshipName.objects.get(slug='replaces'))
     old_draft.set_state(State.objects.get(type='draft', slug='repl'))
 
     DocumentAuthor.objects.create(
@@ -361,7 +354,7 @@ def make_test_data():
     IprDocRel.objects.create(
         disclosure=ipr,
-        document=doc_alias,
+        document=draft,
         revisions='00',
     )
@@ -390,37 +383,27 @@ def make_test_data():
     )
 
     # an independent submission before review
-    doc = Document.objects.create(name='draft-imaginary-independent-submission',type_id='draft',rev='00',
-                                  title="Some Independent Notes on Imagination")
-    doc.set_state(State.objects.get(used=True, type="draft", slug="active"))
-    DocAlias.objects.create(name=doc.name).docs.add(doc)
+    IndividualDraftFactory(title="Some Independent Notes on Imagination")
 
     # an irtf submission mid review
-    doc = Document.objects.create(name='draft-imaginary-irtf-submission', type_id='draft',rev='00',
-                                  stream=StreamName.objects.get(slug='irtf'), title="The Importance of Research Imagination")
-    docalias = DocAlias.objects.create(name=doc.name)
-    docalias.docs.add(doc)
-    doc.set_state(State.objects.get(type="draft", slug="active"))
-    crdoc = Document.objects.create(name='conflict-review-imaginary-irtf-submission', type_id='conflrev',
-                                    rev='00', notify="fsm@ietf.org", title="Conflict Review of IRTF Imagination Document")
-    DocAlias.objects.create(name=crdoc.name).docs.add(crdoc)
-    crdoc.set_state(State.objects.get(name='Needs Shepherd', type__slug='conflrev'))
-    crdoc.relateddocument_set.create(target=docalias,relationship_id='conflrev')
+    doc = IndividualDraftFactory(name="draft-imaginary-irtf-submission", stream_id="irtf", title="The Importance of Research Imagination")
+    ConflictReviewFactory(name="conflict-review-imaginary-irtf-submission", review_of=doc, notify="fsm@ietf.org", title="Conflict Review of IRTF Imagination Document")
 
     # A status change mid review
     iesg = Group.objects.get(acronym='iesg')
-    doc = Document.objects.create(name='status-change-imaginary-mid-review',type_id='statchg', rev='00',
-                                  notify="fsm@ietf.org", group=iesg, title="Status Change Review without Imagination")
-    doc.set_state(State.objects.get(slug='needshep',type__slug='statchg'))
-    docalias = DocAlias.objects.create(name='status-change-imaginary-mid-review')
-    docalias.docs.add(doc)
+    doc = StatusChangeFactory(
+        name='status-change-imaginary-mid-review',
+        notify="fsm@ietf.org",
+        group=iesg,
+        title="Status Change Review without Imagination",
+        states= [State.objects.get(type_id="statchg",slug="needshep")]
+    )
 
     # Some things for a status change to affect
     def rfc_for_status_change_test_factory(name,rfc_num,std_level_id):
-        target_rfc = Document.objects.create(name=name, type_id='draft', std_level_id=std_level_id, notify="%s@ietf.org"%name)
-        target_rfc.set_state(State.objects.get(slug='rfc',type__slug='draft'))
-        DocAlias.objects.create(name=name).docs.add(target_rfc)
-        DocAlias.objects.create(name='rfc%d'%rfc_num).docs.add(target_rfc)
+        target_rfc = WgRfcFactory(rfc_number=rfc_num, std_level_id=std_level_id)
+        source_draft = WgDraftFactory(name=name, states=[("draft","rfc")], notify=f"{name}@ietf.org")
+        source_draft.relateddocument_set.create(relationship_id="became_rfc", target=target_rfc)
         return target_rfc
     rfc_for_status_change_test_factory('draft-ietf-random-thing',9999,'ps')
    rfc_for_status_change_test_factory('draft-ietf-random-otherthing',9998,'inf')
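A minimal sketch of the factory-based draft-to-RFC linkage that test_data.py now relies on, assuming a test or development database where factory objects may be created; 9999 is an arbitrary RFC number, not one this change reserves:

    from ietf.doc.factories import WgDraftFactory, WgRfcFactory

    draft = WgDraftFactory(states=[("draft", "rfc")])
    rfc = WgRfcFactory(group=draft.group, rfc_number=9999)
    draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)

    # The RFC is reachable from the draft without any DocAlias indirection:
    assert draft.relateddocument_set.get(relationship_id="became_rfc").target == rfc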
- Year
+ Year Convened
 Chair