ci: merge main to release (feat/rfc)

Commit 5d062621cc by Robert Sparks, 2023-12-11 11:23:31 -06:00, committed via GitHub
226 changed files with 4503 additions and 2788 deletions


@ -4,6 +4,7 @@ on:
pull_request:
branches:
- 'main'
- 'feat/rfc'
paths:
- 'client/**'
- 'ietf/**'


@ -23,7 +23,7 @@ async function main () {
throw new Error('Missing --branch argument!')
}
if (branch.indexOf('/') >= 0) {
branch = branch.split('/').shift().join('-')
branch = branch.split('/').slice(1).join('-')
}
branch = slugify(branch, { lower: true, strict: true })
if (branch.length < 1) {


@ -36,7 +36,10 @@ echo "Running Datatracker checks..."
# Migrate, adjusting to what the current state of the underlying database might be:
echo "Running Datatracker migrations..."
/usr/local/bin/python ./ietf/manage.py migrate --fake-initial --settings=settings_local
/usr/local/bin/python ./ietf/manage.py migrate --settings=settings_local
echo "Syncing with the rfc-index"
./ietf/bin/rfc-editor-index-updates -d 1969-01-01
echo "Starting Datatracker..."
./ietf/manage.py runserver 0.0.0.0:8000 --settings=settings_local


@ -24,7 +24,7 @@ import debug # pyflakes:ignore
import ietf
from ietf.doc.utils import get_unicode_document_content
from ietf.doc.models import RelatedDocument, State
from ietf.doc.factories import IndividualDraftFactory, WgDraftFactory
from ietf.doc.factories import IndividualDraftFactory, WgDraftFactory, WgRfcFactory
from ietf.group.factories import RoleFactory
from ietf.meeting.factories import MeetingFactory, SessionFactory
from ietf.meeting.models import Session
@ -944,7 +944,7 @@ class RfcdiffSupportTests(TestCase):
self.assertNotIn('previous', received, 'Rev 00 has no previous name when not replacing a draft')
replaced = IndividualDraftFactory()
RelatedDocument.objects.create(relationship_id='replaces',source=draft,target=replaced.docalias.first())
RelatedDocument.objects.create(relationship_id='replaces',source=draft,target=replaced)
received = self.getJson(dict(name=draft.name, rev='00'))
self.assertEqual(received['previous'], f'{replaced.name}-{replaced.rev}',
'Rev 00 has a previous name when replacing a draft')
@ -974,19 +974,19 @@ class RfcdiffSupportTests(TestCase):
def do_rfc_test(self, draft_name):
draft = WgDraftFactory(name=draft_name, create_revisions=range(0,2))
draft.docalias.create(name=f'rfc{self.next_rfc_number():04}')
rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number())
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
draft = reload_db_objects(draft)
rfc = draft
draft, rfc = reload_db_objects(draft, rfc)
number = rfc.rfc_number()
number = rfc.rfc_number
received = self.getJson(dict(name=number))
self.assertEqual(
received,
dict(
content_url=rfc.get_href(),
name=rfc.canonical_name(),
name=rfc.name,
previous=f'{draft.name}-{draft.rev}',
previous_url= draft.history_set.get(rev=draft.rev).get_href(),
),
@ -994,7 +994,7 @@ class RfcdiffSupportTests(TestCase):
)
num_received = received
received = self.getJson(dict(name=rfc.canonical_name()))
received = self.getJson(dict(name=rfc.name))
self.assertEqual(num_received, received, 'RFC by canonical name gives same result as by number')
received = self.getJson(dict(name=f'RfC {number}'))
@ -1026,30 +1026,30 @@ class RfcdiffSupportTests(TestCase):
def test_rfc_with_tombstone(self):
draft = WgDraftFactory(create_revisions=range(0,2))
draft.docalias.create(name='rfc3261') # See views_doc.HAS_TOMBSTONE
rfc = WgRfcFactory(rfc_number=3261,group=draft.group)# See views_doc.HAS_TOMBSTONE
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
draft = reload_db_objects(draft)
rfc = draft
# Some old rfcs had tombstones that shouldn't be used for comparisons
received = self.getJson(dict(name=rfc.canonical_name()))
received = self.getJson(dict(name=rfc.name))
self.assertTrue(received['previous'].endswith('00'))
def do_rfc_with_broken_history_test(self, draft_name):
draft = WgDraftFactory(rev='10', name=draft_name)
draft.docalias.create(name=f'rfc{self.next_rfc_number():04}')
rfc = WgRfcFactory(group=draft.group, rfc_number=self.next_rfc_number())
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
draft.set_state(State.objects.get(type_id='draft',slug='rfc'))
draft.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
draft = reload_db_objects(draft)
rfc = draft
received = self.getJson(dict(name=draft.name))
self.assertEqual(
received,
dict(
content_url=rfc.get_href(),
name=rfc.canonical_name(),
name=rfc.name,
previous=f'{draft.name}-10',
previous_url= f'{settings.IETF_ID_ARCHIVE_URL}{draft.name}-10.txt',
),
@ -1080,3 +1080,9 @@ class RfcdiffSupportTests(TestCase):
# tricky draft names
self.do_rfc_with_broken_history_test(draft_name='draft-gizmo-01')
self.do_rfc_with_broken_history_test(draft_name='draft-oh-boy-what-a-draft-02-03')
def test_no_such_document(self):
for name in ['rfc0000', 'draft-ftei-oof-rab-00']:
url = urlreverse(self.target_view, kwargs={'name': name})
r = self.client.get(url)
self.assertEqual(r.status_code, 404)


@ -317,12 +317,9 @@ def get_previous_url(name, rev=None):
previous_url = ''
if condition in ('historic version', 'current version'):
doc = history if history else document
if found_rev:
doc.is_rfc = lambda: False
previous_url = doc.get_href()
elif condition == 'version dochistory not found':
document.rev = found_rev
document.is_rfc = lambda: False
previous_url = document.get_href()
return previous_url
@ -330,32 +327,38 @@ def get_previous_url(name, rev=None):
def rfcdiff_latest_json(request, name, rev=None):
response = dict()
condition, document, history, found_rev = find_doc_for_rfcdiff(name, rev)
if document and document.type_id == "rfc":
draft = document.came_from_draft()
if condition == 'no such document':
raise Http404
elif condition in ('historic version', 'current version'):
doc = history if history else document
if not found_rev and doc.is_rfc():
response['content_url'] = doc.get_href()
response['name']=doc.canonical_name()
if doc.name != doc.canonical_name():
if doc.type_id == "rfc":
response['content_url'] = doc.get_href()
response['name']=doc.name
if draft:
prev_rev = draft.rev
if doc.rfc_number in HAS_TOMBSTONE and prev_rev != '00':
prev_rev = f'{(int(draft.rev)-1):02d}'
response['previous'] = f'{draft.name}-{prev_rev}'
response['previous_url'] = get_previous_url(draft.name, prev_rev)
elif doc.type_id == "draft" and not found_rev and doc.relateddocument_set.filter(relationship_id="became_rfc").exists():
rfc = doc.related_that_doc("became_rfc")[0]
response['content_url'] = rfc.get_href()
response['name']=rfc.name
prev_rev = doc.rev
# not sure what to do if non-numeric values come back, so at least log it
log.assertion('doc.rfc_number().isdigit()') # .rfc_number() is expensive...
log.assertion('doc.rev.isdigit()')
if int(doc.rfc_number()) in HAS_TOMBSTONE and prev_rev != '00':
if rfc.rfc_number in HAS_TOMBSTONE and prev_rev != '00':
prev_rev = f'{(int(doc.rev)-1):02d}'
response['previous'] = f'{doc.name}-{prev_rev}'
response['previous_url'] = get_previous_url(doc.name, prev_rev)
else:
doc.is_rfc = lambda: False
response['content_url'] = doc.get_href()
response['rev'] = doc.rev
response['name'] = doc.name
if doc.rev == '00':
replaces_docs = (history.doc if condition=='historic version' else doc).related_that_doc('replaces')
if replaces_docs:
replaces = replaces_docs[0].document
replaces = replaces_docs[0]
response['previous'] = f'{replaces.name}-{replaces.rev}'
response['previous_url'] = get_previous_url(replaces.name, replaces.rev)
else:
@ -374,7 +377,6 @@ def rfcdiff_latest_json(request, name, rev=None):
response['name'] = document.name
response['rev'] = found_rev
document.rev = found_rev
document.is_rfc = lambda: False
response['content_url'] = document.get_href()
# not sure what to do if non-numeric values come back, so at least log it
log.assertion('found_rev.isdigit()')


@ -79,12 +79,12 @@ if len(errata_data) < ietf.sync.rfceditor.MIN_ERRATA_RESULTS:
sys.exit(1)
new_rfcs = []
for changes, doc, rfc_published in ietf.sync.rfceditor.update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=skip_date):
for rfc_number, changes, doc, rfc_published in ietf.sync.rfceditor.update_docs_from_rfc_index(index_data, errata_data, skip_older_than_date=skip_date):
if rfc_published:
new_rfcs.append(doc)
for c in changes:
log("RFC%s, %s: %s" % (doc.rfcnum, doc.name, c))
log("RFC%s, %s: %s" % (rfc_number, doc.name, c))
sys.exit(0)
@ -99,7 +99,7 @@ if newpid == 0:
pipe("%s -a %s %s" % (settings.RSYNC_BINARY,settings.RFC_TEXT_RSYNC_SOURCE,settings.RFC_PATH))
for rfc in new_rfcs:
rebuild_reference_relations(rfc)
log("Updated references for %s"%rfc.canonical_name())
log("Updated references for %s"%rfc.name)
except:
subject = "Exception in updating references for new rfcs: %s : %s" % (sys.exc_info()[0],sys.exc_info()[1])
msg = "%s\n%s\n----\n%s"%(sys.exc_info()[0],sys.exc_info()[1],traceback.format_tb(sys.exc_info()[2]))


@ -30,6 +30,8 @@ class SearchRuleForm(forms.ModelForm):
super(SearchRuleForm, self).__init__(*args, **kwargs)
def restrict_state(state_type, slug=None):
if "state" not in self.fields:
raise RuntimeError(f"Rule type {rule_type} cannot include state filtering")
f = self.fields['state']
f.queryset = f.queryset.filter(used=True).filter(type=state_type)
if slug:
@ -38,11 +40,15 @@ class SearchRuleForm(forms.ModelForm):
f.initial = f.queryset[0].pk
f.widget = forms.HiddenInput()
if rule_type.endswith("_rfc"):
del self.fields["state"] # rfc rules must not look at document states
if rule_type in ["group", "group_rfc", "area", "area_rfc", "group_exp"]:
if rule_type == "group_exp":
restrict_state("draft", "expired")
else:
restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active")
if not rule_type.endswith("_rfc"):
restrict_state("draft", "active")
if rule_type.startswith("area"):
self.fields["group"].label = "Area"
@ -70,7 +76,8 @@ class SearchRuleForm(forms.ModelForm):
del self.fields["text"]
elif rule_type in ["author", "author_rfc", "shepherd", "ad"]:
restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active")
if not rule_type.endswith("_rfc"):
restrict_state("draft", "active")
if rule_type.startswith("author"):
self.fields["person"].label = "Author"
@ -84,7 +91,8 @@ class SearchRuleForm(forms.ModelForm):
del self.fields["text"]
elif rule_type == "name_contains":
restrict_state("draft", "rfc" if rule_type.endswith("rfc") else "active")
if not rule_type.endswith("_rfc"):
restrict_state("draft", "active")
del self.fields["person"]
del self.fields["group"]


@ -0,0 +1,50 @@
# Generated by Django 4.2.3 on 2023-07-07 18:33
from django.db import migrations
def forward(apps, schema_editor):
"""Track any RFCs that were created from tracked drafts"""
CommunityList = apps.get_model("community", "CommunityList")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
# Handle individually tracked documents
for cl in CommunityList.objects.all():
for rfc in set(
RelatedDocument.objects.filter(
source__in=cl.added_docs.all(),
relationship__slug="became_rfc",
).values_list("target__docs", flat=True)
):
cl.added_docs.add(rfc)
# Handle rules - rules ending with _rfc should no longer filter by state.
# There are 9 CommunityLists with invalid author_rfc rules that are filtering
# by (draft, active) instead of (draft, rfc) state before migration. All but one
# also includes an author rule for (draft, active), so these will start following
# RFCs as well. The one exception will start tracking RFCs instead of I-Ds, which
# is probably what was intended, but will be a change in their user experience.
SearchRule = apps.get_model("community", "SearchRule")
rfc_rules = SearchRule.objects.filter(rule_type__endswith="_rfc")
rfc_rules.update(state=None)
def reverse(apps, schema_editor):
Document = apps.get_model("doc", "Document")
for rfc in Document.objects.filter(type__slug="rfc"):
rfc.communitylist_set.clear()
# See the comment above regarding author_rfc
SearchRule = apps.get_model("community", "SearchRule")
State = apps.get_model("doc", "State")
SearchRule.objects.filter(rule_type__endswith="_rfc").update(
state=State.objects.get(type_id="draft", slug="rfc")
)
class Migration(migrations.Migration):
dependencies = [
("community", "0002_auto_20230320_1222"),
("doc", "0014_move_rfc_docaliases"),
]
operations = [migrations.RunPython(forward, reverse)]
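
The comment in forward() above captures the behavioral change: search rules whose type ends in "_rfc" stop filtering on a draft state and instead match documents by type. A minimal sketch of the post-migration matching logic, condensed from the docs_matching_community_list_rule changes that appear further down in this diff (not a complete implementation):

def docs_matching_community_list_rule(rule):
    docs = Document.objects.all()
    if rule.rule_type.endswith("_rfc"):
        docs = docs.filter(type_id="rfc")  # rule.state is now None and is ignored for RFCs
    else:
        docs = docs.filter(type_id="draft", states=rule.state)
    # ...rule-type-specific filters (group, author, shepherd, name_contains) follow
    return docs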


@ -41,7 +41,7 @@ class CommunityListTests(WebTest):
clist = CommunityList.objects.create(user=User.objects.get(username="plain"))
rule_group = SearchRule.objects.create(rule_type="group", group=draft.group, state=State.objects.get(type="draft", slug="active"), community_list=clist)
rule_group_rfc = SearchRule.objects.create(rule_type="group_rfc", group=draft.group, state=State.objects.get(type="draft", slug="rfc"), community_list=clist)
rule_group_rfc = SearchRule.objects.create(rule_type="group_rfc", group=draft.group, state=State.objects.get(type="rfc", slug="published"), community_list=clist)
rule_area = SearchRule.objects.create(rule_type="area", group=draft.group.parent, state=State.objects.get(type="draft", slug="active"), community_list=clist)
rule_state_iesg = SearchRule.objects.create(rule_type="state_iesg", state=State.objects.get(type="draft-iesg", slug="lc"), community_list=clist)
@ -151,7 +151,7 @@ class CommunityListTests(WebTest):
"action": "add_rule",
"rule_type": "author_rfc",
"author_rfc-person": Person.objects.filter(documentauthor__document=draft).first().pk,
"author_rfc-state": State.objects.get(type="draft", slug="rfc").pk,
"author_rfc-state": State.objects.get(type="rfc", slug="published").pk,
})
self.assertEqual(r.status_code, 302)
clist = CommunityList.objects.get(user__username="plain")
@ -408,4 +408,4 @@ class CommunityListTests(WebTest):
self.assertEqual(len(outbox), mailbox_before + 1)
self.assertTrue(draft.name in outbox[-1]["Subject"])


@ -60,7 +60,7 @@ def reset_name_contains_index_for_rule(rule):
if not rule.rule_type == "name_contains":
return
rule.name_contains_index.set(Document.objects.filter(docalias__name__regex=rule.text))
rule.name_contains_index.set(Document.objects.filter(name__regex=rule.text))
def update_name_contains_indexes_with_new_doc(doc):
for r in SearchRule.objects.filter(rule_type="name_contains"):
@ -71,70 +71,103 @@ def update_name_contains_indexes_with_new_doc(doc):
if re.search(r.text, doc.name) and not doc in r.name_contains_index.all():
r.name_contains_index.add(doc)
def docs_matching_community_list_rule(rule):
docs = Document.objects.all()
if rule.rule_type.endswith("_rfc"):
docs = docs.filter(type_id="rfc") # rule.state is ignored for RFCs
else:
docs = docs.filter(type_id="draft", states=rule.state)
if rule.rule_type in ['group', 'area', 'group_rfc', 'area_rfc']:
return docs.filter(Q(group=rule.group_id) | Q(group__parent=rule.group_id), states=rule.state)
return docs.filter(Q(group=rule.group_id) | Q(group__parent=rule.group_id))
elif rule.rule_type in ['group_exp']:
return docs.filter(group=rule.group_id, states=rule.state)
return docs.filter(group=rule.group_id)
elif rule.rule_type.startswith("state_"):
return docs.filter(states=rule.state)
return docs
elif rule.rule_type in ["author", "author_rfc"]:
return docs.filter(states=rule.state, documentauthor__person=rule.person)
return docs.filter(documentauthor__person=rule.person)
elif rule.rule_type == "ad":
return docs.filter(states=rule.state, ad=rule.person)
return docs.filter(ad=rule.person)
elif rule.rule_type == "shepherd":
return docs.filter(states=rule.state, shepherd__person=rule.person)
return docs.filter(shepherd__person=rule.person)
elif rule.rule_type == "name_contains":
return docs.filter(states=rule.state, searchrule=rule)
return docs.filter(searchrule=rule)
raise NotImplementedError
def community_list_rules_matching_doc(doc):
rules = SearchRule.objects.none()
if doc.type_id not in ["draft", "rfc"]:
return rules # none
states = list(doc.states.values_list("pk", flat=True))
rules = SearchRule.objects.none()
# group and area rules
if doc.group_id:
groups = [doc.group_id]
if doc.group.parent_id:
groups.append(doc.group.parent_id)
rules_to_add = SearchRule.objects.filter(group__in=groups)
if doc.type_id == "rfc":
rules_to_add = rules_to_add.filter(rule_type__in=["group_rfc", "area_rfc"])
else:
rules_to_add = rules_to_add.filter(
rule_type__in=["group", "area", "group_exp"],
state__in=states,
)
rules |= rules_to_add
# state rules (only relevant for I-Ds)
if doc.type_id == "draft":
rules |= SearchRule.objects.filter(
rule_type__in=['group', 'area', 'group_rfc', 'area_rfc', 'group_exp'],
rule_type__in=[
"state_iab",
"state_iana",
"state_iesg",
"state_irtf",
"state_ise",
"state_rfceditor",
"state_ietf",
],
state__in=states,
group__in=groups
)
rules |= SearchRule.objects.filter(
rule_type__in=['state_iab', 'state_iana', 'state_iesg', 'state_irtf', 'state_ise', 'state_rfceditor', 'state_ietf'],
state__in=states,
)
rules |= SearchRule.objects.filter(
rule_type__in=["author", "author_rfc"],
state__in=states,
person__in=list(Person.objects.filter(documentauthor__document=doc)),
)
if doc.ad_id:
# author rules
if doc.type_id == "rfc":
rules |= SearchRule.objects.filter(
rule_type="ad",
rule_type="author_rfc",
person__in=list(Person.objects.filter(documentauthor__document=doc)),
)
else:
rules |= SearchRule.objects.filter(
rule_type="author",
state__in=states,
person=doc.ad_id,
person__in=list(Person.objects.filter(documentauthor__document=doc)),
)
if doc.shepherd_id:
rules |= SearchRule.objects.filter(
rule_type="shepherd",
state__in=states,
person__email=doc.shepherd_id,
)
# Other draft-only rules
if doc.type_id == "draft":
if doc.ad_id:
rules |= SearchRule.objects.filter(
rule_type="ad",
state__in=states,
person=doc.ad_id,
)
rules |= SearchRule.objects.filter(
rule_type="name_contains",
state__in=states,
name_contains_index=doc, # search our materialized index to avoid full scan
)
if doc.shepherd_id:
rules |= SearchRule.objects.filter(
rule_type="shepherd",
state__in=states,
person__email=doc.shepherd_id,
)
rules |= SearchRule.objects.filter(
rule_type="name_contains",
state__in=states,
name_contains_index=doc, # search our materialized index to avoid full scan
)
return rules
@ -146,7 +179,11 @@ def docs_tracked_by_community_list(clist):
# in theory, we could use an OR query, but databases seem to have
# trouble with OR queries and complicated joins so do the OR'ing
# manually
doc_ids = set(clist.added_docs.values_list("pk", flat=True))
doc_ids = set()
for doc in clist.added_docs.all():
doc_ids.add(doc.pk)
doc_ids.update(rfc.pk for rfc in doc.related_that_doc("became_rfc"))
for rule in clist.searchrule_set.all():
doc_ids = doc_ids | set(docs_matching_community_list_rule(rule).values_list("pk", flat=True))


@ -79,19 +79,18 @@ def manage_list(request, username=None, acronym=None, group_type=None):
rule_type_form = SearchRuleTypeForm(request.POST)
if rule_type_form.is_valid():
rule_type = rule_type_form.cleaned_data['rule_type']
if rule_type:
rule_form = SearchRuleForm(clist, rule_type, request.POST)
if rule_form.is_valid():
if clist.pk is None:
clist.save()
rule = rule_form.save(commit=False)
rule.community_list = clist
rule.rule_type = rule_type
rule.save()
if rule.rule_type == "name_contains":
reset_name_contains_index_for_rule(rule)
if rule_type:
rule_form = SearchRuleForm(clist, rule_type, request.POST)
if rule_form.is_valid():
if clist.pk is None:
clist.save()
rule = rule_form.save(commit=False)
rule.community_list = clist
rule.rule_type = rule_type
rule.save()
if rule.rule_type == "name_contains":
reset_name_contains_index_for_rule(rule)
return HttpResponseRedirect("")
else:
@ -130,7 +129,7 @@ def manage_list(request, username=None, acronym=None, group_type=None):
@login_required
def track_document(request, name, username=None, acronym=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if request.method == "POST":
clist = lookup_community_list(username, acronym)
@ -154,7 +153,7 @@ def track_document(request, name, username=None, acronym=None):
@login_required
def untrack_document(request, name, username=None, acronym=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
clist = lookup_community_list(username, acronym)
if not can_manage_community_list(request.user, clist):
permission_denied(request, "You do not have permission to access this view")


@ -7,7 +7,7 @@ from django.db import models
from django import forms
from .models import (StateType, State, RelatedDocument, DocumentAuthor, Document, RelatedDocHistory,
DocHistoryAuthor, DocHistory, DocAlias, DocReminder, DocEvent, NewRevisionDocEvent,
DocHistoryAuthor, DocHistory, DocReminder, DocEvent, NewRevisionDocEvent,
StateDocEvent, ConsensusDocEvent, BallotType, BallotDocEvent, WriteupDocEvent, LastCallDocEvent,
TelechatDocEvent, BallotPositionDocEvent, ReviewRequestDocEvent, InitialReviewDocEvent,
AddedMessageEvent, SubmissionDocEvent, DeletedEvent, EditedAuthorsDocEvent, DocumentURL,
@ -27,10 +27,6 @@ class StateAdmin(admin.ModelAdmin):
filter_horizontal = ["next_states"]
admin.site.register(State, StateAdmin)
# class DocAliasInline(admin.TabularInline):
# model = DocAlias
# extra = 1
class DocAuthorInline(admin.TabularInline):
model = DocumentAuthor
raw_id_fields = ['person', 'email']
@ -43,8 +39,9 @@ class DocActionHolderInline(admin.TabularInline):
class RelatedDocumentInline(admin.TabularInline):
model = RelatedDocument
fk_name= 'source'
def this(self, instance):
return instance.source.canonical_name()
return instance.source.name
readonly_fields = ['this', ]
fields = ['this', 'relationship', 'target', ]
raw_id_fields = ['target']
@ -70,7 +67,7 @@ class DocumentForm(forms.ModelForm):
class DocumentAuthorAdmin(admin.ModelAdmin):
list_display = ['id', 'document', 'person', 'email', 'affiliation', 'country', 'order']
search_fields = ['document__docalias__name', 'person__name', 'email__address', 'affiliation', 'country']
search_fields = ['document__name', 'person__name', 'email__address', 'affiliation', 'country']
raw_id_fields = ["document", "person", "email"]
admin.site.register(DocumentAuthor, DocumentAuthorAdmin)
@ -108,14 +105,6 @@ class DocHistoryAdmin(admin.ModelAdmin):
admin.site.register(DocHistory, DocHistoryAdmin)
class DocAliasAdmin(admin.ModelAdmin):
list_display = ['name', 'targets']
search_fields = ['name', 'docs__name']
raw_id_fields = ['docs']
def targets(self, obj):
return ', '.join([o.name for o in obj.docs.all()])
admin.site.register(DocAlias, DocAliasAdmin)
class DocReminderAdmin(admin.ModelAdmin):
list_display = ['id', 'event', 'type', 'due', 'active']
list_filter = ['type', 'due', 'active']
@ -125,7 +114,7 @@ admin.site.register(DocReminder, DocReminderAdmin)
class RelatedDocumentAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'relationship', ]
list_filter = ['relationship', ]
search_fields = ['source__name', 'target__name', 'target__docs__name', ]
search_fields = ['source__name', 'target__name', ]
raw_id_fields = ['source', 'target', ]
admin.site.register(RelatedDocument, RelatedDocumentAdmin)


@ -12,7 +12,7 @@ from typing import Optional # pyflakes:ignore
from django.conf import settings
from django.utils import timezone
from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, DocAlias, State, DocumentAuthor,
from ietf.doc.models import ( Document, DocEvent, NewRevisionDocEvent, State, DocumentAuthor,
StateDocEvent, BallotPositionDocEvent, BallotDocEvent, BallotType, IRSGBallotDocEvent, TelechatDocEvent,
DocumentActionHolder, BofreqEditorDocEvent, BofreqResponsibleDocEvent, DocExtResource )
from ietf.group.models import Group
@ -51,16 +51,11 @@ class BaseDocumentFactory(factory.django.DjangoModelFactory):
def name(self, n):
return draft_name_generator(self.type_id,self.group,n)
newrevisiondocevent = factory.RelatedFactory('ietf.doc.factories.NewRevisionDocEventFactory','doc')
@factory.post_generation
def other_aliases(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
alias = DocAliasFactory(name=obj.name)
alias.docs.add(obj)
if create and extracted:
for name in extracted:
alias = DocAliasFactory(name=name)
alias.docs.add(obj)
def newrevisiondocevent(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if create:
if obj.type_id != "rfc":
NewRevisionDocEventFactory(doc=obj)
@factory.post_generation
def states(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
@ -83,13 +78,7 @@ class BaseDocumentFactory(factory.django.DjangoModelFactory):
def relations(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
if create and extracted:
for (rel_id, doc) in extracted:
if isinstance(doc, Document):
docalias = doc.docalias.first()
elif isinstance(doc, DocAlias):
docalias = doc
else:
continue
obj.relateddocument_set.create(relationship_id=rel_id, target=docalias)
obj.relateddocument_set.create(relationship_id=rel_id, target=doc)
@factory.post_generation
def create_revisions(obj, create, extracted, **kwargs): # pylint: disable=no-self-argument
@ -119,6 +108,24 @@ class DocumentFactory(BaseDocumentFactory):
group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none')
class RfcFactory(BaseDocumentFactory):
type_id = "rfc"
rev = ""
rfc_number = factory.Sequence(lambda n: n + 1000)
name = factory.LazyAttribute(lambda o: f"rfc{o.rfc_number:d}")
expires = None
@factory.post_generation
def states(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
else:
obj.set_state(State.objects.get(type_id='rfc',slug='published'))
class IndividualDraftFactory(BaseDocumentFactory):
type_id = 'draft'
@ -137,28 +144,11 @@ class IndividualDraftFactory(BaseDocumentFactory):
obj.set_state(State.objects.get(type_id='draft',slug='active'))
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
class IndividualRfcFactory(IndividualDraftFactory):
class IndividualRfcFactory(RfcFactory):
group = factory.SubFactory('ietf.group.factories.GroupFactory',acronym='none')
alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))
@factory.post_generation
def states(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
else:
obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
@factory.post_generation
def reset_canonical_name(obj, create, extracted, **kwargs):
if hasattr(obj, '_canonical_name'):
del obj._canonical_name
return None
class WgDraftFactory(BaseDocumentFactory):
type_id = 'draft'
group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg')
stream_id = 'ietf'
@ -177,30 +167,12 @@ class WgDraftFactory(BaseDocumentFactory):
obj.set_state(State.objects.get(type_id='draft-stream-ietf',slug='wg-doc'))
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
class WgRfcFactory(WgDraftFactory):
alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))
class WgRfcFactory(RfcFactory):
group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='wg')
stream_id = 'ietf'
std_level_id = 'ps'
@factory.post_generation
def states(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
if not obj.get_state('draft-iesg'):
obj.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
else:
obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
obj.set_state(State.objects.get(type_id='draft-iesg', slug='pub'))
@factory.post_generation
def reset_canonical_name(obj, create, extracted, **kwargs):
if hasattr(obj, '_canonical_name'):
del obj._canonical_name
return None
class RgDraftFactory(BaseDocumentFactory):
@ -223,34 +195,11 @@ class RgDraftFactory(BaseDocumentFactory):
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
class RgRfcFactory(RgDraftFactory):
alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))
class RgRfcFactory(RfcFactory):
group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='rg')
stream_id = 'irtf'
std_level_id = 'inf'
@factory.post_generation
def states(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
if not obj.get_state('draft-stream-irtf'):
obj.set_state(State.objects.get(type_id='draft-stream-irtf', slug='pub'))
if not obj.get_state('draft-iesg'):
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
else:
obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
obj.set_state(State.objects.get(type_id='draft-stream-irtf', slug='pub'))
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
@factory.post_generation
def reset_canonical_name(obj, create, extracted, **kwargs):
if hasattr(obj, '_canonical_name'):
del obj._canonical_name
return None
class CharterFactory(BaseDocumentFactory):
@ -279,7 +228,7 @@ class StatusChangeFactory(BaseDocumentFactory):
for (rel, target) in extracted:
obj.relateddocument_set.create(relationship_id=rel,target=target)
else:
obj.relateddocument_set.create(relationship_id='tobcp', target=WgRfcFactory().docalias.first())
obj.relateddocument_set.create(relationship_id='tobcp', target=WgRfcFactory())
@factory.post_generation
def states(obj, create, extracted, **kwargs):
@ -306,9 +255,9 @@ class ConflictReviewFactory(BaseDocumentFactory):
if not create:
return
if extracted:
obj.relateddocument_set.create(relationship_id='conflrev',target=extracted.docalias.first())
obj.relateddocument_set.create(relationship_id='conflrev',target=extracted)
else:
obj.relateddocument_set.create(relationship_id='conflrev',target=DocumentFactory(name=obj.name.replace('conflict-review-','draft-'),type_id='draft',group=Group.objects.get(type_id='individ')).docalias.first())
obj.relateddocument_set.create(relationship_id='conflrev',target=DocumentFactory(name=obj.name.replace('conflict-review-','draft-'),type_id='draft',group=Group.objects.get(type_id='individ')))
@factory.post_generation
@ -327,24 +276,6 @@ class ReviewFactory(BaseDocumentFactory):
name = factory.LazyAttribute(lambda o: 'review-doesnotexist-00-%s-%s'%(o.group.acronym,date_today().isoformat()))
group = factory.SubFactory('ietf.group.factories.GroupFactory',type_id='review')
class DocAliasFactory(factory.django.DjangoModelFactory):
class Meta:
model = DocAlias
skip_postgeneration_save = True
@factory.post_generation
def document(self, create, extracted, **kwargs):
if create and extracted:
self.docs.add(extracted)
@factory.post_generation
def docs(self, create, extracted, **kwargs):
if create and extracted:
for doc in extracted:
if not doc in self.docs.all():
self.docs.add(doc)
class DocEventFactory(factory.django.DjangoModelFactory):
class Meta:
model = DocEvent
@ -557,33 +488,8 @@ class EditorialDraftFactory(BaseDocumentFactory):
obj.set_state(State.objects.get(type_id='draft-stream-editorial',slug='active'))
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
class EditorialRfcFactory(RgDraftFactory):
alias2 = factory.RelatedFactory('ietf.doc.factories.DocAliasFactory','document',name=factory.Sequence(lambda n: 'rfc%04d'%(n+1000)))
std_level_id = 'inf'
@factory.post_generation
def states(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for (state_type_id,state_slug) in extracted:
obj.set_state(State.objects.get(type_id=state_type_id,slug=state_slug))
if not obj.get_state('draft-stream-editorial'):
obj.set_state(State.objects.get(type_id='draft-stream-editorial', slug='pub'))
if not obj.get_state('draft-iesg'):
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
else:
obj.set_state(State.objects.get(type_id='draft',slug='rfc'))
obj.set_state(State.objects.get(type_id='draft-stream-editorial', slug='pub'))
obj.set_state(State.objects.get(type_id='draft-iesg',slug='idexists'))
@factory.post_generation
def reset_canonical_name(obj, create, extracted, **kwargs):
if hasattr(obj, '_canonical_name'):
del obj._canonical_name
return None
class EditorialRfcFactory(RgRfcFactory):
pass
class StatementFactory(BaseDocumentFactory):
type_id = "statement"
@ -611,3 +517,31 @@ class StatementFactory(BaseDocumentFactory):
obj.set_state(State.objects.get(type_id=state_type_id, slug=state_slug))
else:
obj.set_state(State.objects.get(type_id="statement", slug="active"))
class SubseriesFactory(factory.django.DjangoModelFactory):
class Meta:
model = Document
skip_postgeneration_save = True
@factory.lazy_attribute_sequence
def name(self, n):
return f"{self.type_id}{n}"
@factory.post_generation
def contains(obj, create, extracted, **kwargs):
if not create:
return
if extracted:
for doc in extracted:
obj.relateddocument_set.create(relationship_id="contains",target=doc)
else:
obj.relateddocument_set.create(relationship_id="contains", target=RfcFactory())
class BcpFactory(SubseriesFactory):
type_id="bcp"
class StdFactory(SubseriesFactory):
type_id="std"
class FyiFactory(SubseriesFactory):
type_id="fyi"


@ -36,7 +36,7 @@ class DocumentChangesFeed(Feed):
feed_type = Atom1Feed
def get_object(self, request, name):
return Document.objects.get(docalias__name=name)
return Document.objects.get(name=name)
def title(self, obj):
return "Changes for %s" % obj.display_name()
@ -46,7 +46,7 @@ class DocumentChangesFeed(Feed):
raise FeedDoesNotExist
return urlreverse(
"ietf.doc.views_doc.document_history",
kwargs=dict(name=obj.canonical_name()),
kwargs=dict(name=obj.name),
)
def subtitle(self, obj):
@ -86,7 +86,7 @@ class DocumentChangesFeed(Feed):
return (
urlreverse(
"ietf.doc.views_doc.document_history",
kwargs=dict(name=item.doc.canonical_name()),
kwargs=dict(name=item.doc.name),
)
+ "#history-%s" % item.pk
)
@ -208,13 +208,13 @@ class RfcFeed(Feed):
return [doc for doc, time in results]
def item_title(self, item):
return "%s : %s" % (item.canonical_name(), item.title)
return "%s : %s" % (item.name, item.title)
def item_description(self, item):
return item.abstract
def item_link(self, item):
return "https://rfc-editor.org/info/%s" % item.canonical_name()
return "https://rfc-editor.org/info/%s" % item.name
def item_pubdate(self, item):
return item.publication_time
@ -224,20 +224,20 @@ class RfcFeed(Feed):
extra.update({"dcterms_accessRights": "gratis"})
extra.update({"dcterms_format": "text/html"})
media_contents = []
if int(item.rfc_number()) < 8650:
if int(item.rfc_number()) not in [8, 9, 51, 418, 500, 530, 589]:
if item.rfc_number < 8650:
if item.rfc_number not in [8, 9, 51, 418, 500, 530, 589]:
for fmt, media_type in [("txt", "text/plain"), ("html", "text/html")]:
media_contents.append(
{
"url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}",
"url": f"https://rfc-editor.org/rfc/{item.name}.{fmt}",
"media_type": media_type,
"is_format_of": self.item_link(item),
}
)
if int(item.rfc_number()) not in [571, 587]:
if item.rfc_number not in [571, 587]:
media_contents.append(
{
"url": f"https://www.rfc-editor.org/rfc/pdfrfc/{item.canonical_name()}.txt.pdf",
"url": f"https://www.rfc-editor.org/rfc/pdfrfc/{item.name}.txt.pdf",
"media_type": "application/pdf",
"is_format_of": self.item_link(item),
}
@ -245,7 +245,7 @@ class RfcFeed(Feed):
else:
media_contents.append(
{
"url": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml",
"url": f"https://www.rfc-editor.org/rfc/{item.name}.xml",
"media_type": "application/rfc+xml",
}
)
@ -256,16 +256,16 @@ class RfcFeed(Feed):
]:
media_contents.append(
{
"url": f"https://rfc-editor.org/rfc/{item.canonical_name()}.{fmt}",
"url": f"https://rfc-editor.org/rfc/{item.name}.{fmt}",
"media_type": media_type,
"is_format_of": f"https://www.rfc-editor.org/rfc/{item.canonical_name()}.xml",
"is_format_of": f"https://www.rfc-editor.org/rfc/{item.name}.xml",
}
)
extra.update({"media_contents": media_contents})
extra.update({"doi": "10.17487/%s" % item.canonical_name().upper()})
extra.update({"doi": "10.17487/%s" % item.name.upper()})
extra.update(
{"doiuri": "http://dx.doi.org/10.17487/%s" % item.canonical_name().upper()}
{"doiuri": "http://dx.doi.org/10.17487/%s" % item.name.upper()}
)
# R104 Publisher (Mandatory - but we need a string from them first)


@ -13,7 +13,7 @@ from django.urls import reverse as urlreverse
import debug # pyflakes:ignore
from ietf.doc.models import Document, DocAlias
from ietf.doc.models import Document
from ietf.doc.utils import uppercase_std_abbreviated_name
from ietf.utils.fields import SearchableField
@ -69,19 +69,3 @@ class SearchableDocumentsField(SearchableField):
class SearchableDocumentField(SearchableDocumentsField):
"""Specialized to only return one Document"""
max_entries = 1
class SearchableDocAliasesField(SearchableDocumentsField):
"""Search DocAliases instead of Documents"""
model = DocAlias # type: Type[models.Model]
def doc_type_filter(self, queryset):
"""Filter to include only desired doc type
For DocAlias, pass through to the docs to check type.
"""
return queryset.filter(docs__type=self.doc_type)
class SearchableDocAliasField(SearchableDocAliasesField):
"""Specialized to only return one DocAlias"""
max_entries = 1


@ -8,7 +8,7 @@ from django import forms
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import validate_email
from ietf.doc.fields import SearchableDocAliasesField, SearchableDocAliasField
from ietf.doc.fields import SearchableDocumentField, SearchableDocumentsField
from ietf.doc.models import RelatedDocument, DocExtResource
from ietf.iesg.models import TelechatDate
from ietf.iesg.utils import telechat_page_count
@ -134,11 +134,12 @@ class ActionHoldersForm(forms.Form):
IESG_APPROVED_STATE_LIST = ("ann", "rfcqueue", "pub")
class AddDownrefForm(forms.Form):
rfc = SearchableDocAliasField(
rfc = SearchableDocumentField(
label="Referenced RFC",
help_text="The RFC that is approved for downref",
required=True)
drafts = SearchableDocAliasesField(
required=True,
doc_type="rfc")
drafts = SearchableDocumentsField(
label="Internet-Drafts that makes the reference",
help_text="The Internet-Drafts that approve the downref in their Last Call",
required=True)
@ -148,7 +149,7 @@ class AddDownrefForm(forms.Form):
raise forms.ValidationError("Please provide a referenced RFC and a referencing Internet-Draft")
rfc = self.cleaned_data['rfc']
if not rfc.document.is_rfc():
if rfc.type_id != "rfc":
raise forms.ValidationError("Cannot find the RFC: " + rfc.name)
return rfc
@ -158,10 +159,10 @@ class AddDownrefForm(forms.Form):
v_err_names = []
drafts = self.cleaned_data['drafts']
for da in drafts:
state = da.document.get_state("draft-iesg")
for d in drafts:
state = d.get_state("draft-iesg")
if not state or state.slug not in IESG_APPROVED_STATE_LIST:
v_err_names.append(da.name)
v_err_names.append(d.name)
if v_err_names:
raise forms.ValidationError("Internet-Draft is not yet approved: " + ", ".join(v_err_names))
return drafts
@ -173,23 +174,23 @@ class AddDownrefForm(forms.Form):
v_err_pairs = []
rfc = self.cleaned_data['rfc']
drafts = self.cleaned_data['drafts']
for da in drafts:
if RelatedDocument.objects.filter(source=da.document, target=rfc, relationship_id='downref-approval'):
v_err_pairs.append(da.name + " --> RFC " + rfc.document.rfc_number())
for d in drafts:
if RelatedDocument.objects.filter(source=d, target=rfc, relationship_id='downref-approval'):
v_err_pairs.append(f"{d.name} --> RFC {rfc.rfc_number}")
if v_err_pairs:
raise forms.ValidationError("Downref is already in the registry: " + ", ".join(v_err_pairs))
if 'save_downref_anyway' not in self.data:
# this check is skipped if the save_downref_anyway button is used
v_err_refnorm = ""
for da in drafts:
if not RelatedDocument.objects.filter(source=da.document, target=rfc, relationship_id='refnorm'):
for d in drafts:
if not RelatedDocument.objects.filter(source=d, target=rfc, relationship_id='refnorm'):
if v_err_refnorm:
v_err_refnorm = v_err_refnorm + " or " + da.name
v_err_refnorm = v_err_refnorm + " or " + d.name
else:
v_err_refnorm = da.name
v_err_refnorm = d.name
if v_err_refnorm:
v_err_refnorm_prefix = "There does not seem to be a normative reference to RFC " + rfc.document.rfc_number() + " by "
v_err_refnorm_prefix = f"There does not seem to be a normative reference to RFC {rfc.rfc_number} by "
raise forms.ValidationError(v_err_refnorm_prefix + v_err_refnorm)


@ -19,7 +19,7 @@ from ietf.doc.templatetags.mail_filters import std_level_prompt
from ietf.utils import log
from ietf.utils.mail import send_mail, send_mail_text
from ietf.ipr.utils import iprs_from_docs, related_docs
from ietf.doc.models import WriteupDocEvent, LastCallDocEvent, DocAlias, ConsensusDocEvent
from ietf.doc.models import WriteupDocEvent, LastCallDocEvent, ConsensusDocEvent
from ietf.doc.utils import needed_ballot_positions
from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible
from ietf.group.models import Role
@ -54,7 +54,7 @@ def email_ad_approved_doc(request, doc, text):
def email_ad_approved_conflict_review(request, review, ok_to_publish):
"""Email notification when AD approves a conflict review"""
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target
(to, cc) = gather_address_lists("ad_approved_conflict_review")
frm = request.user.person.formatted_email()
send_mail(request,
@ -202,7 +202,7 @@ def generate_last_call_announcement(request, doc):
doc.filled_title = textwrap.fill(doc.title, width=70, subsequent_indent=" " * 3)
iprs = iprs_from_docs(related_docs(DocAlias.objects.get(name=doc.canonical_name())))
iprs = iprs_from_docs(related_docs(Document.objects.get(name=doc.name)))
if iprs:
ipr_links = [ urlreverse("ietf.ipr.views.show", kwargs=dict(id=i.id)) for i in iprs]
ipr_links = [ settings.IDTRACKER_BASE_URL+url if not url.startswith("http") else url for url in ipr_links ]
@ -670,7 +670,7 @@ def send_review_possibly_replaces_request(request, doc, submitter_info):
to = set(addrs.to)
cc = set(addrs.cc)
possibly_replaces = Document.objects.filter(name__in=[alias.name for alias in doc.related_that_doc("possibly-replaces")])
possibly_replaces = Document.objects.filter(name__in=[related.name for related in doc.related_that_doc("possibly-replaces")])
for other_doc in possibly_replaces:
(other_to, other_cc) = gather_address_lists('doc_replacement_suggested',doc=other_doc)
to.update(other_to)


@ -24,6 +24,7 @@ from ietf.doc.models import Document
from ietf.group.utils import get_group_role_emails, get_group_ad_emails
from ietf.utils.aliases import dump_sublist
from utils.mail import parseaddr
from ietf.utils import log
DEFAULT_YEARS = 2
@ -120,16 +121,18 @@ class Command(BaseCommand):
vfile.write("%s anything\n" % settings.DRAFT_VIRTUAL_DOMAIN)
# Internet-Drafts with active status or expired within DEFAULT_YEARS
drafts = Document.objects.filter(name__startswith='draft-')
drafts = Document.objects.filter(type_id="draft")
active_drafts = drafts.filter(states__slug='active')
inactive_recent_drafts = drafts.exclude(states__slug='active').filter(expires__gte=show_since)
interesting_drafts = active_drafts | inactive_recent_drafts
alias_domains = ['ietf.org', ]
for draft in interesting_drafts.distinct().iterator():
# Omit RFCs, unless they were published in the last DEFAULT_YEARS
if draft.docalias.filter(name__startswith='rfc'):
if draft.latest_event(type='published_rfc').time < show_since:
# Omit drafts that became RFCs, unless they were published in the last DEFAULT_YEARS
if draft.get_state_slug()=="rfc":
rfc = draft.became_rfc()
log.assertion("rfc is not None")
if rfc.latest_event(type='published_rfc').time < show_since:
continue
alias = draft.name


@ -0,0 +1,23 @@
# Generated by Django 4.2.2 on 2023-06-14 20:57
from django.db import migrations
def forward(apps, schema_editor):
StateType = apps.get_model("doc", "StateType")
rfc_statetype, _ = StateType.objects.get_or_create(slug="rfc", label="State")
State = apps.get_model("doc", "State")
State.objects.get_or_create(
type=rfc_statetype, slug="published", name="Published", used=True, order=1
)
class Migration(migrations.Migration):
dependencies = [
("doc", "0008_alter_docevent_type"),
]
operations = [
migrations.RunPython(forward),
]


@ -0,0 +1,22 @@
# Generated by Django 4.2.2 on 2023-06-14 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("doc", "0009_add_rfc_states"),
]
operations = [
migrations.AddField(
model_name="dochistory",
name="rfc_number",
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AddField(
model_name="document",
name="rfc_number",
field=models.PositiveIntegerField(blank=True, null=True),
),
]


@ -0,0 +1,76 @@
# Generated by Django 4.2.2 on 2023-06-15 15:27
from django.db import migrations
def forward(apps, schema_editor):
Document = apps.get_model("doc", "Document")
DocAlias = apps.get_model("doc", "DocAlias")
DocumentAuthor = apps.get_model("doc", "DocumentAuthor")
State = apps.get_model("doc", "State")
draft_rfc_state = State.objects.get(type_id="draft", slug="rfc")
rfc_published_state = State.objects.get(type_id="rfc", slug="published")
# Find draft Documents in the "rfc" state
found_by_state = Document.objects.filter(states=draft_rfc_state).distinct()
# Find Documents with an "rfc..." alias and confirm they're the same set
rfc_docaliases = DocAlias.objects.filter(name__startswith="rfc")
found_by_name = Document.objects.filter(docalias__in=rfc_docaliases).distinct()
assert set(found_by_name) == set(found_by_state), "mismatch between rfcs identified by state and docalias"
# As of 2023-06-15, there is one Document with two rfc aliases: rfc6312 and rfc6342 are the same Document. This
# was due to a publication error. Because we go alias-by-alias, no special handling is needed in this migration.
for rfc_alias in rfc_docaliases.order_by("name"):
assert rfc_alias.docs.count() == 1, f"DocAlias {rfc_alias} is linked to more than 1 Document"
draft = rfc_alias.docs.first()
if draft.name.startswith("rfc"):
rfc = draft
rfc.type_id = "rfc"
rfc.rfc_number = int(draft.name[3:])
rfc.save()
rfc.states.set([rfc_published_state])
else:
rfc = Document.objects.create(
type_id="rfc",
name=rfc_alias.name,
rfc_number=int(rfc_alias.name[3:]),
time=draft.time,
title=draft.title,
stream=draft.stream,
group=draft.group,
abstract=draft.abstract,
pages=draft.pages,
words=draft.words,
std_level=draft.std_level,
ad=draft.ad,
external_url=draft.external_url,
uploaded_filename=draft.uploaded_filename,
note=draft.note,
)
rfc.states.set([rfc_published_state])
rfc.formal_languages.set(draft.formal_languages.all())
# Copy Authors
for da in draft.documentauthor_set.all():
DocumentAuthor.objects.create(
document=rfc,
person=da.person,
email=da.email,
affiliation=da.affiliation,
country=da.country,
order=da.order,
)
class Migration(migrations.Migration):
dependencies = [
("doc", "0010_dochistory_rfc_number_document_rfc_number"),
("name", "0010_rfc_doctype_names"),
]
operations = [
migrations.RunPython(forward),
]


@ -0,0 +1,88 @@
# Generated by Django 4.2.2 on 2023-06-20 18:36
from django.db import migrations
from django.db.models import Q
def forward(apps, schema_editor):
"""Move RFC events from the draft to the rfc Document"""
DocAlias = apps.get_model("doc", "DocAlias")
DocEvent = apps.get_model("doc", "DocEvent")
Document = apps.get_model("doc", "Document")
# queryset with events migrated regardless of whether before or after the "published_rfc" event
events_always_migrated = DocEvent.objects.filter(
Q(
type__in=[
"published_rfc", # do not remove this one!
]
)
)
# queryset with events migrated only after the "published_rfc" event
events_migrated_after_pub = DocEvent.objects.exclude(
type__in=[
"created_ballot",
"closed_ballot",
"sent_ballot_announcement",
"changed_ballot_position",
"changed_ballot_approval_text",
"changed_ballot_writeup_text",
]
).exclude(
type="added_comment",
desc__contains="ballot set", # excludes 311 comments that all apply to drafts
)
# special case for rfc 6312/6342 draft, which has two published_rfc events
ignore = ["rfc6312", "rfc6342"] # do not reprocess these later
rfc6312 = Document.objects.get(name="rfc6312")
rfc6342 = Document.objects.get(name="rfc6342")
draft = DocAlias.objects.get(name="rfc6312").docs.first()
assert draft == DocAlias.objects.get(name="rfc6342").docs.first()
published_events = list(
DocEvent.objects.filter(doc=draft, type="published_rfc").order_by("time")
)
assert len(published_events) == 2
(
pub_event_6312,
pub_event_6342,
) = published_events # order matches pub dates at rfc-editor.org
pub_event_6312.doc = rfc6312
pub_event_6312.save()
events_migrated_after_pub.filter(
doc=draft,
time__gte=pub_event_6312.time,
time__lt=pub_event_6342.time,
).update(doc=rfc6312)
pub_event_6342.doc = rfc6342
pub_event_6342.save()
events_migrated_after_pub.filter(
doc=draft,
time__gte=pub_event_6342.time,
).update(doc=rfc6342)
# Now handle all the rest
for rfc in Document.objects.filter(type_id="rfc").exclude(name__in=ignore):
draft = DocAlias.objects.get(name=rfc.name).docs.first()
assert draft is not None
published_event = DocEvent.objects.get(doc=draft, type="published_rfc")
events_always_migrated.filter(
doc=draft,
).update(doc=rfc)
events_migrated_after_pub.filter(
doc=draft,
time__gte=published_event.time,
).update(doc=rfc)
class Migration(migrations.Migration):
dependencies = [
("doc", "0011_create_rfc_documents"),
]
operations = [
migrations.RunPython(forward),
]


@ -0,0 +1,45 @@
# Generated by Django 4.2.3 on 2023-07-05 22:40
from django.db import migrations
def forward(apps, schema_editor):
DocAlias = apps.get_model("doc", "DocAlias")
Document = apps.get_model("doc", "Document")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
for rfc_alias in DocAlias.objects.filter(name__startswith="rfc").exclude(
docs__type_id="rfc"
):
# Move these over to the RFC
RelatedDocument.objects.filter(
relationship__slug__in=(
"tobcp",
"toexp",
"tohist",
"toinf",
"tois",
"tops",
"obs",
"updates",
),
source__docalias=rfc_alias,
).update(source=Document.objects.get(name=rfc_alias.name))
# Duplicate references on the RFC but keep the ones on the draft as well
originals = list(
RelatedDocument.objects.filter(
relationship__slug__in=("refinfo", "refnorm", "refold", "refunk"),
source__docalias=rfc_alias,
)
)
for o in originals:
o.pk = None
o.source = Document.objects.get(name=rfc_alias.name)
RelatedDocument.objects.bulk_create(originals)
class Migration(migrations.Migration):
dependencies = [
("doc", "0012_move_rfc_docevents"),
]
operations = [migrations.RunPython(forward)]


@ -0,0 +1,38 @@
# Generated by Django 4.2.2 on 2023-06-20 18:36
from django.db import migrations
def forward(apps, schema_editor):
"""Point "rfc..." DocAliases at the rfc-type Document
Creates a became_rfc RelatedDocument to preserve the connection between the draft and the rfc.
"""
DocAlias = apps.get_model("doc", "DocAlias")
Document = apps.get_model("doc", "Document")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
for rfc_alias in DocAlias.objects.filter(name__startswith="rfc"):
rfc = Document.objects.get(name=rfc_alias.name)
aliased_doc = rfc_alias.docs.get() # implicitly confirms only one value in rfc_alias.docs
if aliased_doc != rfc:
# If the DocAlias was not already pointing at the rfc, it was pointing at the draft
# it came from. Create the relationship between draft and rfc Documents.
assert aliased_doc.type_id == "draft", f"Alias for {rfc.name} should be pointing at a draft"
RelatedDocument.objects.create(
source=aliased_doc,
target=rfc_alias,
relationship_id="became_rfc",
)
# Now move the alias from the draft to the rfc
rfc_alias.docs.set([rfc])
class Migration(migrations.Migration):
dependencies = [
("doc", "0013_rfc_relateddocuments"),
]
operations = [
migrations.RunPython(forward),
]


@ -0,0 +1,84 @@
# Generated by Django 4.2.2 on 2023-06-16 13:40
from django.db import migrations
import django.db.models.deletion
from django.db.models import F, Subquery, OuterRef, CharField
import ietf.utils.models
def forward(apps, schema_editor):
RelatedDocument = apps.get_model("doc", "RelatedDocument")
DocAlias = apps.get_model("doc", "DocAlias")
target_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("docs")[:1])
name_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("name")[:1])
RelatedDocument.objects.annotate(firstdoc=target_subquery).annotate(aliasname=name_subquery).update(target=F("firstdoc"),originaltargetaliasname=F("aliasname"))
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
("doc", "0014_move_rfc_docaliases"),
]
operations = [
migrations.AlterField(
model_name='relateddocument',
name='target',
field=ietf.utils.models.ForeignKey(
db_index=False,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
),
),
migrations.RenameField(
model_name="relateddocument",
old_name="target",
new_name="deprecated_target"
),
migrations.AlterField(
model_name='relateddocument',
name='deprecated_target',
field=ietf.utils.models.ForeignKey(
db_index=True,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
),
),
migrations.AddField(
model_name="relateddocument",
name="target",
field=ietf.utils.models.ForeignKey(
default=1, # A lie, but a convenient one - no relations point here.
on_delete=django.db.models.deletion.CASCADE,
related_name="targets_related",
to="doc.document",
db_index=False,
),
preserve_default=False,
),
migrations.AddField(
model_name="relateddocument",
name="originaltargetaliasname",
field=CharField(max_length=255,null=True,blank=True),
preserve_default=True,
),
migrations.RunPython(forward, reverse),
migrations.AlterField(
model_name="relateddocument",
name="target",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="targets_related",
to="doc.document",
db_index=True,
),
),
migrations.RemoveField(
model_name="relateddocument",
name="deprecated_target",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='doc.DocAlias',
),
),
]


@ -0,0 +1,87 @@
# Generated by Django 4.2.2 on 2023-06-16 13:40
from django.db import migrations
import django.db.models.deletion
from django.db.models import F, Subquery, OuterRef, CharField
import ietf.utils.models
def forward(apps, schema_editor):
RelatedDocHistory = apps.get_model("doc", "RelatedDocHistory")
DocAlias = apps.get_model("doc", "DocAlias")
target_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("docs")[:1])
name_subquery = Subquery(DocAlias.objects.filter(pk=OuterRef("deprecated_target")).values("name")[:1])
RelatedDocHistory.objects.annotate(firstdoc=target_subquery).annotate(aliasname=name_subquery).update(target=F("firstdoc"),originaltargetaliasname=F("aliasname"))
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
("doc", "0015_relate_no_aliases"),
]
operations = [
migrations.AlterField(
model_name='relateddochistory',
name='target',
field=ietf.utils.models.ForeignKey(
db_index=False,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
related_name='reversely_related_document_history_set',
),
),
migrations.RenameField(
model_name="relateddochistory",
old_name="target",
new_name="deprecated_target"
),
migrations.AlterField(
model_name='relateddochistory',
name='deprecated_target',
field=ietf.utils.models.ForeignKey(
db_index=True,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
related_name='deprecated_reversely_related_document_history_set',
),
),
migrations.AddField(
model_name="relateddochistory",
name="target",
field=ietf.utils.models.ForeignKey(
default=1, # A lie, but a convenient one - no relations point here.
on_delete=django.db.models.deletion.CASCADE,
to="doc.document",
db_index=False,
related_name='reversely_related_document_history_set',
),
preserve_default=False,
),
migrations.AddField(
model_name="relateddochistory",
name="originaltargetaliasname",
field=CharField(max_length=255,null=True,blank=True),
preserve_default=True,
),
migrations.RunPython(forward, reverse),
migrations.AlterField(
model_name="relateddochistory",
name="target",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="doc.document",
db_index=True,
related_name='reversely_related_document_history_set',
),
),
migrations.RemoveField(
model_name="relateddochistory",
name="deprecated_target",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='doc.DocAlias',
related_name='deprecated_reversely_related_document_history_set',
),
),
]

View file

@ -0,0 +1,16 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("ipr", "0002_iprdocrel_no_aliases"),
("doc", "0016_relate_hist_no_aliases"),
]
operations = [
migrations.DeleteModel(
name="DocAlias",
),
]

View file

@ -0,0 +1,45 @@
# Generated by Django 4.2.5 on 2023-09-11 17:52
from django.db import migrations
from django.db.models import Subquery, OuterRef, F
def forward(apps, schema_editor):
DocHistory = apps.get_model("doc", "DocHistory")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
Document = apps.get_model("doc", "Document")
DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc")
DocHistory.objects.filter(
type_id="draft", doc__type_id="draft", name__startswith="rfc"
).annotate(
rfc_id=Subquery(
RelatedDocument.objects.filter(
source_id=OuterRef("doc_id"), relationship_id="became_rfc"
).values_list("target_id", flat=True)[:1]
)
).update(
doc_id=F("rfc_id"), type_id="rfc"
)
DocHistory.objects.filter(type_id="rfc").annotate(
rfcno=Subquery(
Document.objects.filter(pk=OuterRef("doc_id")).values_list(
"rfc_number", flat=True
)[:1]
)
).update(rfc_number=F("rfcno"))
assert not DocHistory.objects.filter(
name__startswith="rfc", type_id="draft"
).exists()
assert not DocHistory.objects.filter(
type_id="rfc", rfc_number__isnull=True
).exists()
class Migration(migrations.Migration):
dependencies = [
("doc", "0017_delete_docalias"),
]
# There is no going back
operations = [migrations.RunPython(forward)]

View file

@ -0,0 +1,21 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from django.db import migrations
def forward(apps, schema_editor):
StateType = apps.get_model("doc", "StateType")
for slug in ["bcp", "std", "fyi"]:
StateType.objects.create(slug=slug, label=f"{slug} state")
def reverse(apps, schema_editor):
StateType = apps.get_model("doc", "StateType")
StateType.objects.filter(slug__in=["bcp", "std", "fyi"]).delete()
class Migration(migrations.Migration):
dependencies = [
("doc", "0018_move_dochistory"),
]
operations = [migrations.RunPython(forward, reverse)]

View file

@ -0,0 +1,29 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from django.db import migrations
from django.db.models import Subquery, OuterRef, F
def forward(apps, schema_editor):
Document = apps.get_model("doc", "Document")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
Document.tags.through.objects.filter(
doctagname_id__in=["errata", "verified-errata"], document__type_id="draft"
).annotate(
rfcdoc=Subquery(
RelatedDocument.objects.filter(
relationship_id="became_rfc", source_id=OuterRef("document__pk")
).values_list("target__pk", flat=True)[:1]
)
).update(
document_id=F("rfcdoc")
)
class Migration(migrations.Migration):
dependencies = [
("doc", "0019_subseries"),
]
operations = [migrations.RunPython(forward)]
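The forward() step above works on the auto-created many-to-many through table directly; a minimal sketch of that idea (a real model import is shown for illustration, whereas the migration itself uses apps.get_model):

from ietf.doc.models import Document

# Tag assignments live in the M2M "through" table, so errata tags can be
# repointed from a draft to the RFC it became with one bulk update.
errata_rows = Document.tags.through.objects.filter(
    doctagname_id__in=["errata", "verified-errata"], document__type_id="draft"
)
# each row's document_id is then rewritten to the draft's "became_rfc" target,
# exactly as the annotate()/update() in forward() does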

View file

@ -37,7 +37,6 @@ from ietf.name.models import ( DocTypeName, DocTagName, StreamName, IntendedStdL
from ietf.person.models import Email, Person
from ietf.person.utils import get_active_balloters
from ietf.utils import log
from ietf.utils.admin import admin_link
from ietf.utils.decorators import memoize
from ietf.utils.validators import validate_no_control_chars
from ietf.utils.mail import formataddr
@ -124,6 +123,7 @@ class DocumentInfo(models.Model):
uploaded_filename = models.TextField(blank=True)
note = models.TextField(blank=True)
internal_comments = models.TextField(blank=True)
rfc_number = models.PositiveIntegerField(blank=True, null=True) # only valid for type="rfc"
def file_extension(self):
if not hasattr(self, '_cached_extension'):
@ -136,18 +136,17 @@ class DocumentInfo(models.Model):
def get_file_path(self):
if not hasattr(self, '_cached_file_path'):
if self.type_id == "draft":
if self.type_id == "rfc":
self._cached_file_path = settings.RFC_PATH
elif self.type_id == "draft":
if self.is_dochistory():
self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
else:
if self.get_state_slug() == "rfc":
self._cached_file_path = settings.RFC_PATH
draft_state = self.get_state('draft')
if draft_state and draft_state.slug == 'active':
self._cached_file_path = settings.INTERNET_DRAFT_PATH
else:
draft_state = self.get_state('draft')
if draft_state and draft_state.slug == 'active':
self._cached_file_path = settings.INTERNET_DRAFT_PATH
else:
self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
self._cached_file_path = settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR
elif self.meeting_related() and self.type_id in (
"agenda", "minutes", "slides", "bluesheets", "procmaterials", "chatlog", "polls"
):
@ -172,17 +171,16 @@ class DocumentInfo(models.Model):
if not hasattr(self, '_cached_base_name'):
if self.uploaded_filename:
self._cached_base_name = self.uploaded_filename
elif self.type_id == 'rfc':
self._cached_base_name = "%s.txt" % self.name
elif self.type_id == 'draft':
if self.is_dochistory():
self._cached_base_name = "%s-%s.txt" % (self.doc.name, self.rev)
else:
if self.get_state_slug() == 'rfc':
self._cached_base_name = "%s.txt" % self.canonical_name()
else:
self._cached_base_name = "%s-%s.txt" % (self.name, self.rev)
self._cached_base_name = "%s-%s.txt" % (self.name, self.rev)
elif self.type_id in ["slides", "agenda", "minutes", "bluesheets", "procmaterials", ] and self.meeting_related():
ext = 'pdf' if self.type_id == 'procmaterials' else 'txt'
self._cached_base_name = f'{self.canonical_name()}-{self.rev}.{ext}'
self._cached_base_name = f'{self.name}-{self.rev}.{ext}'
elif self.type_id == 'review':
# TODO: This will be wrong if a review is updated on the same day it was created (or updated more than once on the same day)
self._cached_base_name = "%s.txt" % self.name
@ -190,9 +188,9 @@ class DocumentInfo(models.Model):
self._cached_base_name = "%s-%s.md" % (self.name, self.rev)
else:
if self.rev:
self._cached_base_name = "%s-%s.txt" % (self.canonical_name(), self.rev)
self._cached_base_name = "%s-%s.txt" % (self.name, self.rev)
else:
self._cached_base_name = "%s.txt" % (self.canonical_name(), )
self._cached_base_name = "%s.txt" % (self.name, )
return self._cached_base_name
def get_file_name(self):
@ -200,17 +198,28 @@ class DocumentInfo(models.Model):
self._cached_file_name = os.path.join(self.get_file_path(), self.get_base_name())
return self._cached_file_name
def revisions(self):
def revisions_by_dochistory(self):
revisions = []
doc = self.doc if isinstance(self, DocHistory) else self
for e in doc.docevent_set.filter(type='new_revision').distinct():
if e.rev and not e.rev in revisions:
revisions.append(e.rev)
if not doc.rev in revisions:
revisions.append(doc.rev)
revisions.sort()
if self.type_id != "rfc":
for h in self.history_set.order_by("time", "id"):
if h.rev and not h.rev in revisions:
revisions.append(h.rev)
if not self.rev in revisions:
revisions.append(self.rev)
return revisions
def revisions_by_newrevisionevent(self):
revisions = []
if self.type_id != "rfc":
doc = self.doc if isinstance(self, DocHistory) else self
for e in doc.docevent_set.filter(type='new_revision').distinct():
if e.rev and not e.rev in revisions:
revisions.append(e.rev)
if not doc.rev in revisions:
revisions.append(doc.rev)
revisions.sort()
return revisions
def get_href(self, meeting=None):
return self._get_ref(meeting=meeting,meeting_doc_refs=settings.MEETING_DOC_HREFS)
@ -244,7 +253,7 @@ class DocumentInfo(models.Model):
format = settings.DOC_HREFS[self.type_id]
elif self.type_id in settings.DOC_HREFS:
self.is_meeting_related = False
if self.is_rfc():
if self.type_id == "rfc":
format = settings.DOC_HREFS['rfc']
else:
format = settings.DOC_HREFS[self.type_id]
@ -334,7 +343,9 @@ class DocumentInfo(models.Model):
if not state:
return "Unknown state"
if self.type_id == 'draft':
if self.type_id == "rfc":
return f"RFC {self.rfc_number} ({self.std_level})"
elif self.type_id == 'draft':
iesg_state = self.get_state("draft-iesg")
iesg_state_summary = None
if iesg_state:
@ -343,13 +354,15 @@ class DocumentInfo(models.Model):
iesg_state_summary = iesg_state.name
if iesg_substate:
iesg_state_summary = iesg_state_summary + "::"+"::".join(tag.name for tag in iesg_substate)
if state.slug == "rfc":
return "RFC %s (%s)" % (self.rfc_number(), self.std_level)
rfc = self.became_rfc()
if rfc:
return f"Became RFC {rfc.rfc_number} ({rfc.std_level})"
elif state.slug == "repl":
rs = self.related_that("replaces")
if rs:
return mark_safe("Replaced by " + ", ".join("<a href=\"%s\">%s</a>" % (urlreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=alias.document.name)), alias.document) for alias in rs))
return mark_safe("Replaced by " + ", ".join("<a href=\"%s\">%s</a>" % (urlreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=related.name)), related) for related in rs))
else:
return "Replaced"
elif state.slug == "active":
@ -375,27 +388,6 @@ class DocumentInfo(models.Model):
else:
return state.name
def is_rfc(self):
if not hasattr(self, '_cached_is_rfc'):
self._cached_is_rfc = self.pk and self.type_id == 'draft' and self.states.filter(type='draft',slug='rfc').exists()
return self._cached_is_rfc
def rfc_number(self):
if not hasattr(self, '_cached_rfc_number'):
self._cached_rfc_number = None
if self.is_rfc():
n = self.canonical_name()
if n.startswith("rfc"):
self._cached_rfc_number = n[3:]
else:
if isinstance(self,Document):
logger.error("Document self.is_rfc() is True but self.canonical_name() is %s" % n)
return self._cached_rfc_number
@property
def rfcnum(self):
return self.rfc_number()
def author_list(self):
best_addresses = []
for author in self.documentauthor_set.all():
@ -468,9 +460,9 @@ class DocumentInfo(models.Model):
if not isinstance(relationship, tuple):
raise TypeError("Expected a string or tuple, received %s" % type(relationship))
if isinstance(self, Document):
return RelatedDocument.objects.filter(target__docs=self, relationship__in=relationship).select_related('source')
return RelatedDocument.objects.filter(target=self, relationship__in=relationship).select_related('source')
elif isinstance(self, DocHistory):
return RelatedDocHistory.objects.filter(target__docs=self.doc, relationship__in=relationship).select_related('source')
return RelatedDocHistory.objects.filter(target=self.doc, relationship__in=relationship).select_related('source')
else:
raise TypeError("Expected method called on Document or DocHistory")
@ -504,15 +496,14 @@ class DocumentInfo(models.Model):
for r in rels:
if not r in related:
related += ( r, )
for doc in r.target.docs.all():
related = doc.all_relations_that_doc(relationship, related)
related = r.target.all_relations_that_doc(relationship, related)
return related
def related_that(self, relationship):
return list(set([x.source.docalias.get(name=x.source.name) for x in self.relations_that(relationship)]))
return list(set([x.source for x in self.relations_that(relationship)]))
def all_related_that(self, relationship, related=None):
return list(set([x.source.docalias.get(name=x.source.name) for x in self.all_relations_that(relationship)]))
return list(set([x.source for x in self.all_relations_that(relationship)]))
def related_that_doc(self, relationship):
return list(set([x.target for x in self.relations_that_doc(relationship)]))
@ -521,12 +512,7 @@ class DocumentInfo(models.Model):
return list(set([x.target for x in self.all_relations_that_doc(relationship)]))
def replaces(self):
return set([ d for r in self.related_that_doc("replaces") for d in r.docs.all() ])
def replaces_canonical_name(self):
s = set([ r.document for r in self.related_that_doc("replaces")])
first = list(s)[0] if s else None
return None if first is None else first.filename_with_rev()
return self.related_that_doc("replaces")
def replaced_by(self):
return set([ r.document for r in self.related_that("replaces") ])
@ -553,10 +539,10 @@ class DocumentInfo(models.Model):
return self.text() or "Error; cannot read '%s'"%self.get_base_name()
def html_body(self, classes=""):
if self.get_state_slug() == "rfc":
if self.type_id == "rfc":
try:
html = Path(
os.path.join(settings.RFC_PATH, self.canonical_name() + ".html")
os.path.join(settings.RFC_PATH, self.name + ".html")
).read_text()
except (IOError, UnicodeDecodeError):
return None
@ -656,10 +642,38 @@ class DocumentInfo(models.Model):
return self.relations_that_doc(('refnorm','refinfo','refunk','refold'))
def referenced_by(self):
return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug__in=['rfc','active'])
return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter(
models.Q(
source__type__slug="draft",
source__states__type__slug="draft",
source__states__slug="active",
)
| models.Q(source__type__slug="rfc")
)
def referenced_by_rfcs(self):
return self.relations_that(('refnorm','refinfo','refunk','refold')).filter(source__states__type__slug='draft',source__states__slug='rfc')
return self.relations_that(("refnorm", "refinfo", "refunk", "refold")).filter(
source__type__slug="rfc"
)
def became_rfc(self):
if not hasattr(self, "_cached_became_rfc"):
doc = self if isinstance(self, Document) else self.doc
self._cached_became_rfc = next(iter(doc.related_that_doc("became_rfc")), None)
return self._cached_became_rfc
def came_from_draft(self):
if not hasattr(self, "_cached_came_from_draft"):
doc = self if isinstance(self, Document) else self.doc
self._cached_came_from_draft = next(iter(doc.related_that("became_rfc")), None)
return self._cached_came_from_draft
def contains(self):
return self.related_that_doc("contains")
def part_of(self):
return self.related_that("contains")
class Meta:
abstract = True
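A usage sketch of the navigation helpers added above (the document name is an assumption for illustration):

from ietf.doc.models import Document

draft = Document.objects.get(name="draft-ietf-example-protocol")  # assumed name
rfc = draft.became_rfc()                 # the rfc-type Document, or None
if rfc is not None:
    assert rfc.came_from_draft() == draft
    for subseries in rfc.part_of():      # bcp/std/fyi Documents containing this RFC
        print(subseries.name, [d.name for d in subseries.contains()])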
@ -668,42 +682,40 @@ STATUSCHANGE_RELATIONS = ('tops','tois','tohist','toinf','tobcp','toexp')
class RelatedDocument(models.Model):
source = ForeignKey('Document')
target = ForeignKey('DocAlias')
target = ForeignKey('Document', related_name='targets_related')
relationship = ForeignKey(DocRelationshipName)
originaltargetaliasname = models.CharField(max_length=255, null=True, blank=True)
def action(self):
return self.relationship.name
def __str__(self):
return u"%s %s %s" % (self.source.name, self.relationship.name.lower(), self.target.name)
def is_downref(self):
if self.source.type.slug != "draft" or self.relationship.slug not in [
if self.source.type_id not in ["draft","rfc"] or self.relationship.slug not in [
"refnorm",
"refold",
"refunk",
]:
return None
state = self.source.get_state()
if state and state.slug == "rfc":
source_lvl = self.source.std_level.slug if self.source.std_level else None
elif self.source.intended_std_level:
source_lvl = self.source.intended_std_level.slug
if self.source.type_id == "rfc":
source_lvl = self.source.std_level_id
else:
source_lvl = None
source_lvl = self.source.intended_std_level_id
if source_lvl not in ["bcp", "ps", "ds", "std", "unkn"]:
return None
if self.target.document.get_state().slug == "rfc":
if not self.target.document.std_level:
target_lvl = "unkn"
if self.target.type_id == 'rfc':
if not self.target.std_level:
target_lvl = 'unkn'
else:
target_lvl = self.target.document.std_level.slug
target_lvl = self.target.std_level_id
else:
if not self.target.document.intended_std_level:
target_lvl = "unkn"
if not self.target.intended_std_level:
target_lvl = 'unkn'
else:
target_lvl = self.target.document.intended_std_level.slug
target_lvl = self.target.intended_std_level_id
if self.relationship.slug not in ["refnorm", "refunk"]:
return None
@ -712,7 +724,7 @@ class RelatedDocument(models.Model):
return None
pos_downref = (
"Downref" if self.relationship.slug != "refunk" else "Possible Downref"
"Downref" if self.relationship_id != "refunk" else "Possible Downref"
)
if source_lvl in ["bcp", "ps", "ds", "std"] and target_lvl in ["inf", "exp"]:
@ -734,8 +746,8 @@ class RelatedDocument(models.Model):
def is_approved_downref(self):
if self.target.document.get_state().slug == 'rfc':
if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target):
if self.target.type_id == 'rfc':
if RelatedDocument.objects.filter(relationship_id='downref-approval', target=self.target).exists():
return "Approved Downref"
return False
@ -831,7 +843,7 @@ class Document(DocumentInfo):
name = self.name
url = None
if self.type_id == "draft" and self.get_state_slug() == "rfc":
name = self.canonical_name()
name = self.name
url = urlreverse('ietf.doc.views_doc.document_main', kwargs={ 'name': name }, urlconf="ietf.urls")
elif self.type_id in ('slides','bluesheets','recording'):
session = self.session_set.first()
@ -869,28 +881,8 @@ class Document(DocumentInfo):
e = model.objects.filter(doc=self).filter(**filter_args).order_by('-time', '-id').first()
return e
def canonical_name(self):
if not hasattr(self, '_canonical_name'):
name = self.name
if self.type_id == "draft" and self.get_state_slug() == "rfc":
a = self.docalias.filter(name__startswith="rfc").order_by('-name').first()
if a:
name = a.name
elif self.type_id == "charter":
from ietf.doc.utils_charter import charter_name_for_group # Imported locally to avoid circular imports
try:
name = charter_name_for_group(self.chartered_group)
except Group.DoesNotExist:
pass
self._canonical_name = name
return self._canonical_name
def canonical_docalias(self):
return self.docalias.get(name=self.name)
def display_name(self):
name = self.canonical_name()
name = self.name
if name.startswith('rfc'):
name = name.upper()
return name
@ -985,17 +977,27 @@ class Document(DocumentInfo):
def ipr(self,states=settings.PUBLISH_IPR_STATES):
"""Returns the IPR disclosures against this document (as a queryset over IprDocRel)."""
from ietf.ipr.models import IprDocRel
return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states)
# from ietf.ipr.models import IprDocRel
# return IprDocRel.objects.filter(document__docs=self, disclosure__state__in=states) # TODO - clear these comments away
return self.iprdocrel_set.filter(disclosure__state__in=states)
def related_ipr(self):
"""Returns the IPR disclosures against this document and those documents this
document directly or indirectly obsoletes or replaces
"""
from ietf.ipr.models import IprDocRel
iprs = IprDocRel.objects.filter(document__in=list(self.docalias.all())+self.all_related_that_doc(('obs','replaces'))).filter(disclosure__state__in=settings.PUBLISH_IPR_STATES).values_list('disclosure', flat=True).distinct()
iprs = (
IprDocRel.objects.filter(
document__in=[self]
+ self.all_related_that_doc(("obs", "replaces"))
)
.filter(disclosure__state__in=settings.PUBLISH_IPR_STATES)
.values_list("disclosure", flat=True)
.distinct()
)
return iprs
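With IprDocRel.document now a plain Document foreign key (per the companion ipr migration this change depends on), disclosures are reachable without an alias join; a sketch (assumed document name):

from ietf.doc.models import Document

doc = Document.objects.get(name="draft-ietf-example-protocol")  # assumed name
disclosures = doc.ipr()              # IprDocRel queryset, published states only
disclosure_ids = doc.related_ipr()   # disclosure ids, including those against
                                     # documents this one obsoletes or replaces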
def future_presentations(self):
""" returns related SessionPresentation objects for meetings that
have not yet ended. This implementation allows for 2 week meetings """
@ -1030,7 +1032,7 @@ class Document(DocumentInfo):
This is the rfc publication date for RFCs, and the new-revision date for other documents.
"""
if self.get_state_slug() == "rfc":
if self.type_id == "rfc":
# As of Sept 2022, in ietf.sync.rfceditor.update_docs_from_rfc_index() `published_rfc` events are
# created with a timestamp whose date *in the PST8PDT timezone* is the official publication date
# assigned by the RFC editor.
@ -1132,8 +1134,9 @@ class DocExtResource(ExtResource):
class RelatedDocHistory(models.Model):
source = ForeignKey('DocHistory')
target = ForeignKey('DocAlias', related_name="reversely_related_document_history_set")
target = ForeignKey('Document', related_name="reversely_related_document_history_set")
relationship = ForeignKey(DocRelationshipName)
originaltargetaliasname = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return u"%s %s %s" % (self.source.doc.name, self.relationship.name.lower(), self.target.name)
@ -1147,10 +1150,7 @@ class DocHistoryAuthor(DocumentAuthorInfo):
class DocHistory(DocumentInfo):
doc = ForeignKey(Document, related_name="history_set")
# the name here is used to capture the canonical name at the time
# - it would perhaps be more elegant to simply call the attribute
# canonical_name and replace the function on Document with a
# property
name = models.CharField(max_length=255)
def __str__(self):
@ -1162,11 +1162,6 @@ class DocHistory(DocumentInfo):
def get_related_proceedings_material(self):
return self.doc.get_related_proceedings_material()
def canonical_name(self):
if hasattr(self, '_canonical_name'):
return self._canonical_name
return self.name
def latest_event(self, *args, **kwargs):
kwargs["time__lte"] = self.time
return self.doc.latest_event(*args, **kwargs)
@ -1181,10 +1176,6 @@ class DocHistory(DocumentInfo):
def groupmilestone_set(self):
return self.doc.groupmilestone_set
@property
def docalias(self):
return self.doc.docalias
def is_dochistory(self):
return True
@ -1202,25 +1193,6 @@ class DocHistory(DocumentInfo):
verbose_name = "document history"
verbose_name_plural = "document histories"
class DocAlias(models.Model):
"""This is used for documents that may appear under multiple names,
and in particular for RFCs, which for continuity still keep the
same immutable Document.name, in the tables, but will be referred
to by RFC number, primarily, after achieving RFC status.
"""
name = models.CharField(max_length=255, unique=True)
docs = models.ManyToManyField(Document, related_name='docalias')
@property
def document(self):
return self.docs.first()
def __str__(self):
return u"%s-->%s" % (self.name, ','.join([force_str(d.name) for d in self.docs.all() if isinstance(d, Document) ]))
document_link = admin_link("document")
class Meta:
verbose_name = "document alias"
verbose_name_plural = "document aliases"
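For reference, the lookups the removed alias table used to serve now go through first-class Documents; a sketch (the RFC number is an assumption):

from ietf.doc.models import Document

rfc = Document.objects.get(type_id="rfc", name="rfc9999")   # assumed number
same = Document.objects.get(rfc_number=9999)                # lookup by number
draft = rfc.came_from_draft()                               # originating draft, or None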
class DocReminder(models.Model):
event = ForeignKey('DocEvent')

View file

@ -12,7 +12,7 @@ from tastypie.cache import SimpleCache
from ietf import api
from ietf.doc.models import (BallotType, DeletedEvent, StateType, State, Document,
DocumentAuthor, DocEvent, StateDocEvent, DocHistory, ConsensusDocEvent, DocAlias,
DocumentAuthor, DocEvent, StateDocEvent, DocHistory, ConsensusDocEvent,
TelechatDocEvent, DocReminder, LastCallDocEvent, NewRevisionDocEvent, WriteupDocEvent,
InitialReviewDocEvent, DocHistoryAuthor, BallotDocEvent, RelatedDocument,
RelatedDocHistory, BallotPositionDocEvent, AddedMessageEvent, SubmissionDocEvent,
@ -286,21 +286,6 @@ class ConsensusDocEventResource(ModelResource):
}
api.doc.register(ConsensusDocEventResource())
class DocAliasResource(ModelResource):
document = ToOneField(DocumentResource, 'document')
class Meta:
cache = SimpleCache()
queryset = DocAlias.objects.all()
serializer = api.Serializer()
detail_uri_name = 'name'
#resource_name = 'docalias'
ordering = ['id', ]
filtering = {
"name": ALL,
"document": ALL_WITH_RELATIONS,
}
api.doc.register(DocAliasResource())
from ietf.person.resources import PersonResource
class TelechatDocEventResource(ModelResource):
by = ToOneField(PersonResource, 'by')
@ -490,7 +475,7 @@ api.doc.register(BallotDocEventResource())
from ietf.name.resources import DocRelationshipNameResource
class RelatedDocumentResource(ModelResource):
source = ToOneField(DocumentResource, 'source')
target = ToOneField(DocAliasResource, 'target')
target = ToOneField(DocumentResource, 'target')
relationship = ToOneField(DocRelationshipNameResource, 'relationship')
class Meta:
cache = SimpleCache()
@ -509,7 +494,7 @@ api.doc.register(RelatedDocumentResource())
from ietf.name.resources import DocRelationshipNameResource
class RelatedDocHistoryResource(ModelResource):
source = ToOneField(DocHistoryResource, 'source')
target = ToOneField(DocAliasResource, 'target')
target = ToOneField(DocumentResource, 'target')
relationship = ToOneField(DocRelationshipNameResource, 'relationship')
class Meta:
cache = SimpleCache()

View file

@ -22,7 +22,7 @@ from django.utils import timezone
import debug # pyflakes:ignore
from ietf.doc.models import BallotDocEvent, DocAlias
from ietf.doc.models import BallotDocEvent, Document
from ietf.doc.models import ConsensusDocEvent
from ietf.ietfauth.utils import can_request_rfc_publication as utils_can_request_rfc_publication
from ietf.utils.html import sanitize_fragment
@ -139,15 +139,16 @@ def rfceditor_info_url(rfcnum : str):
return urljoin(settings.RFC_EDITOR_INFO_BASE_URL, f'rfc{rfcnum}')
def doc_canonical_name(name):
def doc_name(name):
"""Check whether a given document exists, and return its canonical name"""
def find_unique(n):
key = hash(n)
found = cache.get(key)
if not found:
exact = DocAlias.objects.filter(name=n).first()
exact = Document.objects.filter(name=n).first()
found = exact.name if exact else "_"
            # TODO review this cache policy (and the need for this entire function)
cache.set(key, found, timeout=60*60*24) # cache for one day
return None if found == "_" else found
@ -173,7 +174,7 @@ def doc_canonical_name(name):
def link_charter_doc_match(match):
if not doc_canonical_name(match[0]):
if not doc_name(match[0]):
return match[0]
url = urlreverse(
"ietf.doc.views_doc.document_main",
@ -186,7 +187,7 @@ def link_non_charter_doc_match(match):
name = match[0]
# handle "I-D.*"" reference-style matches
name = re.sub(r"^i-d\.(.*)", r"draft-\1", name, flags=re.IGNORECASE)
cname = doc_canonical_name(name)
cname = doc_name(name)
if not cname:
return match[0]
if name == cname:
@ -201,7 +202,7 @@ def link_non_charter_doc_match(match):
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=cname))
return f'<a href="{url}">{match[0]}</a>'
cname = doc_canonical_name(name)
cname = doc_name(name)
if not cname:
return match[0]
if name == cname:
@ -221,12 +222,11 @@ def link_non_charter_doc_match(match):
def link_other_doc_match(match):
doc = match[2].strip().lower()
rev = match[3]
if not doc_canonical_name(doc + rev):
if not doc_name(doc + rev):
return match[0]
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=doc + rev))
return f'<a href="{url}">{match[1]}</a>'
@register.filter(name="urlize_ietf_docs", is_safe=True, needs_autoescape=True)
def urlize_ietf_docs(string, autoescape=None):
"""
@ -255,6 +255,7 @@ def urlize_ietf_docs(string, autoescape=None):
string,
flags=re.IGNORECASE | re.ASCII,
)
return mark_safe(string)
@ -267,7 +268,7 @@ def urlize_related_source_list(related, document_html=False):
names = set()
titles = set()
for rel in related:
name=rel.source.canonical_name()
name=rel.source.name
title = rel.source.title
if name in names and title in titles:
continue
@ -288,8 +289,8 @@ def urlize_related_target_list(related, document_html=False):
"""Convert a list of RelatedDocuments into list of links using the target document's canonical name"""
links = []
for rel in related:
name=rel.target.document.canonical_name()
title = rel.target.document.title
name=rel.target.name
title = rel.target.title
url = urlreverse('ietf.doc.views_doc.document_main' if document_html is False else 'ietf.doc.views_doc.document_html', kwargs=dict(name=name))
name = escape(name)
title = escape(title)
@ -556,7 +557,7 @@ def consensus(doc):
@register.filter
def std_level_to_label_format(doc):
"""Returns valid Bootstrap classes to label a status level badge."""
if doc.is_rfc():
if doc.type_id == "rfc":
if doc.related_that("obs"):
return "obs"
else:

View file

@ -3,12 +3,12 @@
from django.conf import settings
from ietf.doc.factories import (
WgDraftFactory,
WgRfcFactory,
IndividualDraftFactory,
CharterFactory,
NewRevisionDocEventFactory,
)
from ietf.doc.models import State, DocEvent, DocAlias
from ietf.doc.models import DocEvent
from ietf.doc.templatetags.ietf_filters import urlize_ietf_docs, is_valid_url
from ietf.person.models import Person
from ietf.utils.test_utils import TestCase
@ -25,23 +25,21 @@ class IetfFiltersTests(TestCase):
self.assertEqual(is_valid_url(url), result)
def test_urlize_ietf_docs(self):
wg_id = WgDraftFactory()
wg_id.set_state(State.objects.get(type="draft", slug="rfc"))
wg_id.std_level_id = "bcp"
wg_id.save_with_history(
rfc = WgRfcFactory(rfc_number=123456,std_level_id="bcp")
rfc.save_with_history(
[
DocEvent.objects.create(
doc=wg_id,
rev=wg_id.rev,
doc=rfc,
rev=rfc.rev,
type="published_rfc",
by=Person.objects.get(name="(System)"),
)
]
)
DocAlias.objects.create(name="rfc123456").docs.add(wg_id)
DocAlias.objects.create(name="bcp123456").docs.add(wg_id)
DocAlias.objects.create(name="std123456").docs.add(wg_id)
DocAlias.objects.create(name="fyi123456").docs.add(wg_id)
        # TODO - bring these into existence when subseries are well modeled
# DocAlias.objects.create(name="bcp123456").docs.add(rfc)
# DocAlias.objects.create(name="std123456").docs.add(rfc)
# DocAlias.objects.create(name="fyi123456").docs.add(rfc)
id = IndividualDraftFactory(name="draft-me-rfc123456bis")
id_num = IndividualDraftFactory(name="draft-rosen-rfcefdp-update-2026")
@ -59,15 +57,17 @@ class IetfFiltersTests(TestCase):
cases = [
("no change", "no change"),
("bCp123456", '<a href="/doc/bcp123456/">bCp123456</a>'),
("Std 00123456", '<a href="/doc/std123456/">Std 00123456</a>'),
(
"FyI 0123456 changes std 00123456",
'<a href="/doc/fyi123456/">FyI 0123456</a> changes <a href="/doc/std123456/">std 00123456</a>',
),
# TODO: rework subseries when we add them
# ("bCp123456", '<a href="/doc/bcp123456/">bCp123456</a>'),
# ("Std 00123456", '<a href="/doc/std123456/">Std 00123456</a>'),
# (
# "FyI 0123456 changes std 00123456",
# '<a href="/doc/fyi123456/">FyI 0123456</a> changes <a href="/doc/std123456/">std 00123456</a>',
# ),
("rfc123456", '<a href="/doc/rfc123456/">rfc123456</a>'),
("Rfc 0123456", '<a href="/doc/rfc123456/">Rfc 0123456</a>'),
(wg_id.name, f'<a href="/doc/{wg_id.name}/">{wg_id.name}</a>'),
(rfc.name, f'<a href="/doc/{rfc.name}/">{rfc.name}</a>'),
(
f"{id.name}-{id.rev}.txt",
f'<a href="/doc/{id.name}/{id.rev}/">{id.name}-{id.rev}.txt</a>',

File diff suppressed because it is too large

View file

@ -803,8 +803,8 @@ class ApproveBallotTests(TestCase):
desc='Last call announcement was changed',
text='this is simple last call text.' )
rfc = IndividualRfcFactory.create(
name = "rfc6666",
stream_id='ise',
other_aliases=['rfc6666',],
states=[('draft','rfc'),('draft-iesg','pub')],
std_level_id='inf', )
@ -821,7 +821,7 @@ class ApproveBallotTests(TestCase):
self.assertContains(r, "No downward references for")
# Add a downref, the page should ask if it should be added to the registry
rel = draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),relationship_id='refnorm')
rel = draft.relateddocument_set.create(target=rfc, relationship_id='refnorm')
d = [rdoc for rdoc in draft.relateddocument_set.all() if rel.is_approved_downref()]
original_len = len(d)
r = self.client.get(url)
@ -1121,13 +1121,13 @@ class RegenerateLastCallTestCase(TestCase):
self.assertFalse("contains these normative down" in lc_text)
rfc = IndividualRfcFactory.create(
rfc_number=6666,
stream_id='ise',
other_aliases=['rfc6666',],
states=[('draft','rfc'),('draft-iesg','pub')],
std_level_id='inf',
)
draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'),relationship_id='refnorm')
draft.relateddocument_set.create(target=rfc,relationship_id='refnorm')
r = self.client.post(url, dict(regenerate_last_call_text="1"))
self.assertEqual(r.status_code, 200)
@ -1137,7 +1137,7 @@ class RegenerateLastCallTestCase(TestCase):
self.assertTrue("rfc6666" in lc_text)
self.assertTrue("Independent Submission" in lc_text)
draft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), relationship_id='downref-approval')
draft.relateddocument_set.create(target=rfc, relationship_id='downref-approval')
r = self.client.post(url, dict(regenerate_last_call_text="1"))
self.assertEqual(r.status_code, 200)

View file

@ -18,7 +18,7 @@ from django.utils import timezone
from ietf.group.factories import RoleFactory
from ietf.doc.factories import BofreqFactory, NewRevisionDocEventFactory
from ietf.doc.models import State, Document, DocAlias, NewRevisionDocEvent
from ietf.doc.models import State, Document, NewRevisionDocEvent
from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible
from ietf.ietfauth.utils import has_role
from ietf.person.factories import PersonFactory
@ -32,7 +32,7 @@ class BofreqTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['BOFREQ_PATH']
def write_bofreq_file(self, bofreq):
fname = Path(settings.BOFREQ_PATH) / ("%s-%s.md" % (bofreq.canonical_name(), bofreq.rev))
fname = Path(settings.BOFREQ_PATH) / ("%s-%s.md" % (bofreq.name, bofreq.rev))
with fname.open("w") as f:
f.write(f"""# This is a test bofreq.
Version: {bofreq.rev}
@ -366,7 +366,6 @@ This test section has some text.
name = f"bofreq-{xslugify(nobody.last_name())[:64]}-{postdict['title']}".replace(' ','-')
bofreq = Document.objects.filter(name=name,type_id='bofreq').first()
self.assertIsNotNone(bofreq)
self.assertIsNotNone(DocAlias.objects.filter(name=name).first())
self.assertEqual(bofreq.title, postdict['title'])
self.assertEqual(bofreq.rev, '00')
self.assertEqual(bofreq.get_state_slug(), 'proposed')

View file

@ -88,10 +88,7 @@ class EditCharterTests(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['CHARTER_PATH']
def write_charter_file(self, charter):
with (Path(settings.CHARTER_PATH) /
("%s-%s.txt" % (charter.canonical_name(), charter.rev))
).open("w") as f:
f.write("This is a charter.")
(Path(settings.CHARTER_PATH) / f"{charter.name}-{charter.rev}.txt").write_text("This is a charter.")
def test_startstop_process(self):
CharterFactory(group__acronym='mars')
@ -509,8 +506,13 @@ class EditCharterTests(TestCase):
self.assertEqual(charter.rev, next_revision(prev_rev))
self.assertTrue("new_revision" in charter.latest_event().type)
with (Path(settings.CHARTER_PATH) / (charter.canonical_name() + "-" + charter.rev + ".txt")).open(encoding='utf-8') as f:
self.assertEqual(f.read(), "Windows line\nMac line\nUnix line\n" + utf_8_snippet.decode('utf-8'))
file_contents = (
Path(settings.CHARTER_PATH) / (charter.name + "-" + charter.rev + ".txt")
).read_text("utf-8")
self.assertEqual(
file_contents,
"Windows line\nMac line\nUnix line\n" + utf_8_snippet.decode("utf-8"),
)
def test_submit_initial_charter(self):
group = GroupFactory(type_id='wg',acronym='mars',list_email='mars-wg@ietf.org')
@ -538,6 +540,24 @@ class EditCharterTests(TestCase):
group = Group.objects.get(pk=group.pk)
self.assertEqual(group.charter, charter)
def test_submit_charter_with_invalid_name(self):
self.client.login(username="secretary", password="secretary+password")
ietf_group = GroupFactory(type_id="wg")
for bad_name in ("charter-irtf-{}", "charter-randomjunk-{}", "charter-ietf-thisisnotagroup"):
url = urlreverse("ietf.doc.views_charter.submit", kwargs={"name": bad_name.format(ietf_group.acronym)})
r = self.client.get(url)
self.assertEqual(r.status_code, 404, f"GET of charter named {bad_name} should 404")
r = self.client.post(url, {})
self.assertEqual(r.status_code, 404, f"POST of charter named {bad_name} should 404")
irtf_group = GroupFactory(type_id="rg")
for bad_name in ("charter-ietf-{}", "charter-whatisthis-{}", "charter-irtf-thisisnotagroup"):
url = urlreverse("ietf.doc.views_charter.submit", kwargs={"name": bad_name.format(irtf_group.acronym)})
r = self.client.get(url)
self.assertEqual(r.status_code, 404, f"GET of charter named {bad_name} should 404")
r = self.client.post(url, {})
self.assertEqual(r.status_code, 404, f"POST of charter named {bad_name} should 404")
def test_edit_review_announcement_text(self):
area = GroupFactory(type_id='area')
RoleFactory(name_id='ad',group=area,person=Person.objects.get(user__username='ad'))

View file

@ -70,12 +70,12 @@ class ConflictReviewTests(TestCase):
self.assertEqual(review_doc.ad.name,'Areað Irector')
self.assertEqual(review_doc.notify,'ipu@ietf.org')
doc = Document.objects.get(name='draft-imaginary-independent-submission')
self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])
self.assertTrue(doc in [x.target for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])
self.assertTrue(review_doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review requested"))
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith("IETF conflict review initiated"))
self.assertTrue('Conflict Review requested' in outbox[-1]['Subject'])
# verify you can't start a review when a review is already in progress
r = self.client.post(url,dict(ad="Areað Irector",create_in_state="Needs Shepherd",notify='ipu@ietf.org'))
self.assertEqual(r.status_code, 404)
@ -119,7 +119,7 @@ class ConflictReviewTests(TestCase):
self.assertEqual(review_doc.ad.name,'Ietf Chair')
self.assertEqual(review_doc.notify,'ipu@ietf.org')
doc = Document.objects.get(name='draft-imaginary-independent-submission')
self.assertTrue(doc in [x.target.document for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])
self.assertTrue(doc in [x.target for x in review_doc.relateddocument_set.filter(relationship__slug='conflrev')])
self.assertEqual(len(outbox), messages_before + 2)
@ -403,7 +403,7 @@ class ConflictReviewSubmitTests(TestCase):
# Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox
# sane post using textbox
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev))
self.assertEqual(doc.rev,'00')
self.assertFalse(os.path.exists(path))
r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1"))
@ -423,7 +423,7 @@ class ConflictReviewSubmitTests(TestCase):
# A little additional setup
# doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp
self.assertEqual(doc.rev,'00')
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev))
with io.open(path,'w') as f:
f.write('This is the old proposal.')
f.close()
@ -450,7 +450,7 @@ class ConflictReviewSubmitTests(TestCase):
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='conflict-review-imaginary-irtf-submission')
self.assertEqual(doc.rev,'01')
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (doc.name, doc.rev))
with io.open(path) as f:
self.assertEqual(f.read(),"This is a new proposal.")
f.close()

View file

@ -19,12 +19,9 @@ class Downref(TestCase):
super().setUp()
PersonFactory(name='Plain Man',user__username='plain')
self.draft = WgDraftFactory(name='draft-ietf-mars-test')
self.draftalias = self.draft.docalias.get(name='draft-ietf-mars-test')
self.doc = WgDraftFactory(name='draft-ietf-mars-approved-document',states=[('draft-iesg','rfcqueue')])
self.docalias = self.doc.docalias.get(name='draft-ietf-mars-approved-document')
self.rfc = WgRfcFactory(alias2__name='rfc9998')
self.rfcalias = self.rfc.docalias.get(name='rfc9998')
RelatedDocument.objects.create(source=self.doc, target=self.rfcalias, relationship_id='downref-approval')
self.rfc = WgRfcFactory(rfc_number=9998)
RelatedDocument.objects.create(source=self.doc, target=self.rfc, relationship_id='downref-approval')
def test_downref_registry(self):
url = urlreverse('ietf.doc.views_downref.downref_registry')
@ -64,44 +61,44 @@ class Downref(TestCase):
self.assertContains(r, 'Save downref')
# error - already in the downref registry
r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.docalias.pk, )))
r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.doc.pk, )))
self.assertContains(r, 'Downref is already in the registry')
# error - source is not in an approved state
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.draftalias.pk, )))
r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
self.assertContains(r, 'Draft is not yet approved')
# error - the target is not a normative reference of the source
self.draft.set_state(State.objects.get(used=True, type="draft-iesg", slug="pub"))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.draftalias.pk, )))
r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
self.assertContains(r, 'There does not seem to be a normative reference to RFC')
self.assertContains(r, 'Save downref anyway')
# normal - approve the document so the downref is now okay
RelatedDocument.objects.create(source=self.draft, target=self.rfcalias, relationship_id='refnorm')
RelatedDocument.objects.create(source=self.draft, target=self.rfc, relationship_id='refnorm')
draft_de_count_before = self.draft.docevent_set.count()
rfc_de_count_before = self.rfc.docevent_set.count()
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
r = self.client.post(url, dict(rfc=self.rfcalias.pk, drafts=(self.draftalias.pk, )))
r = self.client.post(url, dict(rfc=self.rfc.pk, drafts=(self.draft.pk, )))
self.assertEqual(r.status_code, 302)
newurl = urlreverse('ietf.doc.views_downref.downref_registry')
r = self.client.get(newurl)
self.assertContains(r, '<a href="/doc/draft-ietf-mars-test')
self.assertTrue(RelatedDocument.objects.filter(source=self.draft, target=self.rfcalias, relationship_id='downref-approval'))
self.assertTrue(RelatedDocument.objects.filter(source=self.draft, target=self.rfc, relationship_id='downref-approval'))
self.assertEqual(self.draft.docevent_set.count(), draft_de_count_before + 1)
self.assertEqual(self.rfc.docevent_set.count(), rfc_de_count_before + 1)
def test_downref_last_call(self):
draft = WgDraftFactory(name='draft-ietf-mars-ready-for-lc-document',intended_std_level_id='ps',states=[('draft-iesg','iesg-eva')])
WgDraftFactory(name='draft-ietf-mars-another-approved-document',states=[('draft-iesg','rfcqueue')])
rfc9999 = WgRfcFactory(alias2__name='rfc9999', std_level_id=None)
RelatedDocument.objects.create(source=draft, target=rfc9999.docalias.get(name='rfc9999'), relationship_id='refnorm')
rfc9999 = WgRfcFactory(rfc_number=9999, std_level_id=None)
RelatedDocument.objects.create(source=draft, target=rfc9999, relationship_id='refnorm')
url = urlreverse('ietf.doc.views_ballot.lastcalltext', kwargs=dict(name=draft.name))
login_testing_unauthorized(self, "secretary", url)
@ -113,7 +110,7 @@ class Downref(TestCase):
self.assertIn('The document contains these normative downward references', text)
# now, the announcement text about the downref to RFC 9999 should be gone
RelatedDocument.objects.create(source=draft, target=rfc9999.docalias.get(name='rfc9999'),relationship_id='downref-approval')
RelatedDocument.objects.create(source=draft, target=rfc9999, relationship_id='downref-approval')
r = self.client.post(url, dict(regenerate_last_call_text="1"))
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)

View file

@ -2026,10 +2026,10 @@ class ChangeReplacesTests(TestCase):
# Post that says replacea replaces base a
empty_outbox()
RelatedDocument.objects.create(source=self.replacea, target=self.basea.docalias.first(),
RelatedDocument.objects.create(source=self.replacea, target=self.basea,
relationship=DocRelationshipName.objects.get(slug="possibly-replaces"))
self.assertEqual(self.basea.get_state().slug,'active')
r = self.client.post(url, dict(replaces=self.basea.docalias.first().pk))
r = self.client.post(url, dict(replaces=self.basea.pk))
self.assertEqual(r.status_code, 302)
self.assertEqual(RelatedDocument.objects.filter(relationship__slug='replaces',source=self.replacea).count(),1)
self.assertEqual(Document.objects.get(name='draft-test-base-a').get_state().slug,'repl')
@ -2043,7 +2043,7 @@ class ChangeReplacesTests(TestCase):
# Post that says replaceboth replaces both base a and base b
url = urlreverse('ietf.doc.views_draft.replaces', kwargs=dict(name=self.replaceboth.name))
self.assertEqual(self.baseb.get_state().slug,'expired')
r = self.client.post(url, dict(replaces=[self.basea.docalias.first().pk, self.baseb.docalias.first().pk]))
r = self.client.post(url, dict(replaces=[self.basea.pk, self.baseb.pk]))
self.assertEqual(r.status_code, 302)
self.assertEqual(Document.objects.get(name='draft-test-base-a').get_state().slug,'repl')
self.assertEqual(Document.objects.get(name='draft-test-base-b').get_state().slug,'repl')
@ -2074,7 +2074,7 @@ class ChangeReplacesTests(TestCase):
def test_review_possibly_replaces(self):
replaced = self.basea.docalias.first()
replaced = self.basea
RelatedDocument.objects.create(source=self.replacea, target=replaced,
relationship=DocRelationshipName.objects.get(slug="possibly-replaces"))
@ -2102,7 +2102,7 @@ class MoreReplacesTests(TestCase):
new_doc = IndividualDraftFactory(stream_id=stream)
url = urlreverse('ietf.doc.views_draft.replaces', kwargs=dict(name=new_doc.name))
r = self.client.post(url, dict(replaces=old_doc.docalias.first().pk))
r = self.client.post(url, dict(replaces=old_doc.pk))
self.assertEqual(r.status_code,302)
old_doc = Document.objects.get(name=old_doc.name)
self.assertEqual(old_doc.get_state_slug('draft'),'repl')

View file

@ -15,7 +15,7 @@ from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils import timezone
from ietf.doc.models import Document, State, DocAlias, NewRevisionDocEvent
from ietf.doc.models import Document, State, NewRevisionDocEvent
from ietf.group.factories import RoleFactory
from ietf.group.models import Group
from ietf.meeting.factories import MeetingFactory, SessionFactory
@ -54,7 +54,6 @@ class GroupMaterialTests(TestCase):
doc = Document.objects.create(name="slides-testteam-test-file", rev="01", type_id="slides", group=group)
doc.set_state(State.objects.get(type="slides", slug="active"))
doc.set_state(State.objects.get(type="reuse_policy", slug="multiple"))
DocAlias.objects.create(name=doc.name).docs.add(doc)
NewRevisionDocEvent.objects.create(doc=doc,by=Person.objects.get(name="(System)"),rev='00',type='new_revision',desc='New revision available')
NewRevisionDocEvent.objects.create(doc=doc,by=Person.objects.get(name="(System)"),rev='01',type='new_revision',desc='New revision available')

View file

@ -92,7 +92,7 @@ class RelatedDocumentTests(TestCase):
for source, target in itertools.product(rfcs, rfcs):
ref = RelatedDocument.objects.create(
source=source,
target=target.docalias.first(),
target=target,
relationship_id=rel,
)

View file

@ -137,10 +137,18 @@ class ReviewTests(TestCase):
url = urlreverse('ietf.doc.views_review.request_review', kwargs={ "name": doc.name })
login_testing_unauthorized(self, "ad", url)
# get should fail
        # get should fail - all non-draft types 404
r = self.client.get(url)
self.assertEqual(r.status_code, 404)
# Can only request reviews on active draft documents
doc = WgDraftFactory(states=[("draft","rfc")])
url = urlreverse('ietf.doc.views_review.request_review', kwargs={ "name": doc.name })
r = self.client.get(url)
self.assertEqual(r.status_code, 403)
def test_doc_page(self):
doc = WgDraftFactory(group__acronym='mars',rev='01')
@ -153,8 +161,8 @@ class ReviewTests(TestCase):
# check we can fish it out
old_doc = WgDraftFactory(name="draft-foo-mars-test")
older_doc = WgDraftFactory(name="draft-older")
RelatedDocument.objects.create(source=old_doc, target=older_doc.docalias.first(), relationship_id='replaces')
RelatedDocument.objects.create(source=doc, target=old_doc.docalias.first(), relationship_id='replaces')
RelatedDocument.objects.create(source=old_doc, target=older_doc, relationship_id='replaces')
RelatedDocument.objects.create(source=doc, target=old_doc, relationship_id='replaces')
review_req.doc = older_doc
review_req.save()

View file

@ -41,7 +41,7 @@ class IssueRSABBallotTests(TestCase):
self.client.login(username="rsab-chair", password="rsab-chair+password")
for name in [
doc.canonical_name()
doc.name
for doc in (individual_draft, wg_draft, rg_draft, ed_rfc)
]:
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=name))

View file

@ -13,7 +13,7 @@ from django.template.loader import render_to_string
from django.urls import reverse as urlreverse
from ietf.doc.factories import StatementFactory, DocEventFactory
from ietf.doc.models import Document, DocAlias, State, NewRevisionDocEvent
from ietf.doc.models import Document, State, NewRevisionDocEvent
from ietf.group.models import Group
from ietf.person.factories import PersonFactory
from ietf.utils.mail import outbox, empty_outbox
@ -76,9 +76,7 @@ This test section has some text.
doc.set_state(State.objects.get(type_id="statement", slug="replaced"))
doc2 = StatementFactory()
doc2.relateddocument_set.create(
relationship_id="replaces", target=doc.docalias.first()
)
doc2.relateddocument_set.create(relationship_id="replaces", target=doc)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
q = PyQuery(response.content)
@ -247,7 +245,6 @@ This test section has some text.
name=name, type_id="statement"
).first()
self.assertIsNotNone(statement)
self.assertIsNotNone(DocAlias.objects.filter(name=name).first())
self.assertEqual(statement.title, postdict["title"])
self.assertEqual(statement.rev, "00")
self.assertEqual(statement.get_state_slug(), "active")

View file

@ -14,8 +14,9 @@ from textwrap import wrap
from django.conf import settings
from django.urls import reverse as urlreverse
from ietf.doc.factories import DocumentFactory, IndividualRfcFactory, WgRfcFactory, DocEventFactory
from ietf.doc.models import ( Document, DocAlias, State, DocEvent,
from ietf.doc.factories import ( DocumentFactory, IndividualRfcFactory,
WgRfcFactory, DocEventFactory, WgDraftFactory )
from ietf.doc.models import ( Document, State, DocEvent,
BallotPositionDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent )
from ietf.doc.utils import create_ballot_if_not_open
from ietf.doc.views_status_change import default_approval_text
@ -74,7 +75,7 @@ class StatusChangeTests(TestCase):
self.assertEqual(status_change.rev,'00')
self.assertEqual(status_change.ad.name,'Areað Irector')
self.assertEqual(status_change.notify,'ipu@ietf.org')
self.assertTrue(status_change.relateddocument_set.filter(relationship__slug='tois',target__docs__name='draft-ietf-random-thing'))
self.assertTrue(status_change.relateddocument_set.filter(relationship__slug='tois',target__name='rfc9999'))
# Verify that it's possible to start a status change without a responsible ad.
r = self.client.post(url,dict(
@ -184,8 +185,8 @@ class StatusChangeTests(TestCase):
self.assertTrue(doc.latest_event(DocEvent,type="added_comment").desc.startswith('Notification list changed'))
# Some additional setup so there's something to put in a generated notify list
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
# Ask the form to regenerate the list
r = self.client.post(url,dict(regenerate_addresses="1"))
@ -288,8 +289,8 @@ class StatusChangeTests(TestCase):
login_testing_unauthorized(self, "ad", url)
# additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
doc.ad = Person.objects.get(name='Ad No2')
doc.save_with_history([DocEvent.objects.create(doc=doc, rev=doc.rev, type="changed_document", by=Person.objects.get(user__username="secretary"), desc="Test")])
@ -344,8 +345,8 @@ class StatusChangeTests(TestCase):
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
create_ballot_if_not_open(None, doc, Person.objects.get(user__username="secretary"), "statchg")
doc.set_state(State.objects.get(slug='appr-pend',type='statchg'))
@ -385,10 +386,10 @@ class StatusChangeTests(TestCase):
url = urlreverse('ietf.doc.views_status_change.change_state',kwargs=dict(name=doc.name))
# Add some status change related documents
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
# And a non-status change related document
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc14'),relationship_id='updates')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc14'),relationship_id='updates')
login_testing_unauthorized(self, role, url)
empty_outbox()
@ -410,9 +411,9 @@ class StatusChangeTests(TestCase):
self.assertTrue(notification['Subject'].startswith('Approved:'))
notification_text = get_payload_text(notification)
self.assertIn('The AD has approved changing the status', notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9999').document.canonical_name(), notification_text)
self.assertIn(DocAlias.objects.get(name='rfc9998').document.canonical_name(), notification_text)
self.assertNotIn(DocAlias.objects.get(name='rfc14').document.canonical_name(), notification_text)
self.assertIn(Document.objects.get(name='rfc9999').name, notification_text)
self.assertIn(Document.objects.get(name='rfc9998').name, notification_text)
self.assertNotIn(Document.objects.get(name='rfc14').name, notification_text)
self.assertNotIn('No value found for', notification_text) # make sure all interpolation values were set
else:
self.assertEqual(len(outbox), 0)
@ -432,8 +433,8 @@ class StatusChangeTests(TestCase):
login_testing_unauthorized(self, "secretary", url)
# Some additional setup
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=DocAlias.objects.get(name='rfc9998'),relationship_id='tohist')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9999'),relationship_id='tois')
doc.relateddocument_set.create(target=Document.objects.get(name='rfc9998'),relationship_id='tohist')
# get
r = self.client.get(url)
@ -486,9 +487,16 @@ class StatusChangeTests(TestCase):
def setUp(self):
super().setUp()
IndividualRfcFactory(alias2__name='rfc14',name='draft-was-never-issued',std_level_id='unkn')
WgRfcFactory(alias2__name='rfc9999',name='draft-ietf-random-thing',std_level_id='ps')
WgRfcFactory(alias2__name='rfc9998',name='draft-ietf-random-other-thing',std_level_id='inf')
IndividualRfcFactory(rfc_number=14,std_level_id='unkn') # draft was never issued
rfc = WgRfcFactory(rfc_number=9999,std_level_id='ps')
draft = WgDraftFactory(name='draft-ietf-random-thing')
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
rfc = WgRfcFactory(rfc_number=9998,std_level_id='inf')
draft = WgDraftFactory(name='draft-ietf-random-other-thing')
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
DocumentFactory(type_id='statchg',name='status-change-imaginary-mid-review',notify='notify@example.org')
class StatusChangeSubmitTests(TestCase):
@ -508,7 +516,7 @@ class StatusChangeSubmitTests(TestCase):
# Right now, nothing to test - we let people put whatever the web browser will let them put into that textbox
# sane post using textbox
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
self.assertEqual(doc.rev,'00')
self.assertFalse(os.path.exists(path))
r = self.client.post(url,dict(content="Some initial review text\n",submit_response="1"))
@ -527,7 +535,7 @@ class StatusChangeSubmitTests(TestCase):
# A little additional setup
# doc.rev is u'00' per the test setup - double-checking that here - if it fails, the breakage is in setUp
self.assertEqual(doc.rev,'00')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
with io.open(path,'w') as f:
f.write('This is the old proposal.')
f.close()
@ -559,7 +567,7 @@ class StatusChangeSubmitTests(TestCase):
self.assertEqual(r.status_code, 302)
doc = Document.objects.get(name='status-change-imaginary-mid-review')
self.assertEqual(doc.rev,'01')
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
with io.open(path) as f:
self.assertEqual(f.read(),"This is a new proposal.")
f.close()

View file

@ -0,0 +1,49 @@
# Copyright The IETF Trust 2023, All Rights Reserved
# -*- coding: utf-8 -*-
import debug # pyflakes:ignore
from pyquery import PyQuery
from django.urls import reverse as urlreverse
from ietf.doc.factories import SubseriesFactory, RfcFactory
from ietf.doc.models import Document
from ietf.utils.test_utils import TestCase
class SubseriesTests(TestCase):
def test_index_and_view(self):
types = ["bcp", "std", "fyi"]
for type_id in types:
doc = SubseriesFactory(type_id=type_id)
self.assertEqual(len(doc.contains()), 1)
rfc = doc.contains()[0]
# Index
url = urlreverse("ietf.doc.views_search.index_subseries", kwargs=dict(type_id=type_id))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertIsNotNone(q(f"#{doc.name}"))
self.assertIn(f"RFC {rfc.name[3:]}",q(f"#{doc.name}").text())
# Subseries document view
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=doc.name))
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
q = PyQuery(r.content)
self.assertIn(f"{doc.type_id.upper()} {doc.name[3:]} consists of:",q("h2").text())
self.assertIn(f"RFC {rfc.name[3:]}", q("div.row p a").text())
# RFC view
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))
r = self.client.get(url)
q = PyQuery(r.content)
self.assertIn(f"RFC {rfc.name[3:]} also known as {type_id.upper()} {doc.name[3:]}", q("h1").text())
bcp = Document.objects.filter(type_id="bcp").last()
bcp.relateddocument_set.create(relationship_id="contains", target=RfcFactory())
for rfc in bcp.contains():
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))
r = self.client.get(url)
q = PyQuery(r.content)
self.assertIn(f"RFC {rfc.name[3:]} part of BCP {bcp.name[3:]}", q("h1").text())

View file

@ -11,10 +11,10 @@ from django.utils import timezone
from ietf.group.factories import GroupFactory, RoleFactory
from ietf.name.models import DocTagName
from ietf.person.factories import PersonFactory
from ietf.utils.test_utils import TestCase, name_of_file_containing
from ietf.utils.test_utils import TestCase, name_of_file_containing, reload_db_objects
from ietf.person.models import Person
from ietf.doc.factories import DocumentFactory, WgRfcFactory, WgDraftFactory
from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor, Document
from ietf.doc.models import State, DocumentActionHolder, DocumentAuthor
from ietf.doc.utils import (update_action_holders, add_state_change_event, update_documentauthors,
fuzzy_find_documents, rebuild_reference_relations, build_file_urls)
from ietf.utils.draft import Draft, PlaintextDraft
@ -272,40 +272,42 @@ class MiscTests(TestCase):
self.assertEqual(docauth.country, '')
def do_fuzzy_find_documents_rfc_test(self, name):
rfc = WgRfcFactory(name=name, create_revisions=(0, 1, 2))
rfc = Document.objects.get(pk=rfc.pk) # clear out any cached values
draft = WgDraftFactory(name=name, create_revisions=(0, 1, 2))
rfc = WgRfcFactory()
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
draft, rfc = reload_db_objects(draft, rfc)
# by canonical name
found = fuzzy_find_documents(rfc.canonical_name(), None)
self.assertCountEqual(found.documents, [rfc])
self.assertEqual(found.matched_rev, None)
self.assertEqual(found.matched_name, rfc.canonical_name())
# by draft name, no rev
found = fuzzy_find_documents(rfc.name, None)
self.assertCountEqual(found.documents, [rfc])
self.assertEqual(found.matched_rev, None)
self.assertEqual(found.matched_name, rfc.name)
# by draft name, no rev
found = fuzzy_find_documents(draft.name, None)
self.assertCountEqual(found.documents, [draft])
self.assertEqual(found.matched_rev, None)
self.assertEqual(found.matched_name, draft.name)
# by draft name, latest rev
found = fuzzy_find_documents(rfc.name, '02')
self.assertCountEqual(found.documents, [rfc])
found = fuzzy_find_documents(draft.name, '02')
self.assertCountEqual(found.documents, [draft])
self.assertEqual(found.matched_rev, '02')
self.assertEqual(found.matched_name, rfc.name)
self.assertEqual(found.matched_name, draft.name)
# by draft name, earlier rev
found = fuzzy_find_documents(rfc.name, '01')
self.assertCountEqual(found.documents, [rfc])
found = fuzzy_find_documents(draft.name, '01')
self.assertCountEqual(found.documents, [draft])
self.assertEqual(found.matched_rev, '01')
self.assertEqual(found.matched_name, rfc.name)
self.assertEqual(found.matched_name, draft.name)
# wrong name or revision
found = fuzzy_find_documents(rfc.name + '-incorrect')
found = fuzzy_find_documents(draft.name + '-incorrect')
self.assertCountEqual(found.documents, [], 'Should not find document that does not match')
found = fuzzy_find_documents(rfc.name + '-incorrect', '02')
found = fuzzy_find_documents(draft.name + '-incorrect', '02')
self.assertCountEqual(found.documents, [], 'Still should not find document, even with a version')
found = fuzzy_find_documents(rfc.name, '22')
self.assertCountEqual(found.documents, [rfc],
found = fuzzy_find_documents(draft.name, '22')
self.assertCountEqual(found.documents, [draft],
'Should find document even if rev does not exist')
@ -346,29 +348,29 @@ class RebuildReferenceRelationsTests(TestCase):
super().setUp()
self.doc = WgDraftFactory() # document under test
# Other documents that should be found by rebuild_reference_relations
self.normative, self.informative, self.unknown = WgRfcFactory.create_batch(3)
self.normative, self.informative, self.unknown = WgRfcFactory.create_batch(3)  # TODO: these need to have rfc names.
for relationship in ['refnorm', 'refinfo', 'refunk', 'refold']:
self.doc.relateddocument_set.create(
target=WgRfcFactory().docalias.first(),
target=WgRfcFactory(),
relationship_id=relationship,
)
self.updated = WgRfcFactory() # related document that should be left alone
self.doc.relateddocument_set.create(target=self.updated.docalias.first(), relationship_id='updates')
self.doc.relateddocument_set.create(target=self.updated, relationship_id='updates')
self.assertCountEqual(self.doc.relateddocument_set.values_list('relationship__slug', flat=True),
['refnorm', 'refinfo', 'refold', 'refunk', 'updates'],
'Test conditions set up incorrectly: wrong prior document relationships')
for other_doc in [self.normative, self.informative, self.unknown]:
self.assertEqual(
self.doc.relateddocument_set.filter(target__name=other_doc.canonical_name()).count(),
self.doc.relateddocument_set.filter(target__name=other_doc.name).count(),
0,
'Test conditions set up incorrectly: new documents already related',
)
def _get_refs_return_value(self):
return {
self.normative.canonical_name(): Draft.REF_TYPE_NORMATIVE,
self.informative.canonical_name(): Draft.REF_TYPE_INFORMATIVE,
self.unknown.canonical_name(): Draft.REF_TYPE_UNKNOWN,
self.normative.name: Draft.REF_TYPE_NORMATIVE,
self.informative.name: Draft.REF_TYPE_INFORMATIVE,
self.unknown.name: Draft.REF_TYPE_UNKNOWN,
'draft-not-found': Draft.REF_TYPE_NORMATIVE,
}
@ -399,7 +401,7 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertEqual(
result,
{
'warnings': ['There were 1 references with no matching DocAlias'],
'warnings': ['There were 1 references with no matching Document'],
'unfound': ['draft-not-found'],
}
)
@ -407,10 +409,10 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertCountEqual(
self.doc.relateddocument_set.values_list('target__name', 'relationship__slug'),
[
(self.normative.canonical_name(), 'refnorm'),
(self.informative.canonical_name(), 'refinfo'),
(self.unknown.canonical_name(), 'refunk'),
(self.updated.docalias.first().name, 'updates'),
(self.normative.name, 'refnorm'),
(self.informative.name, 'refinfo'),
(self.unknown.name, 'refunk'),
(self.updated.name, 'updates'),
]
)
@ -430,7 +432,7 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertEqual(
result,
{
'warnings': ['There were 1 references with no matching DocAlias'],
'warnings': ['There were 1 references with no matching Document'],
'unfound': ['draft-not-found'],
}
)
@ -438,10 +440,10 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertCountEqual(
self.doc.relateddocument_set.values_list('target__name', 'relationship__slug'),
[
(self.normative.canonical_name(), 'refnorm'),
(self.informative.canonical_name(), 'refinfo'),
(self.unknown.canonical_name(), 'refunk'),
(self.updated.docalias.first().name, 'updates'),
(self.normative.name, 'refnorm'),
(self.informative.name, 'refinfo'),
(self.unknown.name, 'refunk'),
(self.updated.name, 'updates'),
]
)
@ -462,7 +464,7 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertEqual(
result,
{
'warnings': ['There were 1 references with no matching DocAlias'],
'warnings': ['There were 1 references with no matching Document'],
'unfound': ['draft-not-found'],
}
)
@ -470,9 +472,9 @@ class RebuildReferenceRelationsTests(TestCase):
self.assertCountEqual(
self.doc.relateddocument_set.values_list('target__name', 'relationship__slug'),
[
(self.normative.canonical_name(), 'refnorm'),
(self.informative.canonical_name(), 'refinfo'),
(self.unknown.canonical_name(), 'refunk'),
(self.updated.docalias.first().name, 'updates'),
(self.normative.name, 'refnorm'),
(self.informative.name, 'refinfo'),
(self.unknown.name, 'refunk'),
(self.updated.name, 'updates'),
]
)

View file

@ -90,10 +90,12 @@ urlpatterns = [
url(r'^all/?$', views_search.index_all_drafts),
url(r'^active/?$', views_search.index_active_drafts),
url(r'^recent/?$', views_search.recent_drafts),
url(r'^select2search/(?P<model_name>(document|docalias))/(?P<doc_type>draft)/$', views_search.ajax_select2_search_docs),
url(r'^select2search/(?P<model_name>document)/(?P<doc_type>(draft|rfc|all))/$', views_search.ajax_select2_search_docs),
url(r'^ballots/irsg/$', views_ballot.irsg_ballot_status),
url(r'^ballots/rsab/$', views_ballot.rsab_ballot_status),
url(r'^(?P<type_id>(bcp|std|fyi))/?$', views_search.index_subseries),
url(r'^%(name)s(?:/%(rev)s)?/$' % settings.URL_REGEXPS, views_doc.document_main),
url(r'^%(name)s(?:/%(rev)s)?/bibtex/$' % settings.URL_REGEXPS, views_doc.document_bibtex),
url(r'^%(name)s(?:/%(rev)s)?/idnits2-state/$' % settings.URL_REGEXPS, views_doc.idnits2_state),

View file

@ -32,7 +32,7 @@ from ietf.community.models import CommunityList
from ietf.community.utils import docs_tracked_by_community_list
from ietf.doc.models import Document, DocHistory, State, DocumentAuthor, DocHistoryAuthor
from ietf.doc.models import DocAlias, RelatedDocument, RelatedDocHistory, BallotType, DocReminder
from ietf.doc.models import RelatedDocument, RelatedDocHistory, BallotType, DocReminder
from ietf.doc.models import DocEvent, ConsensusDocEvent, BallotDocEvent, IRSGBallotDocEvent, NewRevisionDocEvent, StateDocEvent
from ietf.doc.models import TelechatDocEvent, DocumentActionHolder, EditedAuthorsDocEvent
from ietf.name.models import DocReminderTypeName, DocRelationshipName
@ -57,7 +57,7 @@ def save_document_in_history(doc):
# copy fields
fields = get_model_fields_as_dict(doc)
fields["doc"] = doc
fields["name"] = doc.canonical_name()
fields["name"] = doc.name
dochist = DocHistory(**fields)
dochist.save()
@ -219,7 +219,7 @@ def needed_ballot_positions(doc, active_positions):
else:
related_set = RelatedDocHistory.objects.none()
for rel in related_set.filter(relationship__slug__in=['tops', 'tois', 'tohist', 'toinf', 'tobcp', 'toexp']):
if (rel.target.document.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']):
if (rel.target.std_level_id in ['bcp','ps','ds','std']) or (rel.relationship_id in ['tops','tois','tobcp']):
needed = two_thirds_rule(recused=len(recuse))
break
else:
@ -352,16 +352,6 @@ def augment_events_with_revision(doc, events):
qs = NewRevisionDocEvent.objects.filter(doc=doc)
event_revisions = list(qs.order_by('time', 'id').values('id', 'rev', 'time'))
if doc.type_id == "draft" and doc.get_state_slug() == "rfc":
# add fake "RFC" revision
if isinstance(events, QuerySetAny):
e = events.filter(type="published_rfc").order_by('time').last()
else:
e = doc.latest_event(type="published_rfc")
if e:
event_revisions.append(dict(id=e.id, time=e.time, rev="RFC"))
event_revisions.sort(key=lambda x: (x["time"], x["id"]))
for e in sorted(events, key=lambda e: (e.time, e.id), reverse=True):
while event_revisions and (e.time, e.id) < (event_revisions[-1]["time"], event_revisions[-1]["id"]):
event_revisions.pop()
@ -798,22 +788,21 @@ def rebuild_reference_relations(doc, filenames):
errors = []
unfound = set()
for ( ref, refType ) in refs.items():
refdoc = DocAlias.objects.filter(name=ref)
refdoc = Document.objects.filter(name=ref)
if not refdoc and re.match(r"^draft-.*-\d{2}$", ref):
refdoc = DocAlias.objects.filter(name=ref[:-3])
refdoc = Document.objects.filter(name=ref[:-3])
count = refdoc.count()
# As of Dec 2021, DocAlias has a unique constraint on the name field, so count > 1 should not occur
if count == 0:
unfound.add( "%s" % ref )
continue
elif count > 1:
errors.append("Too many DocAlias objects found for %s"%ref)
errors.append("Too many Document objects found for %s"%ref)
else:
# Don't add references to ourself
if doc != refdoc[0].document:
if doc != refdoc[0]:
RelatedDocument.objects.get_or_create( source=doc, target=refdoc[ 0 ], relationship=DocRelationshipName.objects.get( slug='ref%s' % refType ) )
if unfound:
warnings.append('There were %d references with no matching DocAlias'%len(unfound))
warnings.append('There were %d references with no matching Document'%len(unfound))
ret = {}
if errors:
@ -848,26 +837,26 @@ def set_replaces_for_document(request, doc, new_replaces, by, email_subject, com
for d in old_replaces:
if d not in new_replaces:
other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
other_addrs = gather_address_lists('doc_replacement_changed',doc=d)
to.update(other_addrs.to)
cc.update(other_addrs.cc)
RelatedDocument.objects.filter(source=doc, target=d, relationship=relationship).delete()
if not RelatedDocument.objects.filter(target=d, relationship=relationship):
s = 'active' if d.document.expires > timezone.now() else 'expired'
d.document.set_state(State.objects.get(type='draft', slug=s))
s = 'active' if d.expires > timezone.now() else 'expired'
d.set_state(State.objects.get(type='draft', slug=s))
for d in new_replaces:
if d not in old_replaces:
other_addrs = gather_address_lists('doc_replacement_changed',doc=d.document)
other_addrs = gather_address_lists('doc_replacement_changed',doc=d)
to.update(other_addrs.to)
cc.update(other_addrs.cc)
RelatedDocument.objects.create(source=doc, target=d, relationship=relationship)
d.document.set_state(State.objects.get(type='draft', slug='repl'))
d.set_state(State.objects.get(type='draft', slug='repl'))
if d.document.stream_id in ('irtf','ise','iab'):
repl_state = State.objects.get(type_id='draft-stream-%s'%d.document.stream_id, slug='repl')
d.document.set_state(repl_state)
events.append(StateDocEvent.objects.create(doc=d.document, rev=d.document.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state))
if d.stream_id in ('irtf','ise','iab'):
repl_state = State.objects.get(type_id='draft-stream-%s'%d.stream_id, slug='repl')
d.set_state(repl_state)
events.append(StateDocEvent.objects.create(doc=d, rev=d.rev, by=by, type='changed_state', desc="Set stream state to Replaced",state_type=repl_state.type, state=repl_state))
# make sure there are no lingering suggestions duplicating new replacements
RelatedDocument.objects.filter(source=doc, target__in=new_replaces, relationship="possibly-replaces").delete()
@ -937,7 +926,7 @@ def extract_complete_replaces_ancestor_mapping_for_docs(names):
break
relations = ( RelatedDocument.objects.filter(source__name__in=front, relationship="replaces")
.select_related("target").values_list("source__name", "target__docs__name") )
.select_related("target").values_list("source__name", "target__name") )
if not relations:
break
@ -958,49 +947,67 @@ def make_rev_history(doc):
def get_predecessors(doc, predecessors=None):
if predecessors is None:
predecessors = []
predecessors = set()
if hasattr(doc, 'relateddocument_set'):
for alias in doc.related_that_doc('replaces'):
for document in alias.docs.all():
if document not in predecessors:
predecessors.append(document)
predecessors.extend(get_predecessors(document, predecessors))
for document in doc.related_that_doc('replaces'):
if document not in predecessors:
predecessors.add(document)
predecessors.update(get_predecessors(document, predecessors))
if doc.came_from_draft():
predecessors.add(doc.came_from_draft())
predecessors.update(get_predecessors(doc.came_from_draft(), predecessors))
return predecessors
def get_ancestors(doc, ancestors = None):
if ancestors is None:
ancestors = []
ancestors = set()
if hasattr(doc, 'relateddocument_set'):
for alias in doc.related_that('replaces'):
for document in alias.docs.all():
if document not in ancestors:
ancestors.append(document)
ancestors.extend(get_ancestors(document, ancestors))
for document in doc.related_that('replaces'):
if document not in ancestors:
ancestors.add(document)
ancestors.update(get_ancestors(document, ancestors))
if doc.became_rfc():
if doc.became_rfc() not in ancestors:
ancestors.add(doc.became_rfc())
ancestors.update(get_ancestors(doc.became_rfc(), ancestors))
return ancestors
def get_replaces_tree(doc):
tree = get_predecessors(doc)
tree.extend(get_ancestors(doc))
tree.update(get_ancestors(doc))
return tree
history = {}
docs = get_replaces_tree(doc)
if docs is not None:
docs.append(doc)
docs.add(doc)
for d in docs:
for e in d.docevent_set.filter(type='new_revision').distinct():
if hasattr(e, 'newrevisiondocevent'):
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/"
history[url] = {
'name': d.name,
'rev': e.newrevisiondocevent.rev,
'published': e.time.isoformat(),
'url': url,
}
if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
if d.type_id == "rfc":
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d))
e = d.docevent_set.filter(type="published_rfc").order_by("-time").first()
history[url] = {
"name": d.name,
"rev": d.name,
"published": e and e.time.isoformat(),
"url": url,
}
else:
for e in d.docevent_set.filter(type='new_revision').distinct():
if hasattr(e, 'newrevisiondocevent'):
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=d)) + e.newrevisiondocevent.rev + "/"
history[url] = {
'name': d.name,
'rev': e.newrevisiondocevent.rev,
'published': e.time.isoformat(),
'url': url,
}
if d.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
history[url]['pages'] = d.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
if doc.type_id == "draft":
# Do nothing - all draft revisions are captured above already.
e = None
elif doc.type_id == "rfc":
# e.time.date() agrees with RPC publication date when shown in the RPC_TZINFO time zone
e = doc.latest_event(type='published_rfc')
else:
@ -1008,12 +1015,12 @@ def make_rev_history(doc):
if e:
url = urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=e.doc))
history[url] = {
'name': e.doc.canonical_name(),
'rev': e.doc.canonical_name(),
'name': e.doc.name,
'rev': e.doc.name,
'published': e.time.isoformat(),
'url': url
}
if hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
if doc.type_id != "rfc" and hasattr(e, 'newrevisiondocevent') and doc.history_set.filter(rev=e.newrevisiondocevent.rev).exists():
history[url]['pages'] = doc.history_set.filter(rev=e.newrevisiondocevent.rev).first().pages
history = list(history.values())
return sorted(history, key=lambda x: x['published'])
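make_rev_history now folds the published RFC into a draft's history through the became_rfc relation: the RFC contributes a single entry keyed by its own name, alongside one entry per draft revision. A hedged usage sketch with illustrative names and URLs:

history = make_rev_history(draft)  # draft.became_rfc() is the published RFC
for entry in history:  # entries are sorted by "published"
    print(entry["name"], entry["rev"], entry["url"])
# draft-ietf-example-thing 00 /doc/draft-ietf-example-thing/00/
# draft-ietf-example-thing 01 /doc/draft-ietf-example-thing/01/
# rfc9997 rfc9997 /doc/rfc9997/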
@ -1025,14 +1032,11 @@ def get_search_cache_key(params):
kwargs = dict([ (k,v) for (k,v) in list(params.items()) if k in fields ])
key = "doc:document:search:" + hashlib.sha512(json.dumps(kwargs, sort_keys=True).encode('utf-8')).hexdigest()
return key
def build_file_urls(doc: Union[Document, DocHistory]):
if doc.type_id != 'draft':
return [], []
if doc.get_state_slug() == "rfc":
name = doc.canonical_name()
base_path = os.path.join(settings.RFC_PATH, name + ".")
def build_file_urls(doc: Union[Document, DocHistory]):
if doc.type_id == "rfc":
base_path = os.path.join(settings.RFC_PATH, doc.name + ".")
possible_types = settings.RFC_FILE_TYPES
found_types = [t for t in possible_types if os.path.exists(base_path + t)]
@ -1041,17 +1045,17 @@ def build_file_urls(doc: Union[Document, DocHistory]):
file_urls = []
for t in found_types:
label = "plain text" if t == "txt" else t
file_urls.append((label, base + name + "." + t))
file_urls.append((label, base + doc.name + "." + t))
if "pdf" not in found_types and "txt" in found_types:
file_urls.append(("pdf", base + "pdfrfc/" + name + ".txt.pdf"))
file_urls.append(("pdf", base + "pdfrfc/" + doc.name + ".txt.pdf"))
if "txt" in found_types:
file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=name))))
file_urls.append(("htmlized", urlreverse('ietf.doc.views_doc.document_html', kwargs=dict(name=doc.name))))
if doc.tags.filter(slug="verified-errata").exists():
file_urls.append(("with errata", settings.RFC_EDITOR_INLINE_ERRATA_URL.format(rfc_number=doc.rfc_number())))
file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=name))))
elif doc.rev:
file_urls.append(("with errata", settings.RFC_EDITOR_INLINE_ERRATA_URL.format(rfc_number=doc.rfc_number)))
file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name))))
elif doc.type_id == "draft" and doc.rev != "":
base_path = os.path.join(settings.INTERNET_ALL_DRAFTS_ARCHIVE_DIR, doc.name + "-" + doc.rev + ".")
possible_types = settings.IDSUBMIT_FILE_TYPES
found_types = [t for t in possible_types if os.path.exists(base_path + t)]
@ -1067,12 +1071,14 @@ def build_file_urls(doc: Union[Document, DocHistory]):
file_urls.append(("bibtex", urlreverse('ietf.doc.views_doc.document_bibtex',kwargs=dict(name=doc.name,rev=doc.rev))))
file_urls.append(("bibxml", urlreverse('ietf.doc.views_doc.document_bibxml',kwargs=dict(name=doc.name,rev=doc.rev))))
else:
# As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''.
# All of these are in the rfc state and are covered by the above cases.
log.unreachable('2022-12-14')
if doc.type_id == "draft":
# TODO: look at the state of the database post migration and update this comment, or remove the block
# As of 2022-12-14, there are 1463 Document and 3136 DocHistory records with type='draft' and rev=''.
# All of these are in the rfc state and are covered by the above cases.
log.unreachable('2022-12-14')
file_urls = []
found_types = []
return file_urls, found_types
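build_file_urls now branches on type_id rather than on a draft's "rfc" state: RFC documents resolve against RFC_PATH, drafts with a non-empty rev against the all-drafts archive, and everything else yields empty lists. A hedged sketch of a caller (the exact labels depend on which files exist on disk):

file_urls, found_types = build_file_urls(rfc_doc)  # rfc_doc.type_id == "rfc"
# e.g. [("plain text", ...), ("htmlized", ...), ("bibtex", ...)] with found_types like ["txt", "html"]
file_urls, found_types = build_file_urls(draft_doc)  # draft_doc.type_id == "draft" and draft_doc.rev != ""
# per-revision links plus ("bibtex", ...) and ("bibxml", ...) entries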
def augment_docs_and_user_with_user_info(docs, user):
@ -1139,21 +1145,24 @@ def generate_idnits2_rfc_status():
'unkn': 'U',
}
rfcs = Document.objects.filter(type_id='draft',states__slug='rfc',states__type='draft')
rfcs = Document.objects.filter(type_id='rfc')
for rfc in rfcs:
offset = int(rfc.rfcnum)-1
offset = int(rfc.rfc_number)-1
blob[offset] = symbols[rfc.std_level_id]
if rfc.related_that('obs'):
blob[offset] = 'O'
# Workarounds for unusual states in the datatracker
# Document.get(docalias='rfc6312').rfcnum == 6342
# 6312 was published with the wrong rfc number in it
# weird workaround in the datatracker - there are two
# DocAliases starting with rfc - the canonical name code
# searches for the lexically highest alias starting with rfc
# which is getting lucky.
# The explanation for 6312 is from before docalias was removed
# The workaround is still needed, even if the datatracker
# state no longer matches what's described here:
# Document.get(docalias='rfc6312').rfc_number == 6342
# 6312 was published with the wrong rfc number in it
# weird workaround in the datatracker - there are two
# DocAliases starting with rfc - the canonical name code
# searches for the lexically highest alias starting with rfc
# which is getting lucky.
blob[6312 - 1] = 'O'
# RFC200 is an old RFC List by Number
@ -1169,7 +1178,7 @@ def generate_idnits2_rfc_status():
def generate_idnits2_rfcs_obsoleted():
obsdict = defaultdict(list)
for r in RelatedDocument.objects.filter(relationship_id='obs'):
obsdict[int(r.target.document.rfc_number())].append(int(r.source.rfc_number()))
obsdict[int(r.target.rfc_number)].append(int(r.source.rfc_number)) # Aren't these already guaranteed to be ints?
for k in obsdict:
obsdict[k] = sorted(obsdict[k])
return render_to_string('doc/idnits2-rfcs-obsoleted.txt', context={'obsitems':sorted(obsdict.items())})
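Both idnits2 helpers now key off rfc_number directly: generate_idnits2_rfc_status writes one status character per RFC at offset rfc_number - 1 (overridden with 'O' when the RFC is obsoleted), and generate_idnits2_rfcs_obsoleted maps each obsoleted RFC's number to the numbers that obsoleted it. A small illustrative sketch (the symbol and the 9997 entry are examples, not taken from this change):

blob[9997 - 1] = "P"  # whatever symbols[rfc.std_level_id] yields for that RFC
blob[9997 - 1] = "O"  # overridden because something obsoletes it
obsdict = {821: [2821]}  # e.g. RFC 821 is obsoleted by RFC 2821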
@ -1198,13 +1207,19 @@ def fuzzy_find_documents(name, rev=None):
if re.match("^[0-9]+$", name):
name = f'rfc{name}'
if name.startswith("rfc"):
sought_type = "rfc"
log.assertion("rev is None")
else:
sought_type = "draft"
# see if we can find a document using this name
docs = Document.objects.filter(docalias__name=name, type_id='draft')
docs = Document.objects.filter(name=name, type_id=sought_type)
if rev and not docs.exists():
# No document found, see if the name/rev split has been misidentified.
# Handles some special cases, like draft-ietf-tsvwg-ieee-802-11.
name = '%s-%s' % (name, rev)
docs = Document.objects.filter(docalias__name=name, type_id='draft')
docs = Document.objects.filter(name=name, type_id='draft')
if docs.exists():
rev = None # found a doc by name with rev = None, so update that
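The reworked lookup normalizes the name before querying Document: a bare number becomes an RFC name, RFC names never carry a rev, and a failed draft match is retried with the rev folded back into the name. A sketch of the cases (names illustrative except the one taken from the comment above):

fuzzy_find_documents("9997")  # looked up as name="rfc9997", type_id="rfc"
fuzzy_find_documents("rfc9997")  # rev is asserted to be None for RFC names
fuzzy_find_documents("draft-ietf-example-thing", "02")  # draft by name, rev kept separate
fuzzy_find_documents("draft-ietf-tsvwg-ieee-802", "11")  # rejoined to draft-ietf-tsvwg-ieee-802-11 if the split found nothing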

View file

@ -3,11 +3,12 @@
import datetime
import io
import os
import re
import shutil
from pathlib import Path
from django.conf import settings
from django.urls import reverse as urlreverse
from django.template.loader import render_to_string
@ -62,10 +63,9 @@ def next_approved_revision(rev):
return "%#02d" % (int(m.group('major')) + 1)
def read_charter_text(doc):
filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
filename = Path(settings.CHARTER_PATH) / f"{doc.name}-{doc.rev}.txt"
try:
with io.open(filename, 'r') as f:
return f.read()
return filename.read_text()
except IOError:
return "Error: couldn't read charter text"
@ -92,8 +92,8 @@ def change_group_state_after_charter_approval(group, by):
def fix_charter_revision_after_approval(charter, by):
# according to spec, 00-02 becomes 01, so copy file and record new revision
try:
old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), charter.rev))
new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), next_approved_revision(charter.rev)))
old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, charter.rev))
new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.name, next_approved_revision(charter.rev)))
shutil.copy(old, new)
except IOError:
log("There was an error copying %s to %s" % (old, new))
@ -101,7 +101,7 @@ def fix_charter_revision_after_approval(charter, by):
events = []
e = NewRevisionDocEvent(doc=charter, by=by, type="new_revision")
e.rev = next_approved_revision(charter.rev)
e.desc = "New version available: <b>%s-%s.txt</b>" % (charter.canonical_name(), e.rev)
e.desc = "New version available: <b>%s-%s.txt</b>" % (charter.name, e.rev)
e.save()
events.append(e)

View file

@ -9,7 +9,7 @@ from zoneinfo import ZoneInfo
from django.conf import settings
from ietf.doc.models import Document, DocAlias, RelatedDocument, DocEvent, TelechatDocEvent, BallotDocEvent, DocTypeName
from ietf.doc.models import Document, RelatedDocument, DocEvent, TelechatDocEvent, BallotDocEvent, DocTypeName
from ietf.doc.expire import expirable_drafts
from ietf.doc.utils import augment_docs_and_user_with_user_info
from ietf.meeting.models import SessionPresentation, Meeting, Session
@ -54,12 +54,13 @@ def fill_in_document_sessions(docs, doc_dict, doc_ids):
def fill_in_document_table_attributes(docs, have_telechat_date=False):
# fill in some attributes for the document table results to save
# some hairy template code and avoid repeated SQL queries
# TODO - this function evolved from something that assumed it was handling only drafts. It still has places where it assumes all docs are drafts where that is not a correct assumption
# TODO - this function evolved from something that assumed it was handling only drafts.
# It still has places where it assumes all docs are drafts where that is not a correct assumption
doc_dict = dict((d.pk, d) for d in docs)
doc_ids = list(doc_dict.keys())
rfc_aliases = dict([ (a.document.id, a.name) for a in DocAlias.objects.filter(name__startswith="rfc", docs__id__in=doc_ids) ])
rfcs = dict((d.pk, d.name) for d in docs if d.type_id == "rfc")
# latest event cache
event_types = ("published_rfc",
@ -90,10 +91,8 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
# misc
expirable_pks = expirable_drafts(Document.objects.filter(pk__in=doc_ids)).values_list('pk', flat=True)
for d in docs:
# emulate canonical name which is used by a lot of the utils
# d.canonical_name = wrap_value(rfc_aliases[d.pk] if d.pk in rfc_aliases else d.name)
if d.rfc_number() != None and d.latest_event_cache["published_rfc"]:
if d.type_id == "rfc" and d.latest_event_cache["published_rfc"]:
d.latest_revision_date = d.latest_event_cache["published_rfc"].time
elif d.latest_event_cache["new_revision"]:
d.latest_revision_date = d.latest_event_cache["new_revision"].time
@ -118,7 +117,7 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
d.search_heading = "%s" % (d.type,)
d.expirable = False
if d.get_state_slug() != "rfc":
if d.type_id == "draft" and d.get_state_slug() != "rfc":
d.milestones = [ m for (t, s, v, m) in sorted(((m.time, m.state.slug, m.desc, m) for m in d.groupmilestone_set.all() if m.state_id == "active")) ]
d.review_assignments = review_assignments_to_list_for_docs([d]).get(d.name, [])
@ -128,29 +127,30 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
# RFCs
# errata
erratas = set(Document.objects.filter(tags="errata", id__in=list(rfc_aliases.keys())).distinct().values_list("name", flat=True))
verified_erratas = set(Document.objects.filter(tags="verified-errata", id__in=list(rfc_aliases.keys())).distinct().values_list("name", flat=True))
erratas = set(Document.objects.filter(tags="errata", id__in=list(rfcs.keys())).distinct().values_list("name", flat=True))
verified_erratas = set(Document.objects.filter(tags="verified-errata", id__in=list(rfcs.keys())).distinct().values_list("name", flat=True))
for d in docs:
d.has_errata = d.name in erratas
d.has_verified_errata = d.name in verified_erratas
# obsoleted/updated by
for a in rfc_aliases:
d = doc_dict[a]
for rfc in rfcs:
d = doc_dict[rfc]
d.obsoleted_by_list = []
d.updated_by_list = []
# Revisit this block after RFCs become first-class Document objects
xed_by = list(
RelatedDocument.objects.filter(
target__name__in=list(rfc_aliases.values()),
target__name__in=list(rfcs.values()),
relationship__in=("obs", "updates"),
).select_related("target")
)
rel_rfc_aliases = {
a.document.id: re.sub(r"rfc(\d+)", r"RFC \1", a.name, flags=re.IGNORECASE)
for a in DocAlias.objects.filter(
name__startswith="rfc", docs__id__in=[rel.source_id for rel in xed_by]
# TODO - this likely reduces to something even simpler
rel_rfcs = {
d.id: re.sub(r"rfc(\d+)", r"RFC \1", d.name, flags=re.IGNORECASE)
for d in Document.objects.filter(
type_id="rfc", id__in=[rel.source_id for rel in xed_by]
)
}
xed_by.sort(
@ -158,18 +158,17 @@ def fill_in_document_table_attributes(docs, have_telechat_date=False):
re.sub(
r"rfc\s*(\d+)",
r"\1",
rel_rfc_aliases[rel.source_id],
rel_rfcs[rel.source_id],
flags=re.IGNORECASE,
)
)
)
for rel in xed_by:
d = doc_dict[rel.target.document.id]
s = rel_rfc_aliases[rel.source_id]
d = doc_dict[rel.target.id]
if rel.relationship_id == "obs":
d.obsoleted_by_list.append(s)
d.obsoleted_by_list.append(rel.source)
elif rel.relationship_id == "updates":
d.updated_by_list.append(s)
d.updated_by_list.append(rel.source)
def augment_docs_with_related_docs_info(docs):
"""Augment all documents with related documents information.
@ -179,7 +178,7 @@ def augment_docs_with_related_docs_info(docs):
if d.type_id == 'conflrev':
if len(d.related_that_doc('conflrev')) != 1:
continue
originalDoc = d.related_that_doc('conflrev')[0].document
originalDoc = d.related_that_doc('conflrev')[0]
d.pages = originalDoc.pages
def prepare_document_table(request, docs, query=None, max_results=200):
@ -193,7 +192,7 @@ def prepare_document_table(request, docs, query=None, max_results=200):
# the number of queries
docs = docs.select_related("ad", "std_level", "intended_std_level", "group", "stream", "shepherd", )
docs = docs.prefetch_related("states__type", "tags", "groupmilestone_set__group", "reviewrequest_set__team",
"ad__email_set", "docalias__iprdocrel_set")
"ad__email_set", "iprdocrel_set")
docs = docs[:max_results] # <- that is still a queryset, but with a LIMIT now
docs = list(docs)
else:
@ -217,7 +216,7 @@ def prepare_document_table(request, docs, query=None, max_results=200):
res = []
rfc_num = d.rfc_number()
rfc_num = num(d.rfc_number) if d.rfc_number else None
if d.type_id == "draft":
res.append(num(["Active", "Expired", "Replaced", "Withdrawn", "RFC"].index(d.search_heading.split()[0])))
@ -232,25 +231,25 @@ def prepare_document_table(request, docs, query=None, max_results=200):
elif sort_key == "date":
res.append(str(d.latest_revision_date.astimezone(ZoneInfo(settings.TIME_ZONE))))
elif sort_key == "status":
if rfc_num != None:
res.append(num(rfc_num))
if rfc_num is not None:
res.append(rfc_num)
else:
res.append(num(d.get_state().order) if d.get_state() else None)
elif sort_key == "ipr":
res.append(len(d.ipr()))
elif sort_key == "ad":
if rfc_num != None:
res.append(num(rfc_num))
if rfc_num is not None:
res.append(rfc_num)
elif d.get_state_slug() == "active":
if d.get_state("draft-iesg"):
res.append(d.get_state("draft-iesg").order)
else:
res.append(0)
else:
if rfc_num != None:
res.append(num(rfc_num))
if rfc_num is not None:
res.append(rfc_num)
else:
res.append(d.canonical_name())
res.append(d.name)
return res

View file

@ -179,7 +179,7 @@ def save_position(form, doc, ballot, balloter, login=None, send_email=False):
@role_required("Area Director", "Secretariat", "IRSG Member", "RSAB Member")
def edit_position(request, name, ballot_id):
"""Vote and edit discuss and comment on document"""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
ballot = get_object_or_404(BallotDocEvent, type="created_ballot", pk=ballot_id, doc=doc)
balloter = login = request.user.person
@ -256,7 +256,7 @@ def api_set_position(request):
if not name:
return err(400, "Missing document name")
try:
doc = Document.objects.get(docalias__name=name)
doc = Document.objects.get(name=name)
except Document.DoesNotExist:
return err(400, "Document not found")
position_names = BallotPositionName.objects.values_list('slug', flat=True)
@ -323,7 +323,7 @@ def build_position_email(balloter, doc, pos):
@role_required('Area Director','Secretariat','IRSG Member', 'RSAB Member')
def send_ballot_comment(request, name, ballot_id):
"""Email document ballot position discuss/comment for Area Director."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
ballot = get_object_or_404(BallotDocEvent, type="created_ballot", pk=ballot_id, doc=doc)
if not has_role(request.user, 'Secretariat'):
@ -413,7 +413,7 @@ def clear_ballot(request, name, ballot_type_slug):
@role_required('Area Director','Secretariat')
def defer_ballot(request, name):
"""Signal post-pone of ballot, notifying relevant parties."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.type_id not in ('draft','conflrev','statchg'):
raise Http404
interesting_state = dict(draft='draft-iesg',conflrev='conflrev',statchg='statchg')
@ -467,7 +467,7 @@ def defer_ballot(request, name):
@role_required('Area Director','Secretariat')
def undefer_ballot(request, name):
"""undo deferral of ballot ballot."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.type_id not in ('draft','conflrev','statchg'):
raise Http404
if doc.type_id == 'draft' and not doc.get_state("draft-iesg"):
@ -503,7 +503,7 @@ class LastCallTextForm(forms.Form):
@role_required('Area Director','Secretariat')
def lastcalltext(request, name):
"""Editing of the last call text"""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not doc.get_state("draft-iesg"):
raise Http404
@ -589,7 +589,7 @@ class BallotWriteupForm(forms.Form):
@role_required('Area Director','Secretariat')
def ballot_writeupnotes(request, name):
"""Editing of ballot write-up and notes"""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
prev_state = doc.get_state("draft-iesg")
login = request.user.person
@ -700,7 +700,7 @@ class BallotRfcEditorNoteForm(forms.Form):
@role_required('Area Director','Secretariat','IAB Chair','IRTF Chair','ISE')
def ballot_rfceditornote(request, name):
"""Editing of RFC Editor Note"""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not is_authorized_in_doc_stream(request.user, doc):
permission_denied(request, "You do not have the necessary permissions to change the RFC Editor Note for this document")
@ -765,7 +765,7 @@ class ApprovalTextForm(forms.Form):
@role_required('Area Director','Secretariat')
def ballot_approvaltext(request, name):
"""Editing of approval text"""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not doc.get_state("draft-iesg"):
raise Http404
@ -816,7 +816,7 @@ def ballot_approvaltext(request, name):
@role_required('Secretariat')
def approve_ballot(request, name):
"""Approve ballot, sending out announcement, changing state."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not doc.get_state("draft-iesg"):
raise Http404
@ -947,13 +947,19 @@ class ApproveDownrefsForm(forms.Form):
@role_required('Secretariat')
def approve_downrefs(request, name):
"""Document ballot was just approved; add the checked downwared references to the downref registry."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not doc.get_state("draft-iesg"):
raise Http404
login = request.user.person
downrefs_to_rfc = [rel for rel in doc.relateddocument_set.all() if rel.is_downref() and not rel.is_approved_downref() and rel.target.document.is_rfc()]
downrefs_to_rfc = [
rel
for rel in doc.relateddocument_set.all()
if rel.is_downref()
and not rel.is_approved_downref()
and rel.target.type_id == "rfc"
]
downrefs_to_rfc_qs = RelatedDocument.objects.filter(pk__in=[r.pk for r in downrefs_to_rfc])
@ -968,12 +974,12 @@ def approve_downrefs(request, name):
c = DocEvent(type="downref_approved", doc=rel.source,
rev=rel.source.rev, by=login)
c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % (
rel.target.document.rfc_number(), rel.source, rel.source.rev)
rel.target.rfc_number, rel.source, rel.source.rev)
c.save()
c = DocEvent(type="downref_approved", doc=rel.target.document,
rev=rel.target.document.rev, by=login)
c = DocEvent(type="downref_approved", doc=rel.target,
rev=rel.target.rev, by=login)
c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % (
rel.target.document.rfc_number(), rel.source, rel.source.rev)
rel.target.rfc_number, rel.source, rel.source.rev)
c.save()
return HttpResponseRedirect(doc.get_absolute_url())
@ -995,7 +1001,7 @@ class MakeLastCallForm(forms.Form):
@role_required('Secretariat')
def make_last_call(request, name):
"""Make last call for Internet-Draft, sending out announcement."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not (doc.get_state("draft-iesg") or doc.get_state("statchg")):
raise Http404
@ -1103,7 +1109,7 @@ def make_last_call(request, name):
@role_required('Secretariat', 'IRTF Chair')
def issue_irsg_ballot(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.stream.slug != "irtf" or doc.type != DocTypeName.objects.get(slug="draft"):
raise Http404
@ -1158,7 +1164,7 @@ def issue_irsg_ballot(request, name):
@role_required('Secretariat', 'IRTF Chair')
def close_irsg_ballot(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.stream.slug != "irtf" or doc.type != DocTypeName.objects.get(slug="draft"):
raise Http404
@ -1199,7 +1205,7 @@ def irsg_ballot_status(request):
@role_required('Secretariat', 'RSAB Chair')
def issue_rsab_ballot(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.stream.slug != "editorial" or doc.type != DocTypeName.objects.get(slug="draft"):
raise Http404
@ -1248,7 +1254,7 @@ def issue_rsab_ballot(request, name):
@role_required('Secretariat', 'RSAB Chair')
def close_rsab_ballot(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.stream.slug != "editorial" or doc.type_id != "draft":
raise Http404

View file

@ -15,7 +15,7 @@ from django.utils.html import escape
from ietf.doc.mails import (email_bofreq_title_changed, email_bofreq_editors_changed,
email_bofreq_new_revision, email_bofreq_responsible_changed)
from ietf.doc.models import (Document, DocAlias, DocEvent, NewRevisionDocEvent,
from ietf.doc.models import (Document, DocEvent, NewRevisionDocEvent,
BofreqEditorDocEvent, BofreqResponsibleDocEvent, State)
from ietf.doc.utils import add_state_change_event
from ietf.doc.utils_bofreq import bofreq_editors, bofreq_responsible
@ -168,8 +168,6 @@ def new_bof_request(request):
)
e2.editors.set([request.user.person])
bofreq.save_with_history([e1,e2])
alias = DocAlias.objects.create(name=name)
alias.docs.set([bofreq])
bofreq_submission = form.cleaned_data['bofreq_submission']
if bofreq_submission == "upload":
content = get_cleaned_text_file_content(form.cleaned_data["bofreq_file"])

View file

@ -3,11 +3,11 @@
import datetime
import io
import json
import os
import textwrap
from pathlib import Path
from django.http import HttpResponseRedirect, HttpResponseNotFound, Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse as urlreverse
@ -22,7 +22,7 @@ from django.utils.html import escape
import debug # pyflakes:ignore
from ietf.doc.models import ( Document, DocAlias, DocHistory, State, DocEvent,
from ietf.doc.models import ( Document, DocHistory, State, DocEvent,
BallotDocEvent, BallotPositionDocEvent, InitialReviewDocEvent, NewRevisionDocEvent,
WriteupDocEvent, TelechatDocEvent )
from ietf.doc.utils import ( add_state_change_event, close_open_ballots,
@ -32,7 +32,7 @@ from ietf.doc.utils_charter import ( historic_milestones_for_charter,
generate_ballot_writeup, generate_issue_ballot_mail, next_revision,
derive_new_work_text,
change_group_state_after_charter_approval, fix_charter_revision_after_approval,
split_charter_name)
split_charter_name, charter_name_for_group)
from ietf.doc.mails import email_state_changed, email_charter_internal_review
from ietf.group.mails import email_admin_re_charter
from ietf.group.models import Group, ChangeStateGroupEvent, MilestoneGroupEvent
@ -42,6 +42,7 @@ from ietf.ietfauth.utils import has_role, role_required
from ietf.name.models import GroupStateName
from ietf.person.models import Person
from ietf.utils.history import find_history_active_at
from ietf.utils.log import assertion
from ietf.utils.mail import send_mail_preformatted
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.utils.response import permission_denied
@ -362,38 +363,41 @@ class UploadForm(forms.Form):
@login_required
def submit(request, name, option=None):
if not name.startswith('charter-'):
raise Http404
# Charters are named "charter-<ietf|irtf>-<group acronym>"
charter = Document.objects.filter(type="charter", name=name).first()
if charter:
group = charter.group
charter_canonical_name = charter.canonical_name()
assertion("charter.name == charter_name_for_group(group)")
charter_rev = charter.rev
else:
top_org, group_acronym = split_charter_name(name)
group = get_object_or_404(Group, acronym=group_acronym)
charter_canonical_name = name
if name != charter_name_for_group(group):
raise Http404 # do not allow creation of misnamed charters
charter_rev = "00-00"
if not can_manage_all_groups_of_type(request.user, group.type_id) or not group.features.has_chartering_process:
if (
not can_manage_all_groups_of_type(request.user, group.type_id)
or not group.features.has_chartering_process
):
permission_denied(request, "You don't have permission to access this view.")
path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter_canonical_name, charter_rev))
not_uploaded_yet = charter_rev.endswith("-00") and not os.path.exists(path)
charter_filename = Path(settings.CHARTER_PATH) / f"{name}-{charter_rev}.txt"
not_uploaded_yet = charter_rev.endswith("-00") and not charter_filename.exists()
if not_uploaded_yet or not charter:
# this case is special - we recently chartered or rechartered and have no file yet
next_rev = charter_rev
else:
# search history for possible collisions with abandoned efforts
prev_revs = list(charter.history_set.order_by('-time').values_list('rev', flat=True))
prev_revs = list(
charter.history_set.order_by("-time").values_list("rev", flat=True)
)
next_rev = next_revision(charter.rev)
while next_rev in prev_revs:
next_rev = next_revision(next_rev)
if request.method == 'POST':
if request.method == "POST":
form = UploadForm(request.POST, request.FILES)
if form.is_valid():
# Also save group history so we can search for it
@ -408,9 +412,10 @@ def submit(request, name, option=None):
abstract=group.name,
rev=next_rev,
)
DocAlias.objects.create(name=charter.name).docs.add(charter)
charter.set_state(State.objects.get(used=True, type="charter", slug="notrev"))
charter.set_state(
State.objects.get(used=True, type="charter", slug="notrev")
)
group.charter = charter
group.save()
@ -418,56 +423,74 @@ def submit(request, name, option=None):
charter.rev = next_rev
events = []
e = NewRevisionDocEvent(doc=charter, by=request.user.person, type="new_revision")
e.desc = "New version available: <b>%s-%s.txt</b>" % (charter.canonical_name(), charter.rev)
e = NewRevisionDocEvent(
doc=charter, by=request.user.person, type="new_revision"
)
e.desc = "New version available: <b>%s-%s.txt</b>" % (
charter.name,
charter.rev,
)
e.rev = charter.rev
e.save()
events.append(e)
# Save file on disk
filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.canonical_name(), charter.rev))
with io.open(filename, 'w', encoding='utf-8') as destination:
if form.cleaned_data['txt']:
destination.write(form.cleaned_data['txt'])
charter_filename = charter_filename.with_name(
f"{name}-{charter.rev}.txt"
) # update rev
with charter_filename.open("w", encoding="utf-8") as destination:
if form.cleaned_data["txt"]:
destination.write(form.cleaned_data["txt"])
else:
destination.write(form.cleaned_data['content'])
destination.write(form.cleaned_data["content"])
if option in ['initcharter','recharter'] and charter.ad == None:
charter.ad = getattr(group.ad_role(),'person',None)
if option in ["initcharter", "recharter"] and charter.ad == None:
charter.ad = getattr(group.ad_role(), "person", None)
charter.save_with_history(events)
if option:
return redirect('ietf.doc.views_charter.change_state', name=charter.name, option=option)
return redirect(
"ietf.doc.views_charter.change_state",
name=charter.name,
option=option,
)
else:
return redirect("ietf.doc.views_doc.document_main", name=charter.name)
else:
init = { "content": "" }
init = {"content": ""}
if not_uploaded_yet and charter:
# use text from last approved revision
last_approved = charter.rev.split("-")[0]
h = charter.history_set.filter(rev=last_approved).order_by("-time", "-id").first()
h = (
charter.history_set.filter(rev=last_approved)
.order_by("-time", "-id")
.first()
)
if h:
charter_canonical_name = h.canonical_name()
charter_rev = h.rev
filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter_canonical_name, charter_rev))
assertion("h.name == charter_name_for_group(group)")
charter_filename = charter_filename.with_name(
f"{name}-{h.rev}.txt"
) # update rev
try:
with io.open(filename, 'r') as f:
init["content"] = f.read()
init["content"] = charter_filename.read_text()
except IOError:
pass
form = UploadForm(initial=init)
fill_in_charter_info(group)
return render(request, 'doc/charter/submit.html', {
'form': form,
'next_rev': next_rev,
'group': group,
'name': name,
})
return render(
request,
"doc/charter/submit.html",
{
"form": form,
"next_rev": next_rev,
"group": group,
"name": name,
},
)
class ActionAnnouncementTextForm(forms.Form):
announcement_text = forms.CharField(widget=forms.Textarea, required=True, strip=False)
@ -484,7 +507,7 @@ class ReviewAnnouncementTextForm(forms.Form):
return self.cleaned_data["announcement_text"].replace("\r", "")
@role_required('Area Director','Secretariat')
@role_required("Area Director", "Secretariat")
def review_announcement_text(request, name):
"""Editing of review announcement text"""
charter = get_object_or_404(Document, type="charter", name=name)
@ -493,7 +516,9 @@ def review_announcement_text(request, name):
by = request.user.person
existing = charter.latest_event(WriteupDocEvent, type="changed_review_announcement")
existing_new_work = charter.latest_event(WriteupDocEvent, type="changed_new_work_text")
existing_new_work = charter.latest_event(
WriteupDocEvent, type="changed_new_work_text"
)
if not existing:
(existing, existing_new_work) = default_review_text(group, charter, by)
@ -506,19 +531,23 @@ def review_announcement_text(request, name):
existing_new_work.by = by
existing_new_work.type = "changed_new_work_text"
existing_new_work.desc = "%s review text was changed" % group.type.name
existing_new_work.text = derive_new_work_text(existing.text,group)
existing_new_work.text = derive_new_work_text(existing.text, group)
existing_new_work.time = timezone.now()
form = ReviewAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text),new_work_text=escape(existing_new_work.text)))
form = ReviewAnnouncementTextForm(
initial=dict(
announcement_text=escape(existing.text),
new_work_text=escape(existing_new_work.text),
)
)
if request.method == 'POST':
if request.method == "POST":
form = ReviewAnnouncementTextForm(request.POST)
if "save_text" in request.POST and form.is_valid():
now = timezone.now()
events = []
t = form.cleaned_data['announcement_text']
t = form.cleaned_data["announcement_text"]
if t != existing.text:
e = WriteupDocEvent(doc=charter, rev=charter.rev)
e.by = by
@ -532,11 +561,11 @@ def review_announcement_text(request, name):
existing.save()
events.append(existing)
t = form.cleaned_data['new_work_text']
t = form.cleaned_data["new_work_text"]
if t != existing_new_work.text:
e = WriteupDocEvent(doc=charter, rev=charter.rev)
e.by = by
e.type = "changed_new_work_text"
e.type = "changed_new_work_text"
e.desc = "%s new work message text was changed" % (group.type.name)
e.text = t
e.time = now
@ -549,33 +578,71 @@ def review_announcement_text(request, name):
charter.save_with_history(events)
if request.GET.get("next", "") == "approve":
return redirect('ietf.doc.views_charter.approve', name=charter.canonical_name())
return redirect(
"ietf.doc.views_charter.approve", name=charter.name
)
return redirect('ietf.doc.views_doc.document_writeup', name=charter.canonical_name())
return redirect(
"ietf.doc.views_doc.document_writeup", name=charter.name
)
if "regenerate_text" in request.POST:
(existing, existing_new_work) = default_review_text(group, charter, by)
existing.save()
existing_new_work.save()
form = ReviewAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text),
new_work_text=escape(existing_new_work.text)))
form = ReviewAnnouncementTextForm(
initial=dict(
announcement_text=escape(existing.text),
new_work_text=escape(existing_new_work.text),
)
)
if any(x in request.POST for x in ['send_annc_only','send_nw_only','send_both']) and form.is_valid():
if any(x in request.POST for x in ['send_annc_only','send_both']):
parsed_msg = send_mail_preformatted(request, form.cleaned_data['announcement_text'])
messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],))
if any(x in request.POST for x in ['send_nw_only','send_both']):
parsed_msg = send_mail_preformatted(request, form.cleaned_data['new_work_text'])
messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],))
return redirect('ietf.doc.views_doc.document_writeup', name=charter.name)
if (
any(
x in request.POST
for x in ["send_annc_only", "send_nw_only", "send_both"]
)
and form.is_valid()
):
if any(x in request.POST for x in ["send_annc_only", "send_both"]):
parsed_msg = send_mail_preformatted(
request, form.cleaned_data["announcement_text"]
)
messages.success(
request,
"The email To: '%s' with Subject: '%s' has been sent."
% (
parsed_msg["To"],
parsed_msg["Subject"],
),
)
if any(x in request.POST for x in ["send_nw_only", "send_both"]):
parsed_msg = send_mail_preformatted(
request, form.cleaned_data["new_work_text"]
)
messages.success(
request,
"The email To: '%s' with Subject: '%s' has been sent."
% (
parsed_msg["To"],
parsed_msg["Subject"],
),
)
return redirect("ietf.doc.views_doc.document_writeup", name=charter.name)
return render(request, 'doc/charter/review_announcement_text.html',
dict(charter=charter,
back_url=urlreverse('ietf.doc.views_doc.document_writeup', kwargs=dict(name=charter.name)),
announcement_text_form=form,
))
return render(
request,
"doc/charter/review_announcement_text.html",
dict(
charter=charter,
back_url=urlreverse(
"ietf.doc.views_doc.document_writeup", kwargs=dict(name=charter.name)
),
announcement_text_form=form,
),
)
@role_required('Area Director','Secretariat')
@role_required("Area Director", "Secretariat")
def action_announcement_text(request, name):
"""Editing of action announcement text"""
charter = get_object_or_404(Document, type="charter", name=name)
@ -590,16 +657,18 @@ def action_announcement_text(request, name):
if not existing:
raise Http404
form = ActionAnnouncementTextForm(initial=dict(announcement_text=escape(existing.text)))
form = ActionAnnouncementTextForm(
initial=dict(announcement_text=escape(existing.text))
)
if request.method == 'POST':
if request.method == "POST":
form = ActionAnnouncementTextForm(request.POST)
if "save_text" in request.POST and form.is_valid():
t = form.cleaned_data['announcement_text']
t = form.cleaned_data["announcement_text"]
if t != existing.text:
e = WriteupDocEvent(doc=charter, rev=charter.rev)
e.by = by
e.type = "changed_action_announcement"
e.type = "changed_action_announcement"
e.desc = "%s action text was changed" % group.type.name
e.text = t
e.save()
@ -607,25 +676,46 @@ def action_announcement_text(request, name):
existing.save()
if request.GET.get("next", "") == "approve":
return redirect('ietf.doc.views_charter.approve', name=charter.canonical_name())
return redirect(
"ietf.doc.views_charter.approve", name=charter.name
)
return redirect('ietf.doc.views_doc.document_writeup', name=charter.canonical_name())
return redirect(
"ietf.doc.views_doc.document_writeup", name=charter.name
)
if "regenerate_text" in request.POST:
e = default_action_text(group, charter, by)
e.save()
form = ActionAnnouncementTextForm(initial=dict(announcement_text=escape(e.text)))
form = ActionAnnouncementTextForm(
initial=dict(announcement_text=escape(e.text))
)
if "send_text" in request.POST and form.is_valid():
parsed_msg = send_mail_preformatted(request, form.cleaned_data['announcement_text'])
messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],))
return redirect('ietf.doc.views_doc.document_writeup', name=charter.name)
parsed_msg = send_mail_preformatted(
request, form.cleaned_data["announcement_text"]
)
messages.success(
request,
"The email To: '%s' with Subject: '%s' has been sent."
% (
parsed_msg["To"],
parsed_msg["Subject"],
),
)
return redirect("ietf.doc.views_doc.document_writeup", name=charter.name)
return render(request, 'doc/charter/action_announcement_text.html',
dict(charter=charter,
back_url=urlreverse('ietf.doc.views_doc.document_writeup', kwargs=dict(name=charter.name)),
announcement_text_form=form,
))
return render(
request,
"doc/charter/action_announcement_text.html",
dict(
charter=charter,
back_url=urlreverse(
"ietf.doc.views_doc.document_writeup", kwargs=dict(name=charter.name)
),
announcement_text_form=form,
),
)
class BallotWriteupForm(forms.Form):
ballot_writeup = forms.CharField(widget=forms.Textarea, required=True, strip=False)
@ -806,33 +896,37 @@ def approve(request, name):
dict(charter=charter,
announcement=escape(announcement)))
def charter_with_milestones_txt(request, name, rev):
charter = get_object_or_404(Document, type="charter", docalias__name=name)
revision_event = charter.latest_event(NewRevisionDocEvent, type="new_revision", rev=rev)
def charter_with_milestones_txt(request, name, rev):
charter = get_object_or_404(Document, type="charter", name=name)
revision_event = charter.latest_event(
NewRevisionDocEvent, type="new_revision", rev=rev
)
if not revision_event:
return HttpResponseNotFound("Revision %s not found in database" % rev)
# read charter text
c = find_history_active_at(charter, revision_event.time) or charter
filename = '%s-%s.txt' % (c.canonical_name(), rev)
charter_text = ""
filename = Path(settings.CHARTER_PATH) / f"{c.name}-{rev}.txt"
try:
with io.open(os.path.join(settings.CHARTER_PATH, filename), 'r') as f:
charter_text = force_str(f.read(), errors='ignore')
with filename.open() as f:
charter_text = force_str(f.read(), errors="ignore")
except IOError:
charter_text = "Error reading charter text %s" % filename
charter_text = f"Error reading charter text {filename.name}"
milestones = historic_milestones_for_charter(charter, rev)
# wrap the output nicely
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" " * 11, width=80, break_long_words=False)
wrapper = textwrap.TextWrapper(
initial_indent="", subsequent_indent=" " * 11, width=80, break_long_words=False
)
for m in milestones:
m.desc_filled = wrapper.fill(m.desc)
return render(request, 'doc/charter/charter_with_milestones.txt',
dict(charter_text=charter_text,
milestones=milestones),
content_type="text/plain; charset=%s"%settings.DEFAULT_CHARSET)
return render(
request,
"doc/charter/charter_with_milestones.txt",
dict(charter_text=charter_text, milestones=milestones),
content_type="text/plain; charset=%s" % settings.DEFAULT_CHARSET,
)
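
Editor's note: the charter text is now read through pathlib and decoded tolerantly, with the error message built from the Path object, and milestone descriptions are wrapped with textwrap as before. A minimal sketch of that pattern, assuming only a charter directory and name/rev values as inputs; the helper name is invented for illustration and is not part of this change:

from pathlib import Path
import textwrap

from django.utils.encoding import force_str

def read_charter_text(charter_dir, name, rev):
    # Mirrors the view above: tolerate missing files and undecodable bytes.
    filename = Path(charter_dir) / f"{name}-{rev}.txt"
    try:
        with filename.open() as f:
            return force_str(f.read(), errors="ignore")
    except IOError:
        return f"Error reading charter text {filename.name}"

# Milestone descriptions are wrapped the same way as in the view:
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" " * 11,
                               width=80, break_long_words=False)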

View file

@ -16,7 +16,7 @@ from django.utils.html import escape
import debug # pyflakes:ignore
from ietf.doc.models import ( BallotDocEvent, BallotPositionDocEvent, DocAlias, DocEvent,
from ietf.doc.models import ( BallotDocEvent, BallotPositionDocEvent, DocEvent,
Document, NewRevisionDocEvent, State )
from ietf.doc.utils import ( add_state_change_event, close_open_ballots,
create_ballot_if_not_open, update_telechat )
@ -98,7 +98,7 @@ def change_state(request, name, option=None):
ok_to_publish)
if new_state.slug in ["appr-reqnopub-sent", "appr-noprob-sent", "withdraw", "dead"]:
doc = review.related_that_doc("conflrev")[0].document
doc = review.related_that_doc("conflrev")[0]
update_stream_state(doc, login, 'chair-w' if doc.stream_id=='irtf' else 'ise-rev', 'iesg-com')
return redirect('ietf.doc.views_doc.document_main', name=review.name)
@ -123,7 +123,7 @@ def send_conflict_review_ad_changed_email(request, review, event):
by = request.user.person,
event = event,
review = review,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target,
review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
)
)
@ -138,7 +138,7 @@ def send_conflict_review_started_email(request, review):
cc = addrs.cc,
by = request.user.person,
review = review,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
reviewed_doc = review.relateddocument_set.get(relationship__slug='conflrev').target,
review_url = settings.IDTRACKER_BASE_URL+review.get_absolute_url(),
)
)
@ -147,7 +147,7 @@ def send_conflict_review_started_email(request, review):
addrs = gather_address_lists('conflrev_requested_iana',doc=review).as_strings(compact=False)
email_iana(request,
review.relateddocument_set.get(relationship__slug='conflrev').target.document,
review.relateddocument_set.get(relationship__slug='conflrev').target,
addrs.to,
msg,
cc=addrs.cc)
@ -165,7 +165,7 @@ def send_conflict_eval_email(request,review):
send_mail_preformatted(request,msg,override=override)
addrs = gather_address_lists('ballot_issued_iana',doc=review).as_strings()
email_iana(request,
review.relateddocument_set.get(relationship__slug='conflrev').target.document,
review.relateddocument_set.get(relationship__slug='conflrev').target,
addrs.to,
msg,
addrs.cc)
@ -181,7 +181,7 @@ class UploadForm(forms.Form):
return get_cleaned_text_file_content(self.cleaned_data["txt"])
def save(self, review):
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev))
with io.open(filename, 'w', encoding='utf-8') as destination:
if self.cleaned_data['txt']:
destination.write(self.cleaned_data['txt'])
@ -195,7 +195,7 @@ def submit(request, name):
login = request.user.person
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
path = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev))
not_uploaded_yet = review.rev == "00" and not os.path.exists(path)
if not_uploaded_yet:
@ -212,7 +212,7 @@ def submit(request, name):
events = []
e = NewRevisionDocEvent(doc=review, by=login, type="new_revision")
e.desc = "New version available: <b>%s-%s.txt</b>" % (review.canonical_name(), review.rev)
e.desc = "New version available: <b>%s-%s.txt</b>" % (review.name, review.rev)
e.rev = review.rev
e.save()
events.append(e)
@ -244,7 +244,7 @@ def submit(request, name):
dict(),
))
else:
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.canonical_name(), review.rev))
filename = os.path.join(settings.CONFLICT_REVIEW_PATH, '%s-%s.txt' % (review.name, review.rev))
try:
with io.open(filename, 'r') as f:
init["content"] = f.read()
@ -257,7 +257,7 @@ def submit(request, name):
{'form': form,
'next_rev': next_rev,
'review' : review,
'conflictdoc' : review.relateddocument_set.get(relationship__slug='conflrev').target.document,
'conflictdoc' : review.relateddocument_set.get(relationship__slug='conflrev').target,
})
@role_required("Area Director", "Secretariat")
@ -285,8 +285,8 @@ def edit_ad(request, name):
form = AdForm(initial=init)
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document
titletext = 'the conflict review of %s-%s' % (conflictdoc.canonical_name(),conflictdoc.rev)
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target
titletext = 'the conflict review of %s-%s' % (conflictdoc.name,conflictdoc.rev)
return render(request, 'doc/change_ad.html',
{'form': form,
'doc': review,
@ -297,7 +297,7 @@ def edit_ad(request, name):
def default_approval_text(review):
current_text = review.text_or_error() # pyflakes:ignore
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target
if conflictdoc.stream_id=='ise':
receiver = 'Independent Submissions Editor'
elif conflictdoc.stream_id=='irtf':
@ -365,7 +365,7 @@ def approve_conflict_review(request, name):
c.desc = "The following approval message was sent\n"+form.cleaned_data['announcement_text']
c.save()
doc = review.related_that_doc("conflrev")[0].document
doc = review.related_that_doc("conflrev")[0]
update_stream_state(doc, login, 'chair-w' if doc.stream_id=='irtf' else 'ise-rev', 'iesg-com')
return HttpResponseRedirect(review.get_absolute_url())
@ -378,7 +378,7 @@ def approve_conflict_review(request, name):
return render(request, 'doc/conflict_review/approve.html',
dict(
review = review,
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target.document,
conflictdoc = review.relateddocument_set.get(relationship__slug='conflrev').target,
form = form,
))
@ -429,7 +429,7 @@ def start_review_sanity_check(request, name):
raise Http404
# sanity check that there's not already a conflict review document for this document
if [ rel.source for alias in doc_to_review.docalias.all() for rel in alias.relateddocument_set.filter(relationship='conflrev') ]:
if [ rel.source for rel in doc_to_review.targets_related.filter(relationship='conflrev') ]:
raise Http404
return doc_to_review
@ -461,11 +461,8 @@ def build_conflict_review_document(login, doc_to_review, ad, notify, create_in_s
group=iesg_group,
)
conflict_review.set_state(create_in_state)
DocAlias.objects.create( name=review_name).docs.add( conflict_review )
conflict_review.relateddocument_set.create(target=DocAlias.objects.get(name=doc_to_review.name),relationship_id='conflrev')
conflict_review.relateddocument_set.create(target=doc_to_review, relationship_id='conflrev')
c = DocEvent(type="added_comment", doc=conflict_review, rev=conflict_review.rev, by=login)
c.desc = "IETF conflict review requested"
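
Editor's note: a rough sketch of what the simplified handling above amounts to, now that conflict reviews point straight at the reviewed Document rather than at a DocAlias. The document names are hypothetical; the relateddocument_set calls are the ones used in this file:

from ietf.doc.models import Document

review = Document.objects.get(name="conflict-review-example-protocol")  # hypothetical name
draft = Document.objects.get(name="draft-example-protocol")             # hypothetical name

# Create the link directly between Documents (no DocAlias lookup needed).
review.relateddocument_set.create(target=draft, relationship_id="conflrev")

# Traversal drops the trailing ".document":
reviewed = review.relateddocument_set.get(relationship__slug="conflrev").target
assert reviewed == draft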

View file

@ -54,13 +54,13 @@ from django.contrib.staticfiles import finders
import debug # pyflakes:ignore
from ietf.doc.models import ( Document, DocAlias, DocHistory, DocEvent, BallotDocEvent, BallotType,
from ietf.doc.models import ( Document, DocHistory, DocEvent, BallotDocEvent, BallotType,
ConsensusDocEvent, NewRevisionDocEvent, TelechatDocEvent, WriteupDocEvent, IanaExpertDocEvent,
IESG_BALLOT_ACTIVE_STATES, STATUSCHANGE_RELATIONS, DocumentActionHolder, DocumentAuthor,
RelatedDocument, RelatedDocHistory)
from ietf.doc.utils import (augment_events_with_revision,
can_adopt_draft, can_unadopt_draft, get_chartering_type, get_tags_for_stream_id,
needed_ballot_positions, nice_consensus, prettify_std_name, update_telechat, has_same_ballot,
needed_ballot_positions, nice_consensus, update_telechat, has_same_ballot,
get_initial_notify, make_notify_changed_event, make_rev_history, default_consensus,
add_events_message_info, get_unicode_document_content,
augment_docs_and_user_with_user_info, irsg_needed_ballot_positions, add_action_holder_change_event,
@ -154,8 +154,8 @@ def render_document_top(request, doc, tab, name):
None,
)
)
tabs.append(("Email expansions","email",urlreverse('ietf.doc.views_doc.document_email', kwargs=dict(name=name)), True, None))
if not doc.type_id in ["bcp", "std", "fyi"]:
tabs.append(("Email expansions","email",urlreverse('ietf.doc.views_doc.document_email', kwargs=dict(name=name)), True, None))
tabs.append(("History", "history", urlreverse('ietf.doc.views_doc.document_history', kwargs=dict(name=name)), True, None))
if name.startswith("rfc"):
@ -163,7 +163,7 @@ def render_document_top(request, doc, tab, name):
else:
name += "-" + doc.rev
return render_to_string("doc/document_top.html",
return render_to_string("doc/document_top.html" if not doc.type_id in ["bcp", "std", "fyi"] else "doc/document_subseries_top.html",
dict(doc=doc,
tabs=tabs,
selected=tab,
@ -180,42 +180,38 @@ def interesting_doc_relations(doc):
else:
raise TypeError("Expected this method to be called with a Document or DocHistory object")
that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs')
that_relationships = STATUSCHANGE_RELATIONS + ('conflrev', 'replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc')
that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs')
that_doc_relationships = ('replaces', 'possibly_replaces', 'updates', 'obs', 'became_rfc')
# TODO: This returns the relationships in database order, which may not be the order we want to display them in.
interesting_relations_that = cls.objects.filter(target__docs=target, relationship__in=that_relationships).select_related('source')
interesting_relations_that_doc = cls.objects.filter(source=doc, relationship__in=that_doc_relationships).prefetch_related('target__docs')
interesting_relations_that = cls.objects.filter(target=target, relationship__in=that_relationships).select_related('source')
interesting_relations_that_doc = cls.objects.filter(source=doc, relationship__in=that_doc_relationships).prefetch_related('target')
return interesting_relations_that, interesting_relations_that_doc
def document_main(request, name, rev=None, document_html=False):
if name.startswith("rfc") and rev is not None:
doc = get_object_or_404(Document.objects.select_related(), name=name)
if doc.type_id == "rfc" and rev is not None:
raise Http404()
doc = get_object_or_404(Document.objects.select_related(), docalias__name=name)
log.assertion('doc.type_id!="rfc" or doc.name.startswith("rfc")')
# take care of possible redirections
aliases = DocAlias.objects.filter(docs=doc).values_list("name", flat=True)
if document_html is False and rev==None and doc.type_id == "draft" and not name.startswith("rfc"):
for a in aliases:
if a.startswith("rfc"):
return redirect("ietf.doc.views_doc.document_main", name=a)
revisions = []
for h in doc.history_set.order_by("time", "id"):
if h.rev and not h.rev in revisions:
revisions.append(h.rev)
if not doc.rev in revisions:
revisions.append(doc.rev)
if document_html is False and rev is None:
became_rfc = doc.became_rfc()
if became_rfc:
return redirect("ietf.doc.views_doc.document_main", name=became_rfc.name)
revisions = doc.revisions_by_dochistory()
latest_rev = doc.rev
snapshot = False
gh = None
if rev:
# find the entry in the history
if rev and rev != doc.rev:
# find the entry in the history if the rev requested is not the current rev
for h in doc.history_set.order_by("-time"):
if rev == h.rev:
snapshot = True
@ -241,9 +237,129 @@ def document_main(request, name, rev=None, document_html=False):
if telechat and (not telechat.telechat_date or telechat.telechat_date < date_today(settings.TIME_ZONE)):
telechat = None
# specific document types
if doc.type_id == "draft":
if doc.type_id == "rfc":
split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off"
if request.GET.get('include_text') == "0":
split_content = True
elif request.GET.get('include_text') == "1":
split_content = False
else:
pass
interesting_relations_that, interesting_relations_that_doc = interesting_doc_relations(doc)
can_edit = has_role(request.user, ("Area Director", "Secretariat"))
can_edit_authors = has_role(request.user, ("Secretariat"))
stream_slugs = StreamName.objects.values_list("slug", flat=True)
# For some reason, AnonymousUser has __iter__, but is not iterable,
# which causes problems in the filter() below. Work around this:
if request.user.is_authenticated:
roles = Role.objects.filter(group__acronym__in=stream_slugs, person__user=request.user)
roles = group_features_role_filter(roles, request.user.person, 'docman_roles')
else:
roles = []
can_change_stream = bool(can_edit or roles)
file_urls, found_types = build_file_urls(doc)
content = doc.text_or_error() # pyflakes:ignore
content = markup_txt.markup(maybe_split(content, split=split_content))
if not found_types:
content = "This RFC is not currently available online."
split_content = False
elif "txt" not in found_types:
content = "This RFC is not available in plain text format."
split_content = False
# status changes
status_changes = []
proposed_status_changes = []
for r in interesting_relations_that.filter(relationship__in=STATUSCHANGE_RELATIONS):
state_slug = r.source.get_state_slug()
if state_slug in ('appr-sent', 'appr-pend'):
status_changes.append(r)
elif state_slug in ('needshep','adrev','iesgeval','defer','appr-pr'):
proposed_status_changes.append(r)
else:
pass
presentations = doc.future_presentations()
augment_docs_and_user_with_user_info([doc], request.user)
exp_comment = doc.latest_event(IanaExpertDocEvent,type="comment")
iana_experts_comment = exp_comment and exp_comment.desc
html = None
js = None
css = None
diff_revisions = None
simple_diff_revisions = None
if document_html:
diff_revisions=get_diff_revisions(request, name, doc)
simple_diff_revisions = [t[1] for t in diff_revisions if t[0] == doc.name]
simple_diff_revisions.reverse()
html = doc.html_body()
if request.COOKIES.get("pagedeps") == "inline":
js = Path(finders.find("ietf/js/document_html.js")).read_text()
css = Path(finders.find("ietf/css/document_html_inline.css")).read_text()
if html:
css += Path(finders.find("ietf/css/document_html_txt.css")).read_text()
# submission
submission = ""
if group is None:
submission = "unknown"
elif group.type_id == "individ":
submission = "individual"
elif group.type_id == "area" and doc.stream_id == "ietf":
submission = "individual in %s area" % group.acronym
else:
if group.features.acts_like_wg and not group.type_id == "edwg":
submission = "%s %s" % (group.acronym, group.type)
else:
submission = group.acronym
submission = '<a href="%s">%s</a>' % (group.about_url(), submission)
return render(request, "doc/document_rfc.html" if document_html is False else "doc/document_html.html",
dict(doc=doc,
document_html=document_html,
css=css,
js=js,
html=html,
group=group,
top=top,
name=doc.name,
content=content,
split_content=split_content,
revisions=simple_diff_revisions if document_html else revisions,
latest_rev=latest_rev,
can_edit=can_edit,
can_edit_authors=can_edit_authors,
can_change_stream=can_change_stream,
rfc_number=doc.rfc_number,
updates=interesting_relations_that_doc.filter(relationship="updates"),
updated_by=interesting_relations_that.filter(relationship="updates"),
obsoletes=interesting_relations_that_doc.filter(relationship="obs"),
obsoleted_by=interesting_relations_that.filter(relationship="obs"),
status_changes=status_changes,
proposed_status_changes=proposed_status_changes,
has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj
file_urls=file_urls,
rfc_editor_state=doc.get_state("draft-rfceditor"),
iana_review_state=doc.get_state("draft-iana-review"),
iana_action_state=doc.get_state("draft-iana-action"),
iana_experts_state=doc.get_state("draft-iana-experts"),
iana_experts_comment=iana_experts_comment,
presentations=presentations,
diff_revisions=diff_revisions,
submission=submission
))
elif doc.type_id == "draft":
split_content = request.COOKIES.get("full_draft", settings.USER_PREFERENCE_DEFAULTS["full_draft"]) == "off"
if request.GET.get('include_text') == "0":
split_content = True
@ -281,43 +397,13 @@ def document_main(request, name, rev=None, document_html=False):
is_author = request.user.is_authenticated and doc.documentauthor_set.filter(person__user=request.user).exists()
can_view_possibly_replaces = can_edit_replaces or is_author
rfc_number = name[3:] if name.startswith("rfc") else None
draft_name = None
for a in aliases:
if a.startswith("draft"):
draft_name = a
rfc_aliases = [prettify_std_name(a) for a in aliases
if a.startswith("fyi") or a.startswith("std") or a.startswith("bcp")]
latest_revision = None
# Workaround to allow displaying last rev of draft that became rfc as a draft
# This should be unwound when RFCs become their own documents.
if snapshot:
doc.name = doc.doc.name
name = doc.doc.name
else:
name = doc.name
file_urls, found_types = build_file_urls(doc)
if not snapshot and doc.get_state_slug() == "rfc":
# content
content = doc.text_or_error() # pyflakes:ignore
content = markup_txt.markup(maybe_split(content, split=split_content))
content = doc.text_or_error() # pyflakes:ignore
content = markup_txt.markup(maybe_split(content, split=split_content))
if not snapshot and doc.get_state_slug() == "rfc":
if not found_types:
content = "This RFC is not currently available online."
split_content = False
elif "txt" not in found_types:
content = "This RFC is not available in plain text format."
split_content = False
else:
latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
# ballot
iesg_ballot_summary = None
@ -497,7 +583,7 @@ def document_main(request, name, rev=None, document_html=False):
augment_docs_and_user_with_user_info([doc], request.user)
published = doc.latest_event(type="published_rfc")
published = doc.latest_event(type="published_rfc") # TODO: rethink this now that published_rfc is on the rfc document
started_iesg_process = doc.latest_event(type="started_iesg_process")
review_assignments = review_assignments_to_list_for_docs([doc]).get(doc.name, [])
@ -555,7 +641,7 @@ def document_main(request, name, rev=None, document_html=False):
html=html,
group=group,
top=top,
name=name,
name=doc.name,
content=content,
split_content=split_content,
revisions=simple_diff_revisions if document_html else revisions,
@ -579,8 +665,6 @@ def document_main(request, name, rev=None, document_html=False):
can_request_review=can_request_review,
can_submit_unsolicited_review_for_teams=can_submit_unsolicited_review_for_teams,
rfc_number=rfc_number,
draft_name=draft_name,
telechat=telechat,
iesg_ballot_summary=iesg_ballot_summary,
submission=submission,
@ -597,7 +681,6 @@ def document_main(request, name, rev=None, document_html=False):
conflict_reviews=conflict_reviews,
status_changes=status_changes,
proposed_status_changes=proposed_status_changes,
rfc_aliases=rfc_aliases,
has_errata=doc.pk and doc.tags.filter(slug="errata"), # doc.pk == None if using a fake_history_obj
published=published,
file_urls=file_urls,
@ -627,7 +710,7 @@ def document_main(request, name, rev=None, document_html=False):
diff_revisions=diff_revisions
))
if doc.type_id == "charter":
elif doc.type_id == "charter":
content = doc.text_or_error() # pyflakes:ignore
content = markdown.markdown(content)
@ -664,7 +747,7 @@ def document_main(request, name, rev=None, document_html=False):
can_manage=can_manage,
))
if doc.type_id == "bofreq":
elif doc.type_id == "bofreq":
content = markdown.markdown(doc.text_or_error())
editors = bofreq_editors(doc)
responsible = bofreq_responsible(doc)
@ -684,8 +767,8 @@ def document_main(request, name, rev=None, document_html=False):
editor_can_manage=editor_can_manage,
))
if doc.type_id == "conflrev":
filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev)
elif doc.type_id == "conflrev":
filename = "%s-%s.txt" % (doc.name, doc.rev)
pathname = os.path.join(settings.CONFLICT_REVIEW_PATH,filename)
if doc.rev == "00" and not os.path.isfile(pathname):
@ -699,7 +782,7 @@ def document_main(request, name, rev=None, document_html=False):
if doc.get_state_slug() in ("iesgeval", ) and doc.active_ballot():
ballot_summary = needed_ballot_positions(doc, list(doc.active_ballot().active_balloter_positions().values()))
conflictdoc = doc.related_that_doc('conflrev')[0].document
conflictdoc = doc.related_that_doc('conflrev')[0]
return render(request, "doc/document_conflict_review.html",
dict(doc=doc,
@ -714,8 +797,8 @@ def document_main(request, name, rev=None, document_html=False):
approved_states=('appr-reqnopub-pend','appr-reqnopub-sent','appr-noprob-pend','appr-noprob-sent'),
))
if doc.type_id == "statchg":
filename = "%s-%s.txt" % (doc.canonical_name(), doc.rev)
elif doc.type_id == "statchg":
filename = "%s-%s.txt" % (doc.name, doc.rev)
pathname = os.path.join(settings.STATUS_CHANGE_PATH,filename)
if doc.rev == "00" and not os.path.isfile(pathname):
@ -748,14 +831,14 @@ def document_main(request, name, rev=None, document_html=False):
sorted_relations=sorted_relations,
))
if doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",):
elif doc.type_id in ("slides", "agenda", "minutes", "bluesheets", "procmaterials",):
can_manage_material = can_manage_materials(request.user, doc.group)
presentations = doc.future_presentations()
if doc.uploaded_filename:
# we need to remove the extension for the globbing below to work
basename = os.path.splitext(doc.uploaded_filename)[0]
else:
basename = "%s-%s" % (doc.canonical_name(), doc.rev)
basename = "%s-%s" % (doc.name, doc.rev)
pathname = os.path.join(doc.get_file_path(), basename)
@ -804,7 +887,7 @@ def document_main(request, name, rev=None, document_html=False):
))
if doc.type_id == "review":
elif doc.type_id == "review":
basename = "{}.txt".format(doc.name)
pathname = os.path.join(doc.get_file_path(), basename)
content = get_unicode_document_content(basename, pathname)
@ -830,7 +913,7 @@ def document_main(request, name, rev=None, document_html=False):
assignments=assignments,
))
if doc.type_id in ("chatlog", "polls"):
elif doc.type_id in ("chatlog", "polls"):
if isinstance(doc,DocHistory):
session = doc.doc.sessionpresentation_set.last().session
else:
@ -851,7 +934,7 @@ def document_main(request, name, rev=None, document_html=False):
)
)
if doc.type_id == "statement":
elif doc.type_id == "statement":
if doc.uploaded_filename:
basename = doc.uploaded_filename.split(".")[0] # strip extension
else:
@ -872,7 +955,6 @@ def document_main(request, name, rev=None, document_html=False):
can_manage = has_role(request.user,["Secretariat"]) # Add IAB or IESG as appropriate
interesting_relations_that, interesting_relations_that_doc = interesting_doc_relations(doc)
published = doc.latest_event(type="published_statement").time
return render(request, "doc/document_statement.html",
dict(doc=doc,
top=top,
@ -885,6 +967,9 @@ def document_main(request, name, rev=None, document_html=False):
replaced_by=interesting_relations_that.filter(relationship="replaces"),
can_manage=can_manage,
))
elif doc.type_id in ["bcp", "std", "fyi"]:
return render(request, "doc/document_subseries.html", {"doc": doc, "top": top})
raise Http404("Document not found: %s" % (name + ("-%s"%rev if rev else "")))
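
Editor's note: throughout this view, navigation between a draft and the RFC it became now goes through the became_rfc()/came_from_draft() helpers rather than docalias names. A small sketch of that navigation, under the assumption that those helpers behave as they are used above (returning the related Document, or None):

from ietf.doc.models import Document

def rfc_for(draft_name):
    # Follow a draft forward to the RFC it became, if it has been published.
    draft = Document.objects.get(type_id="draft", name=draft_name)
    return draft.became_rfc()        # Document of type "rfc", or None

def draft_for(rfc_name):
    # Follow an RFC back to the draft it came from, if any.
    rfc = Document.objects.get(type_id="rfc", name=rfc_name)
    return rfc.came_from_draft()     # Document of type "draft", or None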
@ -938,9 +1023,9 @@ def document_html(request, name, rev=None):
doc = found.documents.get()
rev = found.matched_rev
if not requested_rev and doc.is_rfc(): # Someone asked for /doc/html/8989
if not requested_rev and doc.type_id == "rfc": # Someone asked for /doc/html/8989
if not name.startswith('rfc'):
return redirect('ietf.doc.views_doc.document_html', name=doc.canonical_name())
return redirect('ietf.doc.views_doc.document_html', name=doc.name)
if rev:
doc = doc.history_set.filter(rev=rev).first() or doc.fake_history_obj(rev)
@ -948,7 +1033,12 @@ def document_html(request, name, rev=None):
if not os.path.exists(doc.get_file_name()):
raise Http404("File not found: %s" % doc.get_file_name())
return document_main(request, name=doc.name if requested_rev else doc.canonical_name(), rev=doc.rev if requested_rev or not doc.is_rfc() else None, document_html=True)
return document_main(
request,
name=doc.name if requested_rev else doc.name,
rev=doc.rev if requested_rev or doc.type_id != "rfc" else None,
document_html=True,
)
def document_pdfized(request, name, rev=None, ext=None):
@ -1008,7 +1098,7 @@ def get_doc_email_aliases(name):
return aliases
def document_email(request,name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
top = render_document_top(request, doc, "email", name)
aliases = get_doc_email_aliases(name) if doc.type_id=='draft' else None
@ -1026,6 +1116,11 @@ def document_email(request,name):
def get_diff_revisions(request, name, doc):
""" Calculate what to offer for diff comparisons
Returns a list of (name, rev, time, url, is_this_doc, is_previous_doc) tuples,
ordered by -time, for use by the forms that link to the diff tools.
"""
diffable = any(
[
name.startswith(prefix)
@ -1048,17 +1143,21 @@ def get_diff_revisions(request, name, doc):
diff_documents = [doc]
diff_documents.extend(
Document.objects.filter(
docalias__relateddocument__source=doc,
docalias__relateddocument__relationship="replaces",
relateddocument__source=doc,
relateddocument__relationship="replaces",
)
)
if doc.came_from_draft():
diff_documents.append(doc.came_from_draft())
if doc.get_state_slug() == "rfc":
if doc.became_rfc():
rfc = doc.became_rfc()
e = rfc.latest_event(type="published_rfc")
diff_revisions.append((rfc.name, "", e.time if e else rfc.time, rfc.name, False, False))
if doc.type_id == "rfc":
e = doc.latest_event(type="published_rfc")
aliases = doc.docalias.filter(name__startswith="rfc")
if aliases:
name = aliases[0].name
diff_revisions.append((name, "", e.time if e else doc.time, name))
diff_revisions.append((name, "", e.time if e else doc.time, name, True, False))
seen = set()
for e in (
@ -1087,13 +1186,22 @@ def get_diff_revisions(request, name, doc):
# rfcdiff tool has special support for IDs
url = e.doc.name + "-" + e.rev
diff_revisions.append((e.doc.name, e.rev, e.time, url))
diff_revisions.append((e.doc.name, e.rev, e.time, url, e.doc == doc and e.rev == doc.rev, False))
diff_revisions.sort(key=lambda t: t[2], reverse=True)
for index, t in enumerate(diff_revisions):
if t[4]: # is_this_doc
n = index+1
if n < len(diff_revisions):
t_name, rev, time, url, _, _ = diff_revisions[n]
diff_revisions[n] = (t_name, rev, time, url, False, True)
break
return diff_revisions
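
Editor's note: given the tuple layout documented above, a caller that needs the current entry and the one immediately before it could do something like the following sketch; the helper is illustrative only and not part of this change:

def pick_current_and_previous(diff_revisions):
    # Index 4 is is_this_doc and index 5 is is_previous_doc, per the docstring above.
    current = next((t for t in diff_revisions if t[4]), None)
    previous = next((t for t in diff_revisions if t[5]), None)
    return current, previous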
def document_history(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
top = render_document_top(request, doc, "history", name)
diff_revisions = get_diff_revisions(request, name, doc)
@ -1104,21 +1212,38 @@ def document_history(request, name):
add_events_message_info(events)
# figure out if the current user can add a comment to the history
if doc.type_id == "draft" and doc.group != None:
can_add_comment = bool(has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor")) or (
request.user.is_authenticated and
Role.objects.filter(name__in=("chair", "secr"),
group__acronym=doc.group.acronym,
person__user=request.user)))
if doc.type_id in ("draft", "rfc") and doc.group is not None:
can_add_comment = bool(
has_role(
request.user,
("Area Director", "Secretariat", "IRTF Chair", "IANA", "RFC Editor"),
)
or (
request.user.is_authenticated
and Role.objects.filter(
name__in=("chair", "secr"),
group__acronym=doc.group.acronym,
person__user=request.user,
)
)
)
else:
can_add_comment = has_role(request.user, ("Area Director", "Secretariat", "IRTF Chair"))
return render(request, "doc/document_history.html",
dict(doc=doc,
top=top,
diff_revisions=diff_revisions,
events=events,
can_add_comment=can_add_comment,
))
can_add_comment = has_role(
request.user, ("Area Director", "Secretariat", "IRTF Chair")
)
return render(
request,
"doc/document_history.html",
{
"doc": doc,
"top": top,
"diff_revisions": diff_revisions,
"events": events,
"can_add_comment": can_add_comment,
},
)
def document_bibtex(request, name, rev=None):
@ -1135,27 +1260,28 @@ def document_bibtex(request, name, rev=None):
name = name+"-"+rev
rev = None
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
replaced_by = [d.name for d in doc.related_that("replaces")]
published = doc.latest_event(type="published_rfc") is not None
rfc = latest_revision.doc if latest_revision and latest_revision.doc.get_state_slug() == "rfc" else None
doi = None
draft_became_rfc = None
replaced_by = None
latest_revision = None
if doc.type_id == "draft":
latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
replaced_by = [d.name for d in doc.related_that("replaces")]
draft_became_rfc = doc.became_rfc()
if rev != None and rev != doc.rev:
# find the entry in the history
for h in doc.history_set.order_by("-time"):
if rev == h.rev:
doc = h
break
if rev != None and rev != doc.rev:
# find the entry in the history
for h in doc.history_set.order_by("-time"):
if rev == h.rev:
doc = h
break
if doc.is_rfc():
elif doc.type_id == "rfc":
# This needs to be replaced with a lookup, as the mapping may change
# over time. Probably by updating ietf/sync/rfceditor.py to add the
# as a DocAlias, and use a method on Document to retrieve it.
doi = "10.17487/RFC%04d" % int(doc.rfc_number())
else:
doi = None
# over time.
doi = f"10.17487/RFC{doc.rfc_number:04d}"
if doc.is_dochistory():
latest_event = doc.latest_event(type='new_revision', rev=rev)
@ -1165,8 +1291,7 @@ def document_bibtex(request, name, rev=None):
return render(request, "doc/document_bibtex.bib",
dict(doc=doc,
replaced_by=replaced_by,
published=published,
rfc=rfc,
published_as=draft_became_rfc,
latest_revision=latest_revision,
doi=doi,
),
@ -1203,7 +1328,7 @@ def document_bibxml(request, name, rev=None):
def document_writeup(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
top = render_document_top(request, doc, "writeup", name)
def text_from_writeup(event_type):
@ -1267,7 +1392,7 @@ def document_writeup(request, name):
))
def document_shepherd_writeup(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
lastwriteup = doc.latest_event(WriteupDocEvent,type="changed_protocol_writeup")
if lastwriteup:
writeup_text = lastwriteup.text
@ -1304,22 +1429,28 @@ def document_shepherd_writeup_template(request, type):
def document_references(request, name):
doc = get_object_or_404(Document,docalias__name=name)
doc = get_object_or_404(Document,name=name)
refs = doc.references()
if doc.type_id in ["bcp","std","fyi"]:
for rfc in doc.contains():
refs |= rfc.references()
return render(request, "doc/document_references.html",dict(doc=doc,refs=sorted(refs,key=lambda x:x.target.name),))
def document_referenced_by(request, name):
doc = get_object_or_404(Document,docalias__name=name)
doc = get_object_or_404(Document,name=name)
refs = doc.referenced_by()
if doc.type_id in ["bcp","std","fyi"]:
for rfc in doc.contains():
refs |= rfc.referenced_by()
full = ( request.GET.get('full') != None )
numdocs = refs.count()
if not full and numdocs>250:
refs=refs[:250]
else:
numdocs=None
refs=sorted(refs,key=lambda x:(['refnorm','refinfo','refunk','refold'].index(x.relationship.slug),x.source.canonical_name()))
refs=sorted(refs,key=lambda x:(['refnorm','refinfo','refunk','refold'].index(x.relationship.slug),x.source.name))
return render(request, "doc/document_referenced_by.html",
dict(alias_name=name,
dict(name=name,
doc=doc,
numdocs=numdocs,
refs=refs,
@ -1393,7 +1524,7 @@ def document_ballot_content(request, doc, ballot_id, editable=True):
request=request)
def document_ballot(request, name, ballot_id=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
all_ballots = list(BallotDocEvent.objects.filter(doc=doc, type="created_ballot").order_by("time"))
if not ballot_id:
if all_ballots:
@ -1429,7 +1560,7 @@ def document_ballot(request, name, ballot_id=None):
))
def document_irsg_ballot(request, name, ballot_id=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
top = render_document_top(request, doc, "irsgballot", name)
if not ballot_id:
ballot = doc.latest_event(BallotDocEvent, type="created_ballot", ballot_type__slug='irsg-approve')
@ -1448,7 +1579,7 @@ def document_irsg_ballot(request, name, ballot_id=None):
))
def document_rsab_ballot(request, name, ballot_id=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
top = render_document_top(request, doc, "rsabballot", name)
if not ballot_id:
ballot = doc.latest_event(BallotDocEvent, type="created_ballot", ballot_type__slug='rsab-approve')
@ -1470,7 +1601,7 @@ def document_rsab_ballot(request, name, ballot_id=None):
)
def ballot_popup(request, name, ballot_id):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
c = document_ballot_content(request, doc, ballot_id=ballot_id, editable=False)
ballot = get_object_or_404(BallotDocEvent,id=ballot_id)
return render(request, "doc/ballot_popup.html",
@ -1483,7 +1614,7 @@ def ballot_popup(request, name, ballot_id):
def document_json(request, name, rev=None):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
def extract_name(s):
return s.name if s else None
@ -1503,7 +1634,6 @@ def document_json(request, name, rev=None):
data["expires"] = doc.expires.strftime("%Y-%m-%d %H:%M:%S") if doc.expires else None
data["title"] = doc.title
data["abstract"] = doc.abstract
data["aliases"] = list(doc.docalias.values_list("name", flat=True))
data["state"] = extract_name(doc.get_state())
data["intended_std_level"] = extract_name(doc.intended_std_level)
data["std_level"] = extract_name(doc.std_level)
@ -1519,7 +1649,7 @@ def document_json(request, name, rev=None):
latest_revision = doc.latest_event(NewRevisionDocEvent, type="new_revision")
data["rev_history"] = make_rev_history(latest_revision.doc if latest_revision else doc)
if doc.type_id == "draft":
if doc.type_id == "draft": # These live only on drafts
data["iesg_state"] = extract_name(doc.get_state("draft-iesg"))
data["rfceditor_state"] = extract_name(doc.get_state("draft-rfceditor"))
data["iana_review_state"] = extract_name(doc.get_state("draft-iana-review"))
@ -1528,6 +1658,8 @@ def document_json(request, name, rev=None):
if doc.stream_id in ("ietf", "irtf", "iab"):
e = doc.latest_event(ConsensusDocEvent, type="changed_consensus")
data["consensus"] = e.consensus if e else None
if doc.type_id in ["draft", "rfc"]:
data["stream"] = extract_name(doc.stream)
return HttpResponse(json.dumps(data, indent=2), content_type='application/json')
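
Editor's note: with DocAlias gone, the JSON above no longer carries an "aliases" list; "stream" is emitted for both drafts and rfcs, while the IESG/IANA review states remain draft-only. A rough sketch of the shape for an rfc-type document, with every value invented purely for illustration:

example_rfc_json = {                      # illustrative only; values are made up
    "title": "An Example Protocol",
    "abstract": "...",
    "state": "...",
    "intended_std_level": None,
    "std_level": "...",
    "stream": "ietf",                     # now present for drafts and rfcs
    # no "aliases" key, and no draft-only iesg/iana/rfceditor state fields
}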
@ -1538,7 +1670,7 @@ class AddCommentForm(forms.Form):
@role_required('Area Director', 'Secretariat', 'IRTF Chair', 'WG Chair', 'RG Chair', 'WG Secretary', 'RG Secretary', 'IANA', 'RFC Editor')
def add_comment(request, name):
"""Add comment to history of document."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
login = request.user.person
@ -1622,9 +1754,9 @@ def telechat_date(request, name):
def doc_titletext(doc):
if doc.type.slug=='conflrev':
conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
return 'the conflict review of %s' % conflictdoc.canonical_name()
return doc.canonical_name()
conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target
return 'the conflict review of %s' % conflictdoc.name
return doc.name
def edit_notify(request, name):
@ -1863,7 +1995,7 @@ def remind_action_holders(request, name):
form = ReminderEmailForm(request.POST)
if form.is_valid():
email_remind_action_holders(request, doc, form.cleaned_data['note'])
return redirect('ietf.doc.views_doc.document_main', name=doc.canonical_name())
return redirect('ietf.doc.views_doc.document_main', name=doc.name)
form = ReminderEmailForm()
return render(
@ -2028,10 +2160,16 @@ def idnits2_rfc_status(request):
def idnits2_state(request, name, rev=None):
doc = get_object_or_404(Document, docalias__name=name)
if doc.type_id!='draft':
doc = get_object_or_404(Document, name=name)
if doc.type_id not in ["draft", "rfc"]:
raise Http404
zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first()
zero_revision = None
if doc.type_id == "rfc":
draft = doc.came_from_draft()
if draft:
zero_revision = NewRevisionDocEvent.objects.filter(doc=draft,rev='00').first()
else:
zero_revision = NewRevisionDocEvent.objects.filter(doc=doc,rev='00').first()
if zero_revision:
doc.created = zero_revision.time
else:

View file

@ -19,7 +19,7 @@ def downref_registry(request):
downref_doc_pairs = [ ]
downref_relations = RelatedDocument.objects.filter(relationship_id='downref-approval')
for rel in downref_relations:
downref_doc_pairs.append((rel.target.document, rel.source))
downref_doc_pairs.append((rel.target, rel.source))
return render(request, 'doc/downref.html', {
"doc_pairs": downref_doc_pairs,
@ -38,18 +38,18 @@ def downref_registry_add(request):
if form.is_valid():
drafts = form.cleaned_data['drafts']
rfc = form.cleaned_data['rfc']
for da in drafts:
RelatedDocument.objects.create(source=da.document,
for d in drafts:
RelatedDocument.objects.create(source=d,
target=rfc, relationship_id='downref-approval')
c = DocEvent(type="downref_approved", doc=da.document,
rev=da.document.rev, by=login)
c = DocEvent(type="downref_approved", doc=d,
rev=d.rev, by=login)
c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % (
rfc.document.rfc_number(), da.name, da.document.rev)
rfc.rfc_number, d.name, d.rev)
c.save()
c = DocEvent(type="downref_approved", doc=rfc.document,
rev=rfc.document.rev, by=login)
c = DocEvent(type="downref_approved", doc=rfc,
rev=rfc.rev, by=login)
c.desc = "Downref to RFC %s approved by Last Call for %s-%s" % (
rfc.document.rfc_number(), da.name, da.document.rev)
rfc.rfc_number, d.name, d.rev)
c.save()
return HttpResponseRedirect(urlreverse('ietf.doc.views_downref.downref_registry'))

View file

@ -23,7 +23,7 @@ from django.utils import timezone
import debug # pyflakes:ignore
from ietf.doc.models import ( Document, DocAlias, RelatedDocument, State,
from ietf.doc.models import ( Document, RelatedDocument, State,
StateType, DocEvent, ConsensusDocEvent, TelechatDocEvent, WriteupDocEvent, StateDocEvent,
IanaExpertDocEvent, IESG_SUBSTATE_TAGS)
from ietf.doc.mails import ( email_pulled_from_rfc_queue, email_resurrect_requested,
@ -38,7 +38,7 @@ from ietf.doc.utils import ( add_state_change_event, can_adopt_draft, can_unadop
set_replaces_for_document, default_consensus, tags_suffix, can_edit_docextresources,
update_doc_extresources )
from ietf.doc.lastcall import request_last_call
from ietf.doc.fields import SearchableDocAliasesField
from ietf.doc.fields import SearchableDocumentsField
from ietf.doc.forms import ExtResourceForm
from ietf.group.models import Group, Role, GroupFeatures
from ietf.iesg.models import TelechatDate
@ -72,7 +72,7 @@ class ChangeStateForm(forms.Form):
state = self.cleaned_data.get('state', '(None)')
tag = self.cleaned_data.get('substate','')
comment = self.cleaned_data['comment'].strip() # pyflakes:ignore
doc = get_object_or_404(Document, docalias__name=self.docname)
doc = get_object_or_404(Document, name=self.docname)
prev = doc.get_state("draft-iesg")
# tag handling is a bit awkward since the UI still works
@ -92,7 +92,7 @@ class ChangeStateForm(forms.Form):
def change_state(request, name):
"""Change IESG state of Internet-Draft, notifying parties as necessary
and logging the change as a comment."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if (not doc.latest_event(type="started_iesg_process")) or doc.get_state_slug() == "expired":
raise Http404
@ -212,7 +212,7 @@ class AddIanaExpertsCommentForm(forms.Form):
@role_required('Secretariat', 'IANA')
def add_iana_experts_comment(request, name):
doc = get_object_or_404(Document, docalias__name = name)
doc = get_object_or_404(Document, name = name)
if request.method == 'POST':
form = AddIanaExpertsCommentForm(request.POST)
if form.is_valid():
@ -238,7 +238,7 @@ class ChangeIanaStateForm(forms.Form):
def change_iana_state(request, name, state_type):
"""Change IANA review state of Internet-Draft. Normally, this is done via
automatic sync, but this form allows one to set it manually."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
state_type = doc.type_id + "-" + state_type
@ -278,7 +278,7 @@ class ChangeStreamForm(forms.Form):
def change_stream(request, name):
"""Change the stream of a Document of type 'draft', notifying parties as necessary
and logging the change as a comment."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if not doc.type_id=='draft':
raise Http404
@ -340,7 +340,7 @@ def change_stream(request, name):
))
class ReplacesForm(forms.Form):
replaces = SearchableDocAliasesField(required=False)
replaces = SearchableDocumentsField(required=False)
comment = forms.CharField(widget=forms.Textarea, required=False, strip=False)
def __init__(self, *args, **kwargs):
@ -350,16 +350,16 @@ class ReplacesForm(forms.Form):
def clean_replaces(self):
for d in self.cleaned_data['replaces']:
if d.document == self.doc:
if d == self.doc:
raise forms.ValidationError("An Internet-Draft can't replace itself")
if d.document.type_id == "draft" and d.document.get_state_slug() == "rfc":
if d.type_id == "draft" and d.get_state_slug() == "rfc":
raise forms.ValidationError("An Internet-Draft can't replace an RFC")
return self.cleaned_data['replaces']
def replaces(request, name):
"""Change 'replaces' set of a Document of type 'draft', notifying parties
as necessary and logging the change as a comment."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.type_id != 'draft':
raise Http404
if not (has_role(request.user, ("Secretariat", "Area Director", "WG Chair", "RG Chair", "WG Secretary", "RG Secretary"))
@ -390,7 +390,7 @@ def replaces(request, name):
))
class SuggestedReplacesForm(forms.Form):
replaces = forms.ModelMultipleChoiceField(queryset=DocAlias.objects.all(),
replaces = forms.ModelMultipleChoiceField(queryset=Document.objects.all(),
label="Suggestions", required=False, widget=forms.CheckboxSelectMultiple,
help_text="Select only the documents that are replaced by this document")
comment = forms.CharField(label="Optional comment", widget=forms.Textarea, required=False, strip=False)
@ -403,7 +403,7 @@ class SuggestedReplacesForm(forms.Form):
self.fields["replaces"].choices = [(d.pk, d.name) for d in suggested]
def review_possibly_replaces(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.type_id != 'draft':
raise Http404
if not (has_role(request.user, ("Secretariat", "Area Director"))
@ -458,7 +458,7 @@ class ChangeIntentionForm(forms.Form):
def change_intention(request, name):
"""Change the intended publication status of a Document of type 'draft', notifying parties
as necessary and logging the change as a comment."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.type_id != 'draft':
raise Http404
@ -523,7 +523,7 @@ class EditInfoForm(forms.Form):
def to_iesg(request,name):
""" Submit an IETF stream document to the IESG for publication """
doc = get_object_or_404(Document, docalias__name=name, stream='ietf')
doc = get_object_or_404(Document, name=name, stream='ietf')
if doc.get_state_slug('draft') == "expired" or doc.get_state_slug('draft-iesg') == 'pub-req' :
raise Http404
@ -636,7 +636,7 @@ def to_iesg(request,name):
def edit_info(request, name):
"""Edit various Internet-Draft attributes, notifying parties as
necessary and logging changes as document events."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.get_state_slug() == "expired":
raise Http404
@ -676,7 +676,7 @@ def edit_info(request, name):
e.save()
events.append(e)
replaces = Document.objects.filter(docalias__relateddocument__source=doc, docalias__relateddocument__relationship="replaces")
replaces = Document.objects.filter(targets_related__source=doc, targets_related__relationship="replaces")
if replaces:
# this should perhaps be somewhere else, e.g. the
# place where the replace relationship is established?
@ -781,7 +781,7 @@ def edit_info(request, name):
@role_required('Area Director','Secretariat')
def request_resurrect(request, name):
"""Request resurrect of expired Internet-Draft."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.get_state_slug() != "expired":
raise Http404
@ -804,7 +804,7 @@ def request_resurrect(request, name):
@role_required('Secretariat')
def resurrect(request, name):
"""Resurrect expired Internet-Draft."""
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if doc.get_state_slug() != "expired":
raise Http404

View file

@ -16,7 +16,7 @@ from django.urls import reverse as urlreverse
import debug # pyflakes:ignore
from ietf.doc.models import Document, DocAlias, DocTypeName, DocEvent, State
from ietf.doc.models import Document, DocTypeName, DocEvent, State
from ietf.doc.models import NewRevisionDocEvent
from ietf.doc.utils import add_state_change_event, check_common_doc_name_rules
from ietf.group.models import Group
@ -156,10 +156,6 @@ def edit_material(request, name=None, acronym=None, action=None, doc_type=None):
for chunk in f.chunks():
dest.write(chunk)
if action == "new":
alias, __ = DocAlias.objects.get_or_create(name=doc.name)
alias.docs.add(doc)
if prev_rev != doc.rev:
e = NewRevisionDocEvent(type="new_revision", doc=doc, rev=doc.rev)
e.by = request.user.person

View file

@ -28,7 +28,7 @@ from django.core.exceptions import ValidationError
from django.template.loader import render_to_string, TemplateDoesNotExist
from django.urls import reverse as urlreverse
from ietf.doc.models import (Document, NewRevisionDocEvent, State, DocAlias,
from ietf.doc.models import (Document, NewRevisionDocEvent, State,
LastCallDocEvent, ReviewRequestDocEvent, ReviewAssignmentDocEvent, DocumentAuthor)
from ietf.name.models import (ReviewRequestStateName, ReviewAssignmentStateName, ReviewResultName,
ReviewTypeName)
@ -117,7 +117,7 @@ class RequestReviewForm(forms.ModelForm):
@login_required
def request_review(request, name):
doc = get_object_or_404(Document, name=name)
doc = get_object_or_404(Document, type_id="draft", name=name)
if not can_request_review_of_doc(request.user, doc):
permission_denied(request, "You do not have permission to perform this action")
@ -753,9 +753,7 @@ def complete_review(request, name, assignment_id=None, acronym=None):
name=review_name,
defaults={'type_id': 'review', 'group': team},
)
if created:
DocAlias.objects.create(name=review_name).docs.add(review)
else:
if not created:
messages.warning(request, message='Attempt to save review failed: review document already exists. This most likely occurred because the review was submitted twice in quick succession. If you intended to submit a new review, rather than update an existing one, things are probably OK. Please verify that the shown review is what you expected.')
return redirect("ietf.doc.views_doc.document_main", name=review_name)
@ -1093,7 +1091,7 @@ class ReviewWishAddForm(forms.Form):
@login_required
def review_wish_add(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
if request.method == "POST":
form = ReviewWishAddForm(request.user, doc, request.POST)
@ -1110,7 +1108,7 @@ def review_wish_add(request, name):
@login_required
def review_wishes_remove(request, name):
doc = get_object_or_404(Document, docalias__name=name)
doc = get_object_or_404(Document, name=name)
person = get_object_or_404(Person, user=request.user)
if request.method == "POST":

View file

@ -1,4 +1,4 @@
# Copyright The IETF Trust 2009-2022, All Rights Reserved
# Copyright The IETF Trust 2009-2023, All Rights Reserved
# -*- coding: utf-8 -*-
#
# Some parts Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
@ -37,7 +37,9 @@
import re
import datetime
import copy
import operator
from functools import reduce
from django import forms
from django.conf import settings
from django.core.cache import cache, caches
@ -53,7 +55,7 @@ from django.utils.text import slugify
import debug # pyflakes:ignore
from ietf.doc.models import ( Document, DocHistory, DocAlias, State,
from ietf.doc.models import ( Document, DocHistory, State,
LastCallDocEvent, NewRevisionDocEvent, IESG_SUBSTATE_TAGS,
IESG_BALLOT_ACTIVE_STATES, IESG_STATCHG_CONFLREV_ACTIVE_STATES,
IESG_CHARTER_ACTIVE_STATES )
@ -96,7 +98,7 @@ class SearchForm(forms.Form):
("ad", "AD"), ("-ad", "AD (desc)"), ),
required=False, widget=forms.HiddenInput)
doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft','liai-att')).order_by('name'), required=False)
doctypes = forms.ModelMultipleChoiceField(queryset=DocTypeName.objects.filter(used=True).exclude(slug__in=('draft', 'rfc', 'bcp', 'std', 'fyi', 'liai-att')).order_by('name'), required=False)
def __init__(self, *args, **kwargs):
super(SearchForm, self).__init__(*args, **kwargs)
@ -155,8 +157,11 @@ def retrieve_search_results(form, all_types=False):
else:
types = []
if query['activedrafts'] or query['olddrafts'] or query['rfcs']:
if query['activedrafts'] or query['olddrafts']:
types.append('draft')
if query['rfcs']:
types.append('rfc')
types.extend(query["doctypes"])
@ -167,13 +172,50 @@ def retrieve_search_results(form, all_types=False):
# name
if query["name"]:
docs = docs.filter(Q(docalias__name__icontains=query["name"]) |
Q(title__icontains=query["name"])).distinct()
look_for = query["name"]
queries = [
Q(name__icontains=look_for),
Q(title__icontains=look_for)
]
# Check to see if this is just a search for an rfc; if so, look for a few variants
if look_for.lower()[:3] == "rfc" and look_for[3:].strip().isdigit():
spaceless = look_for.lower()[:3]+look_for[3:].strip()
if spaceless != look_for:
queries.extend([
Q(name__icontains=spaceless),
Q(title__icontains=spaceless)
])
singlespace = look_for.lower()[:3]+" "+look_for[3:].strip()
if singlespace != look_for:
queries.extend([
Q(name__icontains=singlespace),
Q(title__icontains=singlespace)
])
# Do a similar thing if the search is just for a subseries doc, like a bcp.
if look_for.lower()[:3] in ["bcp", "fyi", "std"] and look_for[3:].strip().isdigit() and query["rfcs"]: # Also look for rfcs contained in the subseries.
queries.extend([
Q(targets_related__source__name__icontains=look_for, targets_related__relationship_id="contains"),
Q(targets_related__source__title__icontains=look_for, targets_related__relationship_id="contains"),
])
spaceless = look_for.lower()[:3]+look_for[3:].strip()
if spaceless != look_for:
queries.extend([
Q(targets_related__source__name__icontains=spaceless, targets_related__relationship_id="contains"),
Q(targets_related__source__title__icontains=spaceless, targets_related__relationship_id="contains"),
])
singlespace = look_for.lower()[:3]+" "+look_for[3:].strip()
if singlespace != look_for:
queries.extend([
Q(targets_related__source__name__icontains=singlespace, targets_related__relationship_id="contains"),
Q(targets_related__source__title__icontains=singlespace, targets_related__relationship_id="contains"),
])
combined_query = reduce(operator.or_, queries)
docs = docs.filter(combined_query).distinct()
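
Editor's note: the variant handling above can be read as a small normalization step, so that a query such as "RFC 1234" is also matched against "rfc1234" and "rfc 1234" before the Q objects are OR-ed together. A compact sketch of just that step, assuming the same three-letter series prefixes; this helper is not part of the change:

def name_variants(look_for):
    # Yield the spaceless and single-space forms of an rfc/bcp/std/fyi query.
    prefix, rest = look_for.lower()[:3], look_for[3:].strip()
    if prefix in ("rfc", "bcp", "std", "fyi") and rest.isdigit():
        for candidate in (prefix + rest, prefix + " " + rest):
            if candidate != look_for:
                yield candidate

# e.g. list(name_variants("RFC 1234")) -> ["rfc1234", "rfc 1234"]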
# rfc/active/old check buttons
allowed_draft_states = []
if query["rfcs"]:
allowed_draft_states.append("rfc")
if query["activedrafts"]:
allowed_draft_states.append("active")
if query["olddrafts"]:
@ -249,17 +291,17 @@ def frontpage(request):
def search_for_name(request, name):
def find_unique(n):
exact = DocAlias.objects.filter(name__iexact=n).first()
exact = Document.objects.filter(name__iexact=n).first()
if exact:
return exact.name
aliases = DocAlias.objects.filter(name__istartswith=n)[:2]
if len(aliases) == 1:
return aliases[0].name
startswith = Document.objects.filter(name__istartswith=n)[:2]
if len(startswith) == 1:
return startswith[0].name
aliases = DocAlias.objects.filter(name__icontains=n)[:2]
if len(aliases) == 1:
return aliases[0].name
contains = Document.objects.filter(name__icontains=n)[:2]
if len(contains) == 1:
return contains[0].name
return None
@ -292,13 +334,13 @@ def search_for_name(request, name):
if redirect_to:
rev = rev_split.group(2)
# check if we can redirect directly to the rev if it's a draft; for an rfc, always redirect to the main page
if not redirect_to.startswith('rfc') and DocHistory.objects.filter(doc__docalias__name=redirect_to, rev=rev).exists():
if not redirect_to.startswith('rfc') and DocHistory.objects.filter(doc__name=redirect_to, rev=rev).exists():
return cached_redirect(cache_key, urlreverse("ietf.doc.views_doc.document_main", kwargs={ "name": redirect_to, "rev": rev }))
else:
return cached_redirect(cache_key, urlreverse("ietf.doc.views_doc.document_main", kwargs={ "name": redirect_to }))
# build appropriate flags based on string prefix
doctypenames = DocTypeName.objects.filter(used=True)
doctypenames = DocTypeName.objects.filter(used=True).exclude(slug__in=["bcp","std","fyi"])
# This would have been more straightforward if document prefixes couldn't
# contain a dash. Probably, document prefixes shouldn't contain a dash ...
search_args = "?name=%s" % n
@ -317,9 +359,21 @@ def search_for_name(request, name):
def state_name(doc_type, state, shorten=True):
name = ""
if doc_type in ["draft", "rfc"] and state not in ["rfc", "expired"]:
# Note doc_type rfc here is _not_ necessarily Document.type - for some callers
# it is a type derived from draft... The ad_workload view needs more rework so that
# the code isn't having to shadow-box so much.
if doc_type == "rfc":
if state == "rfc":
name = "RFC"
if name == "":
s = State.objects.filter(type="rfc",slug=state).first()
if s:
name = s.name
if name == "":
name = State.objects.get(type__in=["draft", "draft-iesg"], slug=state).name
elif doc_type == "draft" and state not in ["rfc", "expired"]:
name = State.objects.get(type__in=["draft", "draft-iesg"], slug=state).name
elif state == "rfc":
elif doc_type == "draft" and state == "rfc":
name = "RFC"
elif doc_type == "conflrev" and state.startswith("appr"):
name = "Approved"
@ -403,7 +457,7 @@ def ad_workload(request):
)
ad.buckets = copy.deepcopy(bucket_template)
for doc in Document.objects.filter(ad=ad):
for doc in Document.objects.exclude(type_id="rfc").filter(ad=ad):
dt = doc_type(doc)
state = doc_state(doc)
@ -696,47 +750,50 @@ def recent_drafts(request, days=7):
})
def index_all_drafts(request):
def index_all_drafts(request): # Should we rename this?
# try to be efficient since this view returns a lot of data
categories = []
for s in ("active", "rfc", "expired", "repl", "auth-rm", "ietf-rm"):
# Gather drafts
for s in ("active", "expired", "repl", "auth-rm", "ietf-rm"):
state = State.objects.get(type="draft", slug=s)
if state.slug == "rfc":
heading = "RFCs"
elif state.slug in ("ietf-rm", "auth-rm"):
if state.slug in ("ietf-rm", "auth-rm"):
heading = "Internet-Drafts %s" % state.name
else:
heading = "%s Internet-Drafts" % state.name
draft_names = DocAlias.objects.filter(docs__states=state).values_list("name", "docs__name")
drafts = Document.objects.filter(type_id="draft", states=state).order_by("name")
names = []
names_to_skip = set()
for name, doc in draft_names:
sort_key = name
if name != doc:
if not name.startswith("rfc"):
name, doc = doc, name
names_to_skip.add(doc)
if name.startswith("rfc"):
name = name.upper()
sort_key = '%09d' % (100000000-int(name[3:]))
names.append((name, sort_key))
names.sort(key=lambda t: t[1])
names = [f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=n))}\">{n}</a>'
for n, __ in names if n not in names_to_skip]
names = [
f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=doc.name))}\">{doc.name}</a>'
for doc in drafts
]
categories.append((state,
heading,
len(names),
"<br>".join(names)
))
# gather RFCs
rfcs = Document.objects.filter(type_id="rfc").order_by('-rfc_number')
names = [
f'<a href=\"{urlreverse("ietf.doc.views_doc.document_main", kwargs=dict(name=rfc.name))}\">{rfc.name.upper()}</a>'
for rfc in rfcs
]
state = State.objects.get(type_id="rfc", slug="published")
categories.append((state,
"RFCs",
len(names),
"<br>".join(names)
))
# Return to the previous section ordering
categories = categories[0:1]+categories[5:]+categories[1:5]
return render(request, 'doc/index_all_drafts.html', { "categories": categories })
def index_active_drafts(request):
@ -748,27 +805,42 @@ def index_active_drafts(request):
slowcache.set(cache_key, groups, 15*60)
return render(request, "doc/index_active_drafts.html", { 'groups': groups })
def ajax_select2_search_docs(request, model_name, doc_type):
if model_name == "docalias":
model = DocAlias
else:
model = Document
def ajax_select2_search_docs(request, model_name, doc_type): # TODO - remove model_name argument...
"""Get results for a select2 search field
doc_type can be "draft", "rfc", or "all", to search for only docs of type "draft", only docs of
type "rfc", or docs of type "draft" or "rfc" or any of the subseries ("bcp", "std", ...).
If a need arises for searching _only_ for draft or rfc, without including the subseries, then an
additional option or options will be needed.
"""
model = Document # Earlier versions allowed searching over DocAlias which no longer exists
q = [w.strip() for w in request.GET.get('q', '').split() if w.strip()]
if not q:
objs = model.objects.none()
else:
qs = model.objects.all()
if model == Document:
qs = qs.filter(type=doc_type)
elif model == DocAlias:
qs = qs.filter(docs__type=doc_type)
if doc_type == "draft":
types = ["draft"]
elif doc_type == "rfc":
types = ["rfc"]
elif doc_type == "all":
types = ("draft", "rfc", "bcp", "fyi", "std")
else:
return HttpResponseBadRequest("Invalid document type")
qs = model.objects.filter(type__in=[t.strip() for t in types])
for t in q:
qs = qs.filter(name__icontains=t)
objs = qs.distinct().order_by("name")[:20]
return HttpResponse(select2_id_doc_name_json(model, objs), content_type='application/json')
def index_subseries(request, type_id):
docs = sorted(Document.objects.filter(type_id=type_id),key=lambda o: int(o.name[3:]))
if len(docs)>0:
type = docs[0].type
else:
type = DocTypeName.objects.get(slug=type_id)
return render(request, "doc/index_subseries.html", {"type": type, "docs": docs})

View file

@ -13,7 +13,7 @@ from django.template.loader import render_to_string
from ietf.utils import markdown
from django.utils.html import escape
from ietf.doc.models import Document, DocAlias, DocEvent, NewRevisionDocEvent, State
from ietf.doc.models import Document, DocEvent, NewRevisionDocEvent, State
from ietf.group.models import Group
from ietf.ietfauth.utils import role_required
from ietf.utils.text import xslugify
@ -242,8 +242,6 @@ def new_statement(request):
time=statement.time,
)
statement.save_with_history([e1, e2])
alias = DocAlias.objects.create(name=name)
alias.docs.set([statement])
markdown_content = ""
if statement_submission == "upload":
if not writing_pdf:

View file

@ -21,7 +21,7 @@ from django.utils.html import escape
import debug # pyflakes:ignore
from ietf.doc.mails import email_ad_approved_status_change
from ietf.doc.models import ( Document, DocAlias, State, DocEvent, BallotDocEvent,
from ietf.doc.models import ( Document, State, DocEvent, BallotDocEvent,
BallotPositionDocEvent, NewRevisionDocEvent, WriteupDocEvent, STATUSCHANGE_RELATIONS )
from ietf.doc.forms import AdForm
from ietf.doc.lastcall import request_last_call
@ -104,8 +104,8 @@ def change_state(request, name, option=None):
relationship__slug__in=STATUSCHANGE_RELATIONS
)
related_doc_info = [
dict(title=rel_doc.target.document.title,
canonical_name=rel_doc.target.document.canonical_name(),
dict(title=rel_doc.target.title,
name=rel_doc.target.name,
newstatus=newstatus(rel_doc))
for rel_doc in related_docs
]
@ -154,7 +154,7 @@ class UploadForm(forms.Form):
return get_cleaned_text_file_content(self.cleaned_data["txt"])
def save(self, doc):
filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
with io.open(filename, 'w', encoding='utf-8') as destination:
if self.cleaned_data['txt']:
destination.write(self.cleaned_data['txt'])
@ -168,7 +168,7 @@ def submit(request, name):
login = request.user.person
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
path = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
not_uploaded_yet = doc.rev == "00" and not os.path.exists(path)
if not_uploaded_yet:
@ -185,7 +185,7 @@ def submit(request, name):
events = []
e = NewRevisionDocEvent(doc=doc, by=login, type="new_revision")
e.desc = "New version available: <b>%s-%s.txt</b>" % (doc.canonical_name(), doc.rev)
e.desc = "New version available: <b>%s-%s.txt</b>" % (doc.name, doc.rev)
e.rev = doc.rev
e.save()
events.append(e)
@ -217,7 +217,7 @@ def submit(request, name):
dict(),
)
else:
filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.canonical_name(), doc.rev))
filename = os.path.join(settings.STATUS_CHANGE_PATH, '%s-%s.txt' % (doc.name, doc.rev))
try:
with io.open(filename, 'r') as f:
init["content"] = f.read()
@ -259,7 +259,7 @@ def edit_title(request, name):
init = { "title" : status_change.title }
form = ChangeTitleForm(initial=init)
titletext = '%s-%s.txt' % (status_change.canonical_name(),status_change.rev)
titletext = '%s-%s.txt' % (status_change.name,status_change.rev)
return render(request, 'doc/change_title.html',
{'form': form,
'doc': status_change,
@ -290,7 +290,7 @@ def edit_ad(request, name):
init = { "ad" : status_change.ad_id }
form = AdForm(initial=init)
titletext = '%s-%s.txt' % (status_change.canonical_name(),status_change.rev)
titletext = '%s-%s.txt' % (status_change.name,status_change.rev)
return render(request, 'doc/change_ad.html',
{'form': form,
'doc': status_change,
@ -315,7 +315,7 @@ def default_approval_text(status_change,relateddoc):
current_text = status_change.text_or_error() # pyflakes:ignore
if relateddoc.target.document.std_level_id in ('std','ps','ds','bcp',):
if relateddoc.target.std_level_id in ('std','ps','ds','bcp',):
action = "Protocol Action"
else:
action = "Document Action"
@ -326,7 +326,7 @@ def default_approval_text(status_change,relateddoc):
dict(status_change=status_change,
status_change_url = settings.IDTRACKER_BASE_URL+status_change.get_absolute_url(),
relateddoc= relateddoc,
relateddoc_url = settings.IDTRACKER_BASE_URL+relateddoc.target.document.get_absolute_url(),
relateddoc_url = settings.IDTRACKER_BASE_URL+relateddoc.target.get_absolute_url(),
approved_text = current_text,
action=action,
newstatus=newstatus(relateddoc),
@ -394,7 +394,7 @@ def approve(request, name):
for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS):
# Add a document event to each target
c = DocEvent(type="added_comment", doc=rel.target.document, rev=rel.target.document.rev, by=login)
c = DocEvent(type="added_comment", doc=rel.target, rev=rel.target.rev, by=login)
c.desc = "New status of %s approved by the IESG\n%s%s" % (newstatus(rel), settings.IDTRACKER_BASE_URL,reverse('ietf.doc.views_doc.document_main', kwargs={'name': status_change.name}))
c.save()
@ -405,7 +405,7 @@ def approve(request, name):
init = []
for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS):
init.append({"announcement_text" : escape(default_approval_text(status_change,rel)),
"label": "Announcement text for %s to %s"%(rel.target.document.canonical_name(),newstatus(rel)),
"label": "Announcement text for %s to %s"%(rel.target.name,newstatus(rel)),
})
formset = AnnouncementFormSet(initial=init)
for form in formset.forms:
@ -445,7 +445,7 @@ def clean_helper(form, formtype):
if not re.match(r'(?i)rfc\d{1,4}',key):
errors.append(key+" is not a valid RFC - please use the form RFCn\n")
elif not DocAlias.objects.filter(name=key):
elif not Document.objects.filter(name=key):
errors.append(key+" does not exist\n")
if new_relations[key] not in STATUSCHANGE_RELATIONS:
@ -543,7 +543,7 @@ def start_rfc_status_change(request, name=None):
if name:
if not re.match("(?i)rfc[0-9]{1,4}",name):
raise Http404
seed_rfc = get_object_or_404(Document, type="draft", docalias__name=name)
seed_rfc = get_object_or_404(Document, type="rfc", name=name)
login = request.user.person
@ -566,14 +566,11 @@ def start_rfc_status_change(request, name=None):
group=iesg_group,
)
status_change.set_state(form.cleaned_data['create_in_state'])
DocAlias.objects.create( name= 'status-change-'+form.cleaned_data['document_name']).docs.add(status_change)
for key in form.cleaned_data['relations']:
status_change.relateddocument_set.create(target=DocAlias.objects.get(name=key),
status_change.relateddocument_set.create(target=Document.objects.get(name=key),
relationship_id=form.cleaned_data['relations'][key])
tc_date = form.cleaned_data['telechat_date']
if tc_date:
update_telechat(request, status_change, login, tc_date)
@ -583,9 +580,9 @@ def start_rfc_status_change(request, name=None):
init = {}
if name:
init['title'] = "%s to CHANGETHIS" % seed_rfc.title
init['document_name'] = "%s-to-CHANGETHIS" % seed_rfc.canonical_name()
init['document_name'] = "%s-to-CHANGETHIS" % seed_rfc.name
relations={}
relations[seed_rfc.canonical_name()]=None
relations[seed_rfc.name]=None
init['relations'] = relations
form = StartStatusChangeForm(initial=init)
@ -611,11 +608,11 @@ def edit_relations(request, name):
old_relations={}
for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS):
old_relations[rel.target.document.canonical_name()]=rel.relationship.slug
old_relations[rel.target.name]=rel.relationship.slug
new_relations=form.cleaned_data['relations']
status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS).delete()
for key in new_relations:
status_change.relateddocument_set.create(target=DocAlias.objects.get(name=key),
status_change.relateddocument_set.create(target=Document.objects.get(name=key),
relationship_id=new_relations[key])
c = DocEvent(type="added_comment", doc=status_change, rev=status_change.rev, by=login)
c.desc = "Affected RFC list changed.\nOLD:"
@ -632,7 +629,7 @@ def edit_relations(request, name):
else:
relations={}
for rel in status_change.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS):
relations[rel.target.document.canonical_name()]=rel.relationship.slug
relations[rel.target.name]=rel.relationship.slug
init = { "relations":relations,
}
form = EditStatusChangeForm(initial=init)
@ -659,8 +656,8 @@ def generate_last_call_text(request, doc):
settings=settings,
requester=requester,
expiration_date=expiration_date.strftime("%Y-%m-%d"),
changes=['%s from %s to %s\n (%s)'%(rel.target.name.upper(),rel.target.document.std_level.name,newstatus(rel),rel.target.document.title) for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)],
urls=[rel.target.document.get_absolute_url() for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)],
changes=['%s from %s to %s\n (%s)'%(rel.target.name.upper(),rel.target.std_level.name,newstatus(rel),rel.target.title) for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)],
urls=[rel.target.get_absolute_url() for rel in doc.relateddocument_set.filter(relationship__slug__in=STATUSCHANGE_RELATIONS)],
cc=cc
)
)

View file

@ -369,7 +369,7 @@ def edit_milestones(request, acronym, group_type=None, milestone_set="current"):
email_milestones_changed(request, group, changes, states)
if milestone_set == "charter":
return redirect('ietf.doc.views_doc.document_main', name=group.charter.canonical_name())
return redirect('ietf.doc.views_doc.document_main', name=group.charter.name)
else:
return HttpResponseRedirect(group.about_url())
else:

View file

@ -69,7 +69,7 @@ class GroupStatsTests(TestCase):
a = WgDraftFactory()
b = WgDraftFactory()
RelatedDocument.objects.create(
source=a, target=b.docalias.first(), relationship_id="refnorm"
source=a, target=b, relationship_id="refnorm"
)
def test_group_stats(self):
@ -95,7 +95,7 @@ class GroupDocDependencyTests(TestCase):
a = WgDraftFactory()
b = WgDraftFactory()
RelatedDocument.objects.create(
source=a, target=b.docalias.first(), relationship_id="refnorm"
source=a, target=b, relationship_id="refnorm"
)
def test_group_document_dependencies(self):

View file

@ -27,7 +27,7 @@ from django.utils.html import escape
from ietf.community.models import CommunityList
from ietf.community.utils import reset_name_contains_index_for_rule
from ietf.doc.factories import WgDraftFactory, IndividualDraftFactory, CharterFactory, BallotDocEventFactory
from ietf.doc.models import Document, DocAlias, DocEvent, State
from ietf.doc.models import Document, DocEvent, State
from ietf.doc.utils_charter import charter_name_for_group
from ietf.group.admin import GroupForm as AdminGroupForm
from ietf.group.factories import (GroupFactory, RoleFactory, GroupEventFactory,
@ -117,8 +117,9 @@ class GroupPagesTests(TestCase):
chair = Email.objects.filter(role__group=group, role__name="chair")[0]
with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f:
f.write("This is a charter.")
(
Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt"
).write_text("This is a charter.")
url = urlreverse('ietf.group.views.wg_summary_area', kwargs=dict(group_type="wg"))
r = self.client.get(url)
@ -264,8 +265,9 @@ class GroupPagesTests(TestCase):
group = CharterFactory().group
draft = WgDraftFactory(group=group)
with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f:
f.write("This is a charter.")
(
Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt"
).write_text("This is a charter.")
milestone = GroupMilestone.objects.create(
group=group,
@ -385,7 +387,6 @@ class GroupPagesTests(TestCase):
type_id="slides",
)
doc.set_state(State.objects.get(type="slides", slug="active"))
DocAlias.objects.create(name=doc.name).docs.add(doc)
for url in group_urlreverse_list(group, 'ietf.group.views.materials'):
r = self.client.get(url)
@ -668,8 +669,9 @@ class GroupEditTests(TestCase):
self.assertTrue(len(q('form .is-invalid')) > 0)
# edit info
with (Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).open("w") as f:
f.write("This is a charter.")
(
Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt"
).write_text("This is a charter.")
area = group.parent
ad = Person.objects.get(name="Areað Irector")
state = GroupStateName.objects.get(slug="bof")
@ -711,7 +713,9 @@ class GroupEditTests(TestCase):
self.assertEqual(group.list_archive, "archive.mars")
self.assertEqual(group.description, '')
self.assertTrue((Path(settings.CHARTER_PATH) / ("%s-%s.txt" % (group.charter.canonical_name(), group.charter.rev))).exists())
self.assertTrue(
(Path(settings.CHARTER_PATH) / f"{group.charter.name}-{group.charter.rev}.txt").exists()
)
self.assertEqual(len(outbox), 2)
self.assertTrue('Personnel change' in outbox[0]['Subject'])
for prefix in ['ad1','ad2','aread','marschairman','marsdelegate']:

View file

@ -2,8 +2,7 @@
# -*- coding: utf-8 -*-
import io
import os
from pathlib import Path
from django.db.models import Q
from django.shortcuts import get_object_or_404
@ -55,15 +54,14 @@ def get_charter_text(group):
if (h.rev > c.rev and not (c_appr and not h_appr)) or (h_appr and not c_appr):
c = h
filename = os.path.join(c.get_file_path(), "%s-%s.txt" % (c.canonical_name(), c.rev))
filename = Path(c.get_file_path()) / f"{c.name}-{c.rev}.txt"
try:
with io.open(filename, 'rb') as f:
text = f.read()
try:
text = text.decode('utf8')
except UnicodeDecodeError:
text = text.decode('latin1')
return text
text = filename.read_bytes()
try:
text = text.decode('utf8')
except UnicodeDecodeError:
text = text.decode('latin1')
return text
except IOError:
return 'Error Loading Group Charter'
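The pathlib rewrite of get_charter_text() above reads the charter file as bytes and falls back from UTF-8 to Latin-1. A self-contained sketch of that read-and-decode pattern (the function name and example path are illustrative):

from pathlib import Path

def read_charter_text(path):
    """Read a charter file as UTF-8, falling back to Latin-1, mirroring get_charter_text() above."""
    try:
        data = Path(path).read_bytes()
    except OSError:                      # IOError is an alias of OSError
        return "Error Loading Group Charter"
    try:
        return data.decode("utf8")
    except UnicodeDecodeError:
        return data.decode("latin1")

# Example (path is illustrative):
# print(read_charter_text("/path/to/charter-ietf-mars-01-00.txt"))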
@ -191,7 +189,7 @@ def setup_default_community_list_for_group(group):
community_list=clist,
rule_type="group_rfc",
group=group,
state=State.objects.get(slug="rfc", type="draft"),
state=State.objects.get(slug="published", type="rfc"),
)
SearchRule.objects.create(
community_list=clist,

View file

@ -61,7 +61,7 @@ import debug # pyflakes:ignore
from ietf.community.models import CommunityList, EmailSubscription
from ietf.community.utils import docs_tracked_by_community_list
from ietf.doc.models import DocTagName, State, DocAlias, RelatedDocument, Document, DocEvent
from ietf.doc.models import DocTagName, State, RelatedDocument, Document, DocEvent
from ietf.doc.templatetags.ietf_filters import clean_whitespace
from ietf.doc.utils import get_chartering_type, get_tags_for_stream_id
from ietf.doc.utils_charter import charter_name_for_group, replace_charter_of_replaced_group
@ -186,17 +186,12 @@ def fill_in_wg_roles(group):
group.secretaries = get_roles("secr", [])
def fill_in_wg_drafts(group):
aliases = DocAlias.objects.filter(docs__type="draft", docs__group=group).prefetch_related('docs').order_by("name")
group.drafts = []
group.rfcs = []
for a in aliases:
if a.name.startswith("draft"):
group.drafts.append(a)
else:
group.rfcs.append(a)
a.remote_field = RelatedDocument.objects.filter(source=a.document,relationship_id__in=['obs','updates']).distinct()
a.invrel = RelatedDocument.objects.filter(target=a,relationship_id__in=['obs','updates']).distinct()
group.drafts = Document.objects.filter(type_id="draft", group=group).order_by("name")
group.rfcs = Document.objects.filter(type_id="rfc", group=group).order_by("rfc_number")
for rfc in group.rfcs:
# TODO: remote_field?
rfc.remote_field = RelatedDocument.objects.filter(source=rfc,relationship_id__in=['obs','updates']).distinct()
rfc.invrel = RelatedDocument.objects.filter(target=rfc,relationship_id__in=['obs','updates']).distinct()
def check_group_email_aliases():
pattern = re.compile(r'expand-(.*?)(-\w+)@.*? +(.*)$')
@ -475,8 +470,8 @@ def prepare_group_documents(request, group, clist):
# non-WG drafts and call for WG adoption are considered related
if (d.group != group
or (d.stream_id and d.get_state_slug("draft-stream-%s" % d.stream_id) in ("c-adopt", "wg-cand"))):
if d.get_state_slug() != "expired":
d.search_heading = "Related Internet-Draft"
if (d.type_id == "draft" and d.get_state_slug() not in ["expired","rfc"]) or d.type_id == "rfc":
d.search_heading = "Related Internet-Drafts and RFCs"
docs_related.append(d)
else:
if not (d.get_state_slug('draft-iesg') == "dead" or (d.stream_id and d.get_state_slug("draft-stream-%s" % d.stream_id) == "dead")):
@ -535,9 +530,8 @@ def group_documents_txt(request, acronym, group_type=None):
rows = []
for d in itertools.chain(docs, docs_related):
rfc_number = d.rfc_number()
if rfc_number != None:
name = rfc_number
if d.type_id == "rfc":
name = str(d.rfc_number)
else:
name = "%s-%s" % (d.name, d.rev)
@ -747,7 +741,7 @@ def dependencies(request, acronym, group_type=None):
relationship__slug__startswith="ref",
)
both_rfcs = Q(source__states__slug="rfc", target__docs__states__slug="rfc")
both_rfcs = Q(source__type_id="rfc", target__type_id="rfc")
inactive = Q(source__states__slug__in=["expired", "repl"])
attractor = Q(target__name__in=["rfc5000", "rfc5741"])
removed = Q(source__states__slug__in=["auth-rm", "ietf-rm"])
@ -761,23 +755,23 @@ def dependencies(request, acronym, group_type=None):
links = set()
for x in relations:
target_state = x.target.document.get_state_slug("draft")
target_state = x.target.get_state_slug("draft")
if target_state != "rfc" or x.is_downref():
links.add(x)
replacements = RelatedDocument.objects.filter(
relationship__slug="replaces",
target__docs__in=[x.target.document for x in links],
target__in=[x.target for x in links],
)
for x in replacements:
links.add(x)
nodes = set([x.source for x in links]).union([x.target.document for x in links])
nodes = set([x.source for x in links]).union([x.target for x in links])
graph = {
"nodes": [
{
"id": x.canonical_name(),
"id": x.name,
"rfc": x.get_state("draft").slug == "rfc",
"post-wg": not x.get_state("draft-iesg").slug
in ["idexists", "watching", "dead"],
@ -795,8 +789,8 @@ def dependencies(request, acronym, group_type=None):
],
"links": [
{
"source": x.source.canonical_name(),
"target": x.target.document.canonical_name(),
"source": x.source.name,
"target": x.target.name,
"rel": "downref" if x.is_downref() else x.relationship.slug,
}
for x in links
@ -1283,7 +1277,10 @@ def stream_documents(request, acronym):
editable = has_role(request.user, "Secretariat") or group.has_role(request.user, "chair")
stream = StreamName.objects.get(slug=acronym)
qs = Document.objects.filter(states__type="draft", states__slug__in=["active", "rfc"], stream=acronym)
qs = Document.objects.filter(stream=acronym).filter(
Q(type_id="draft", states__type="draft", states__slug="active")
| Q(type_id="rfc")
)
docs, meta = prepare_document_table(request, qs, max_results=1000)
return render(request, 'group/stream_documents.html', {'stream':stream, 'docs':docs, 'meta':meta, 'editable':editable } )

View file

@ -14,7 +14,7 @@ from django.utils import timezone
import debug # pyflakes:ignore
from ietf.doc.models import Document, DocEvent, DocumentAuthor, RelatedDocument, DocAlias, State
from ietf.doc.models import Document, DocEvent, DocumentAuthor, RelatedDocument, State
from ietf.doc.models import LastCallDocEvent, NewRevisionDocEvent
from ietf.doc.models import IESG_SUBSTATE_TAGS
from ietf.doc.templatetags.ietf_filters import clean_whitespace
@ -31,15 +31,18 @@ def all_id_txt():
t = revision_time.get(name)
return t.strftime("%Y-%m-%d") if t else ""
rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc",
docs__states=State.objects.get(type="draft", slug="rfc")).values_list("docs__name", "name"))
rfcs = dict()
for rfc in Document.objects.filter(type_id="rfc"):
draft = rfc.came_from_draft()
if draft is not None:
rfcs[draft.name] = rfc.name
replacements = dict(RelatedDocument.objects.filter(target__docs__states=State.objects.get(type="draft", slug="repl"),
replacements = dict(RelatedDocument.objects.filter(target__states=State.objects.get(type="draft", slug="repl"),
relationship="replaces").values_list("target__name", "source__name"))
# we need a distinct to prevent the queries below from multiplying the result
all_ids = Document.objects.filter(type="draft").order_by('name').exclude(name__startswith="rfc").distinct()
all_ids = Document.objects.filter(type="draft").order_by('name').distinct()
res = ["\nInternet-Drafts Status Summary\n"]
@ -77,9 +80,9 @@ def all_id_txt():
last_field = ""
if s.slug == "rfc":
a = rfc_aliases.get(name)
if a:
last_field = a[3:]
rfc = rfcs.get(name)
if rfc:
last_field = rfc[3:] # Rework this to take advantage of having the number at hand already.
elif s.slug == "repl":
state += " replaced by " + replacements.get(name, "0")
@ -108,14 +111,17 @@ def file_types_for_drafts():
def all_id2_txt():
# this returns a lot of data so try to be efficient
drafts = Document.objects.filter(type="draft").exclude(name__startswith="rfc").order_by('name')
drafts = Document.objects.filter(type="draft").order_by('name')
drafts = drafts.select_related('group', 'group__parent', 'ad', 'intended_std_level', 'shepherd', )
drafts = drafts.prefetch_related("states")
rfc_aliases = dict(DocAlias.objects.filter(name__startswith="rfc",
docs__states=State.objects.get(type="draft", slug="rfc")).values_list("docs__name", "name"))
rfcs = dict()
for rfc in Document.objects.filter(type_id="rfc"):
draft = rfc.came_from_draft()
if draft is not None:
rfcs[draft.name] = rfc.name
replacements = dict(RelatedDocument.objects.filter(target__docs__states=State.objects.get(type="draft", slug="repl"),
replacements = dict(RelatedDocument.objects.filter(target__states=State.objects.get(type="draft", slug="repl"),
relationship="replaces").values_list("target__name", "source__name"))
revision_time = dict(DocEvent.objects.filter(type="new_revision", doc__name__startswith="draft-").order_by('time').values_list("doc__name", "time"))
@ -164,9 +170,9 @@ def all_id2_txt():
# 4
rfc_number = ""
if state == "rfc":
a = rfc_aliases.get(d.name)
if a:
rfc_number = a[3:]
rfc = rfcs.get(d.name)
if rfc:
rfc_number = rfc[3:]
fields.append(rfc_number)
# 5
repl = ""

View file

@ -11,8 +11,8 @@ from django.utils import timezone
import debug # pyflakes:ignore
from ietf.doc.factories import WgDraftFactory
from ietf.doc.models import Document, DocAlias, RelatedDocument, State, LastCallDocEvent, NewRevisionDocEvent
from ietf.doc.factories import WgDraftFactory, RfcFactory
from ietf.doc.models import Document, RelatedDocument, State, LastCallDocEvent, NewRevisionDocEvent
from ietf.group.factories import GroupFactory
from ietf.name.models import DocRelationshipName
from ietf.idindex.index import all_id_txt, all_id2_txt, id_index_txt
@ -41,7 +41,8 @@ class IndexTests(TestCase):
# published
draft.set_state(State.objects.get(type="draft", slug="rfc"))
DocAlias.objects.create(name="rfc1234").docs.add(draft)
rfc = RfcFactory(rfc_number=1234)
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
txt = all_id_txt()
self.assertTrue(draft.name + "-" + draft.rev in txt)
@ -52,8 +53,13 @@ class IndexTests(TestCase):
RelatedDocument.objects.create(
relationship=DocRelationshipName.objects.get(slug="replaces"),
source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
target=draft.docalias.get(name__startswith="draft"))
source=Document.objects.create(
type_id="draft",
rev="00",
name="draft-test-replacement"
),
target=draft
)
txt = all_id_txt()
self.assertTrue(draft.name + "-" + draft.rev in txt)
@ -103,7 +109,8 @@ class IndexTests(TestCase):
# test RFC
draft.set_state(State.objects.get(type="draft", slug="rfc"))
DocAlias.objects.create(name="rfc1234").docs.add(draft)
rfc = RfcFactory(rfc_number=1234)
draft.relateddocument_set.create(relationship_id="became_rfc", target=rfc)
t = get_fields(all_id2_txt())
self.assertEqual(t[4], "1234")
@ -111,8 +118,12 @@ class IndexTests(TestCase):
draft.set_state(State.objects.get(type="draft", slug="repl"))
RelatedDocument.objects.create(
relationship=DocRelationshipName.objects.get(slug="replaces"),
source=Document.objects.create(type_id="draft", rev="00", name="draft-test-replacement"),
target=draft.docalias.get(name__startswith="draft"))
source=Document.objects.create(
type_id="draft",
rev="00",
name="draft-test-replacement"
),
target=draft)
t = get_fields(all_id2_txt())
self.assertEqual(t[5], "draft-test-replacement")

View file

@ -66,7 +66,7 @@ def get_doc_section(doc):
elif doc.type_id == 'statchg':
protocol_action = False
for relation in doc.relateddocument_set.filter(relationship__slug__in=('tops','tois','tohist','toinf','tobcp','toexp')):
if relation.relationship_id in ('tops','tois') or relation.target.document.std_level_id in ('std','ds','ps'):
if relation.relationship_id in ('tops','tois') or relation.target.std_level_id in ('std','ds','ps'):
protocol_action = True
if protocol_action:
s = "2.3"
@ -186,7 +186,7 @@ def fill_in_agenda_docs(date, sections, docs=None):
doc.review_assignments = review_assignments_for_docs.get(doc.name, [])
elif doc.type_id == "conflrev":
doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
doc.conflictdoc = doc.relateddocument_set.get(relationship__slug='conflrev').target
elif doc.type_id == "charter":
pass
@ -219,4 +219,4 @@ def agenda_data(date=None):
fill_in_agenda_docs(date, sections)
fill_in_agenda_management_issues(date, sections)
return { 'date': date.isoformat(), 'sections': sections }
return { 'date': date.isoformat(), 'sections': sections }

View file

@ -17,7 +17,7 @@ from django.utils.html import escape
import debug # pyflakes:ignore
from ietf.doc.models import DocEvent, BallotPositionDocEvent, TelechatDocEvent
from ietf.doc.models import Document, DocAlias, State, RelatedDocument
from ietf.doc.models import Document, State, RelatedDocument
from ietf.doc.factories import WgDraftFactory, IndividualDraftFactory, ConflictReviewFactory, BaseDocumentFactory, CharterFactory, WgRfcFactory, IndividualRfcFactory
from ietf.doc.utils import create_ballot_if_not_open
from ietf.group.factories import RoleFactory, GroupFactory, DatedGroupMilestoneFactory, DatelessGroupMilestoneFactory
@ -150,8 +150,8 @@ class IESGAgendaTests(TestCase):
super().setUp()
mars = GroupFactory(acronym='mars',parent=Group.objects.get(acronym='farfut'))
wgdraft = WgDraftFactory(name='draft-ietf-mars-test', group=mars, intended_std_level_id='ps')
rfc = IndividualRfcFactory.create(stream_id='irtf', other_aliases=['rfc6666',], states=[('draft','rfc'),('draft-iesg','pub')], std_level_id='inf', )
wgdraft.relateddocument_set.create(target=rfc.docalias.get(name='rfc6666'), relationship_id='refnorm')
rfc = IndividualRfcFactory.create(stream_id='irtf', rfc_number=6666, std_level_id='inf', )
wgdraft.relateddocument_set.create(target=rfc, relationship_id='refnorm')
ise_draft = IndividualDraftFactory(name='draft-imaginary-independent-submission')
ise_draft.stream = StreamName.objects.get(slug="ise")
ise_draft.save_with_history([DocEvent(doc=ise_draft, rev=ise_draft.rev, type="changed_stream", by=Person.objects.get(user__username="secretary"), desc="Test")])
@ -281,7 +281,7 @@ class IESGAgendaTests(TestCase):
relation = RelatedDocument.objects.create(
source=statchg,
target=DocAlias.objects.filter(name__startswith='rfc', docs__std_level="ps")[0],
target=Document.objects.filter(type_id="rfc", std_level="ps").first(),
relationship_id="tohist")
statchg.group = Group.objects.get(acronym="mars")
@ -299,7 +299,7 @@ class IESGAgendaTests(TestCase):
self.assertTrue(statchg in agenda_data(date_str)["sections"]["2.3.3"]["docs"])
# 3.3 document status changes
relation.target = DocAlias.objects.filter(name__startswith='rfc', docs__std_level="inf")[0]
relation.target = Document.objects.filter(type_id="rfc", std_level="inf").first()
relation.save()
statchg.group = Group.objects.get(acronym="mars")

View file

@ -32,10 +32,10 @@ def telechat_page_count(date=None, docs=None):
pages_for_action += d.pages or 0
elif d.type_id == 'statchg':
for rel in d.related_that_doc(STATUSCHANGE_RELATIONS):
pages_for_action += rel.document.pages or 0
pages_for_action += rel.pages or 0
elif d.type_id == 'conflrev':
for rel in d.related_that_doc('conflrev'):
pages_for_action += rel.document.pages or 0
pages_for_action += rel.pages or 0
else:
pass
@ -43,10 +43,10 @@ def telechat_page_count(date=None, docs=None):
for d in for_approval-set(drafts):
if d.type_id == 'statchg':
for rel in d.related_that_doc(STATUSCHANGE_RELATIONS):
related_pages += rel.document.pages or 0
related_pages += rel.pages or 0
elif d.type_id == 'conflrev':
for rel in d.related_that_doc('conflrev'):
related_pages += rel.document.pages or 0
related_pages += rel.pages or 0
else:
# There's really nothing to rely on to give a reading load estimate for charters
pass

View file

@ -122,7 +122,7 @@ def agenda_json(request, date=None):
for doc in docs:
wginfo = {
'docname': doc.canonical_name(),
'docname': doc.name,
'rev': doc.rev,
'wgname': doc.group.name,
'acronym': doc.group.acronym,
@ -137,7 +137,7 @@ def agenda_json(request, date=None):
for doc in docs:
docinfo = {
'docname':doc.canonical_name(),
'docname':doc.name,
'title':doc.title,
'ad':doc.ad.name if doc.ad else None,
}
@ -149,8 +149,8 @@ def agenda_json(request, date=None):
if doc.type_id == "draft":
docinfo['rev'] = doc.rev
docinfo['intended-std-level'] = str(doc.intended_std_level)
if doc.rfc_number():
docinfo['rfc-number'] = doc.rfc_number()
if doc.type_id == "rfc":
docinfo['rfc-number'] = doc.rfc_number
iana_state = doc.get_state("draft-iana-review")
if iana_state and iana_state.slug in ("not-ok", "changed", "need-rev"):
@ -170,8 +170,8 @@ def agenda_json(request, date=None):
elif doc.type_id == 'conflrev':
docinfo['rev'] = doc.rev
td = doc.relateddocument_set.get(relationship__slug='conflrev').target.document
docinfo['target-docname'] = td.canonical_name()
td = doc.relateddocument_set.get(relationship__slug='conflrev').target
docinfo['target-docname'] = td.name
docinfo['target-title'] = td.title
docinfo['target-rev'] = td.rev
docinfo['intended-std-level'] = str(td.intended_std_level)

View file

@ -94,7 +94,7 @@ admin.site.register(IprDocRel, IprDocRelAdmin)
class RelatedIprAdmin(admin.ModelAdmin):
list_display = ['source', 'target', 'relationship', ]
search_fields = ['source__name', 'target__name', 'target__docs__name', ]
search_fields = ['source__name', 'target__name', ]
raw_id_fields = ['source', 'target', ]
admin.site.register(RelatedIpr, RelatedIprAdmin)

View file

@ -42,7 +42,7 @@ class IprDisclosureBaseFactory(factory.django.DjangoModelFactory):
return
if extracted:
for doc in extracted:
IprDocRel.objects.create(disclosure=self,document=doc.docalias.first())
IprDocRel.objects.create(disclosure=self,document=doc)
@factory.post_generation
def updates(self, create, extracted, **kwargs):

View file

@ -14,7 +14,7 @@ from django.utils.encoding import force_str
import debug # pyflakes:ignore
from ietf.group.models import Group
from ietf.doc.fields import SearchableDocAliasField
from ietf.doc.fields import SearchableDocumentField
from ietf.ipr.mail import utc_from_string
from ietf.ipr.fields import SearchableIprDisclosuresField
from ietf.ipr.models import (IprDocRel, IprDisclosureBase, HolderIprDisclosure,
@ -95,7 +95,7 @@ class AddEmailForm(forms.Form):
return self.cleaned_data
class DraftForm(forms.ModelForm):
document = SearchableDocAliasField(label="I-D name/RFC number", required=True, doc_type="draft")
document = SearchableDocumentField(label="I-D name/RFC number", required=True, doc_type="all")
class Meta:
model = IprDocRel

View file

@ -0,0 +1,104 @@
# Generated by Django 4.2.2 on 2023-06-16 13:40
from django.db import migrations
import django.db.models.deletion
from django.db.models import F, Subquery, OuterRef, ManyToManyField, CharField
import ietf.utils.models
def forward(apps, schema_editor):
IprDocRel = apps.get_model("ipr", "IprDocRel")
DocAlias = apps.get_model("doc", "DocAlias")
document_subquery = Subquery(
DocAlias.objects.filter(
pk=OuterRef("deprecated_document")
).values("docs")[:1]
)
name_subquery = Subquery(
DocAlias.objects.filter(
pk=OuterRef("deprecated_document")
).values("name")[:1]
)
IprDocRel.objects.annotate(
firstdoc=document_subquery,
aliasname=name_subquery,
).update(
document=F("firstdoc"),
originaldocumentaliasname=F("aliasname"),
)
# This might not be right - we may need here (and in the relateddocument migrations) to pay attention to
# whether the name being pointed to is an rfc name or a draft name and point to the right object instead...
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
("ipr", "0001_initial"),
("doc", "0016_relate_hist_no_aliases")
]
operations = [
migrations.AlterField(
model_name='iprdocrel',
name='document',
field=ietf.utils.models.ForeignKey(
db_index=False,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
),
),
migrations.RenameField(
model_name="iprdocrel",
old_name="document",
new_name="deprecated_document"
),
migrations.AlterField(
model_name='iprdocrel',
name='deprecated_document',
field=ietf.utils.models.ForeignKey(
db_index=True,
on_delete=django.db.models.deletion.CASCADE,
to='doc.docalias',
),
),
migrations.AddField(
model_name="iprdocrel",
name="document",
field=ietf.utils.models.ForeignKey(
default=1, # A lie, but a convenient one - no iprdocrel objects point here.
on_delete=django.db.models.deletion.CASCADE,
to="doc.document",
db_index=False,
),
preserve_default=False,
),
migrations.AddField(
model_name="iprdocrel",
name="originaldocumentaliasname",
field=CharField(max_length=255,null=True,blank=True),
preserve_default=True,
),
migrations.RunPython(forward, reverse),
migrations.AlterField(
model_name="iprdocrel",
name="document",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="doc.document",
db_index=True,
),
),
migrations.AlterField(
model_name='iprdisclosurebase',
name='docs',
field=ManyToManyField(through='ipr.IprDocRel', to='doc.Document'),
),
migrations.RemoveField(
model_name="iprdocrel",
name="deprecated_document",
field=ietf.utils.models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='doc.DocAlias',
),
),
]
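Expressed over plain data, the intent of the forward() data migration above is: resolve the deprecated DocAlias foreign key to its first underlying Document and keep the alias name in originaldocumentaliasname. A toy sketch follows; the dict shapes and values are illustrative only.

doc_aliases = {  # DocAlias pk -> related Document pks and alias name
    11: {"docs": [101], "name": "rfc1234"},
    12: {"docs": [102], "name": "draft-ietf-foo-bar"},
}
ipr_doc_rels = [
    {"pk": 1, "deprecated_document": 11},
    {"pk": 2, "deprecated_document": 12},
]
for row in ipr_doc_rels:
    alias = doc_aliases[row["deprecated_document"]]
    row["document"] = alias["docs"][0]                 # F("firstdoc") via document_subquery
    row["originaldocumentaliasname"] = alias["name"]   # F("aliasname") via name_subquery
print(ipr_doc_rels)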

View file

@ -0,0 +1,18 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("doc", "0017_delete_docalias"),
("ipr", "0002_iprdocrel_no_aliases"),
]
operations = [
migrations.AlterField(
model_name="iprdisclosurebase",
name="docs",
field=models.ManyToManyField(through="ipr.IprDocRel", to="doc.document"),
),
]

View file

@ -7,7 +7,7 @@ from django.db import models
from django.urls import reverse
from django.utils import timezone
from ietf.doc.models import DocAlias, DocEvent
from ietf.doc.models import Document, DocEvent
from ietf.name.models import DocRelationshipName,IprDisclosureStateName,IprLicenseTypeName,IprEventTypeName
from ietf.person.models import Person
from ietf.message.models import Message
@ -16,7 +16,7 @@ from ietf.utils.models import ForeignKey
class IprDisclosureBase(models.Model):
by = ForeignKey(Person) # who was logged in, or System if nobody was logged in
compliant = models.BooleanField("Complies to RFC3979", default=True)
docs = models.ManyToManyField(DocAlias, through='IprDocRel')
docs = models.ManyToManyField(Document, through='IprDocRel')
holder_legal_name = models.CharField(max_length=255)
notes = models.TextField("Additional notes", blank=True)
other_designations = models.CharField("Designations for other contributions", blank=True, max_length=255)
@ -160,9 +160,10 @@ class GenericIprDisclosure(IprDisclosureBase):
class IprDocRel(models.Model):
disclosure = ForeignKey(IprDisclosureBase)
document = ForeignKey(DocAlias)
document = ForeignKey(Document)
sections = models.TextField(blank=True)
revisions = models.CharField(max_length=16,blank=True) # allows strings like 01-07
originaldocumentaliasname = models.CharField(max_length=255, null=True, blank=True)
def doc_type(self):
name = self.document.name
@ -175,7 +176,7 @@ class IprDocRel(models.Model):
def formatted_name(self):
name = self.document.name
if name.startswith("rfc"):
if len(name) >= 3 and name[:3] in ("rfc", "bcp", "fyi", "std"):
return name.upper()
#elif self.revisions:
# return "%s-%s" % (name, self.revisions)
@ -234,10 +235,7 @@ class IprEvent(models.Model):
'removed_objfalse': 'removed_objfalse_related_ipr',
}
if self.type_id in event_type_map:
related_docs = set() # related docs, no duplicates
for alias in self.disclosure.docs.all():
related_docs.update(alias.docs.all())
for doc in related_docs:
for doc in self.disclosure.docs.distinct():
DocEvent.objects.create(
type=event_type_map[self.type_id],
time=self.time,

View file

@ -16,11 +16,11 @@ from ietf.ipr.models import ( IprDisclosureBase, IprDocRel, HolderIprDisclosure,
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
from ietf.doc.resources import DocumentResource
class IprDisclosureBaseResource(ModelResource):
by = ToOneField(PersonResource, 'by')
state = ToOneField(IprDisclosureStateNameResource, 'state')
docs = ToManyField(DocAliasResource, 'docs', null=True)
docs = ToManyField(DocumentResource, 'docs', null=True)
rel = ToManyField('ietf.ipr.resources.IprDisclosureBaseResource', 'rel', null=True)
class Meta:
queryset = IprDisclosureBase.objects.all()
@ -45,10 +45,9 @@ class IprDisclosureBaseResource(ModelResource):
}
api.ipr.register(IprDisclosureBaseResource())
from ietf.doc.resources import DocAliasResource
class IprDocRelResource(ModelResource):
disclosure = ToOneField(IprDisclosureBaseResource, 'disclosure')
document = ToOneField(DocAliasResource, 'document')
document = ToOneField(DocumentResource, 'document')
class Meta:
cache = SimpleCache()
queryset = IprDocRel.objects.all()
@ -66,13 +65,12 @@ api.ipr.register(IprDocRelResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource, IprLicenseTypeNameResource
from ietf.doc.resources import DocAliasResource
class HolderIprDisclosureResource(ModelResource):
by = ToOneField(PersonResource, 'by')
state = ToOneField(IprDisclosureStateNameResource, 'state')
iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
licensing = ToOneField(IprLicenseTypeNameResource, 'licensing')
docs = ToManyField(DocAliasResource, 'docs', null=True)
docs = ToManyField(DocumentResource, 'docs', null=True)
rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
class Meta:
cache = SimpleCache()
@ -111,12 +109,11 @@ api.ipr.register(HolderIprDisclosureResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class ThirdPartyIprDisclosureResource(ModelResource):
by = ToOneField(PersonResource, 'by')
state = ToOneField(IprDisclosureStateNameResource, 'state')
iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
docs = ToManyField(DocAliasResource, 'docs', null=True)
docs = ToManyField(DocumentResource, 'docs', null=True)
rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
class Meta:
cache = SimpleCache()
@ -168,12 +165,11 @@ api.ipr.register(RelatedIprResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class NonDocSpecificIprDisclosureResource(ModelResource):
by = ToOneField(PersonResource, 'by')
state = ToOneField(IprDisclosureStateNameResource, 'state')
iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
docs = ToManyField(DocAliasResource, 'docs', null=True)
docs = ToManyField(DocumentResource, 'docs', null=True)
rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
class Meta:
cache = SimpleCache()
@ -207,12 +203,11 @@ api.ipr.register(NonDocSpecificIprDisclosureResource())
from ietf.person.resources import PersonResource
from ietf.name.resources import IprDisclosureStateNameResource
from ietf.doc.resources import DocAliasResource
class GenericIprDisclosureResource(ModelResource):
by = ToOneField(PersonResource, 'by')
state = ToOneField(IprDisclosureStateNameResource, 'state')
iprdisclosurebase_ptr = ToOneField(IprDisclosureBaseResource, 'iprdisclosurebase_ptr')
docs = ToManyField(DocAliasResource, 'docs', null=True)
docs = ToManyField(DocumentResource, 'docs', null=True)
rel = ToManyField(IprDisclosureBaseResource, 'rel', null=True)
class Meta:
cache = SimpleCache()

View file

@ -32,7 +32,7 @@ def to_class_name(value):
return value.__class__.__name__
def draft_rev_at_time(iprdocrel):
draft = iprdocrel.document.document
draft = iprdocrel.document
event = iprdocrel.disclosure.get_latest_event_posted()
if event is None:
return ("","The Internet-Draft's revision at the time this disclosure was posted could not be determined.")
@ -47,7 +47,7 @@ def draft_rev_at_time(iprdocrel):
@register.filter
def no_revisions_message(iprdocrel):
draft = iprdocrel.document.document
draft = iprdocrel.document
if draft.type_id != "draft" or iprdocrel.revisions.strip() != "":
return ""
rev_at_time, exception = draft_rev_at_time(iprdocrel)

View file

@ -15,11 +15,11 @@ from django.utils import timezone
import debug # pyflakes:ignore
from ietf.doc.models import DocAlias
from ietf.doc.factories import (
DocumentFactory,
WgDraftFactory,
WgRfcFactory,
RfcFactory,
NewRevisionDocEventFactory
)
from ietf.group.factories import RoleFactory
@ -180,7 +180,8 @@ class IprTests(TestCase):
self.assertContains(r, draft.name)
self.assertNotContains(r, ipr.title)
DocAlias.objects.create(name="rfc321").docs.add(draft)
rfc = RfcFactory(rfc_number=321)
draft.relateddocument_set.create(relationship_id="became_rfc",target=rfc)
# find RFC
r = self.client.get(url + "?submit=rfc&rfc=321")
@ -285,7 +286,7 @@ class IprTests(TestCase):
"""Add a new specific disclosure. Note: submitter does not need to be logged in.
"""
draft = WgDraftFactory()
WgRfcFactory()
rfc = WgRfcFactory()
url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "specific" })
# successful post
@ -299,9 +300,9 @@ class IprTests(TestCase):
"ietfer_contact_info": "555-555-0101",
"iprdocrel_set-TOTAL_FORMS": 2,
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-0-revisions": '00',
"iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk,
"iprdocrel_set-1-document": rfc.pk,
"patent_number": "SE12345678901",
"patent_inventor": "A. Nonymous",
"patent_title": "A method of transferring bits",
@ -341,7 +342,7 @@ class IprTests(TestCase):
def test_new_specific_no_revision(self):
draft = WgDraftFactory()
WgRfcFactory()
rfc = WgRfcFactory()
url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "specific" })
# successful post
@ -355,8 +356,8 @@ class IprTests(TestCase):
"ietfer_contact_info": "555-555-0101",
"iprdocrel_set-TOTAL_FORMS": 2,
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-1-document": rfc.pk,
"patent_number": "SE12345678901",
"patent_inventor": "A. Nonymous",
"patent_title": "A method of transferring bits",
@ -375,7 +376,7 @@ class IprTests(TestCase):
"""Add a new third-party disclosure. Note: submitter does not need to be logged in.
"""
draft = WgDraftFactory()
WgRfcFactory()
rfc = WgRfcFactory()
url = urlreverse("ietf.ipr.views.new", kwargs={ "type": "third-party" })
# successful post
@ -387,9 +388,9 @@ class IprTests(TestCase):
"ietfer_contact_info": "555-555-0101",
"iprdocrel_set-TOTAL_FORMS": 2,
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-0-revisions": '00',
"iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk,
"iprdocrel_set-1-document": rfc.pk,
"patent_number": "SE12345678901",
"patent_inventor": "A. Nonymous",
"patent_title": "A method of transferring bits",
@ -434,7 +435,7 @@ class IprTests(TestCase):
"holder_legal_name": "Test Legal",
"ietfer_contact_info": "555-555-0101",
"ietfer_name": "Test Participant",
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-0-revisions": '00',
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-TOTAL_FORMS": 1,
@ -462,7 +463,7 @@ class IprTests(TestCase):
def test_update(self):
draft = WgDraftFactory()
WgRfcFactory()
rfc = WgRfcFactory()
original_ipr = HolderIprDisclosureFactory(docs=[draft,])
# get
@ -483,9 +484,9 @@ class IprTests(TestCase):
"ietfer_contact_info": "555-555-0101",
"iprdocrel_set-TOTAL_FORMS": 2,
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-0-revisions": '00',
"iprdocrel_set-1-document": DocAlias.objects.filter(name__startswith="rfc").first().pk,
"iprdocrel_set-1-document": rfc.pk,
"patent_number": "SE12345678901",
"patent_inventor": "A. Nonymous",
"patent_title": "A method of transferring bits",
@ -520,7 +521,7 @@ class IprTests(TestCase):
"holder_contact_email": "test@holder.com",
"iprdocrel_set-TOTAL_FORMS": 1,
"iprdocrel_set-INITIAL_FORMS": 0,
"iprdocrel_set-0-document": draft.docalias.first().pk,
"iprdocrel_set-0-document": draft.pk,
"iprdocrel_set-0-revisions": '00',
"patent_number": "SE12345678901",
"patent_inventor": "A. Nonymous",
@ -786,7 +787,7 @@ Subject: test
'iprdocrel_set-INITIAL_FORMS' : 0,
'iprdocrel_set-0-id': '',
"iprdocrel_set-0-document": disclosure.docs.first().pk,
"iprdocrel_set-0-revisions": disclosure.docs.first().document.rev,
"iprdocrel_set-0-revisions": disclosure.docs.first().rev,
'holder_legal_name': disclosure.holder_legal_name,
'patent_number': patent_dict['Number'],
'patent_title': patent_dict['Title'],
@ -848,7 +849,7 @@ Subject: test
NewRevisionDocEventFactory(doc=draft, rev=f"{rev:02d}", time=now-datetime.timedelta(days=30*(2-rev)))
# Disclosure has non-empty revisions field on its related draft
iprdocrel = IprDocRelFactory(document=draft.docalias.first())
iprdocrel = IprDocRelFactory(document=draft)
IprEventFactory(type_id="posted",time=now,disclosure=iprdocrel.disclosure)
self.assertEqual(
no_revisions_message(iprdocrel),
@ -856,7 +857,7 @@ Subject: test
)
# Disclosure has more than one revision, none called out, disclosure after submissions
iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="")
iprdocrel = IprDocRelFactory(document=draft, revisions="")
IprEventFactory(type_id="posted",time=now,disclosure=iprdocrel.disclosure)
self.assertEqual(
no_revisions_message(iprdocrel),
@ -864,7 +865,7 @@ Subject: test
)
# Disclosure has more than one revision, none called out, disclosure after 01
iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="")
iprdocrel = IprDocRelFactory(document=draft, revisions="")
e = IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure)
e.time = now-datetime.timedelta(days=15)
e.save()
@ -874,7 +875,7 @@ Subject: test
)
# Disclosure has more than one revision, none called out, disclosure was before the 00
iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="")
iprdocrel = IprDocRelFactory(document=draft, revisions="")
e = IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure)
e.time = now-datetime.timedelta(days=180)
e.save()
@ -886,7 +887,7 @@ Subject: test
# disclosed draft has no NewRevisionDocEvents
draft = WgDraftFactory(rev="20")
draft.docevent_set.all().delete()
iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="")
iprdocrel = IprDocRelFactory(document=draft, revisions="")
IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure)
self.assertEqual(
no_revisions_message(iprdocrel),
@ -895,7 +896,7 @@ Subject: test
# disclosed draft has only one revision
draft = WgDraftFactory(rev="00")
iprdocrel = IprDocRelFactory(document=draft.docalias.first(), revisions="")
iprdocrel = IprDocRelFactory(document=draft, revisions="")
IprEventFactory(type_id="posted",disclosure=iprdocrel.disclosure)
self.assertEqual(
no_revisions_message(iprdocrel),

View file

@ -32,33 +32,31 @@ def get_ipr_summary(disclosure):
return summary if len(summary) <= 128 else summary[:125]+'...'
def iprs_from_docs(aliases,**kwargs):
"""Returns a list of IPRs related to doc aliases"""
def iprs_from_docs(docs,**kwargs):
"""Returns a list of IPRs related to docs"""
iprdocrels = []
for alias in aliases:
for document in alias.docs.all():
if document.ipr(**kwargs):
iprdocrels += document.ipr(**kwargs)
for document in docs:
if document.ipr(**kwargs):
iprdocrels += document.ipr(**kwargs)
return list(set([i.disclosure for i in iprdocrels]))
def related_docs(alias, relationship=('replaces', 'obs')):
def related_docs(doc, relationship=('replaces', 'obs'), reverse_relationship=("became_rfc",)):
"""Returns list of related documents"""
results = []
for doc in alias.docs.all():
results += list(doc.docalias.all())
rels = []
for doc in alias.docs.all():
rels += list(doc.all_relations_that_doc(relationship))
results = [doc]
rels = doc.all_relations_that_doc(relationship)
for rel in rels:
rel_aliases = list(rel.target.document.docalias.all())
for x in rel_aliases:
x.related = rel
x.relation = rel.relationship.revname
results += rel_aliases
rel.target.related = rel
rel.target.relation = rel.relationship.revname
results += [x.target for x in rels]
rev_rels = doc.all_relations_that(reverse_relationship)
for rel in rev_rels:
rel.source.related = rel
rel.source.relation = rel.relationship.name
results += [x.source for x in rev_rels]
return list(set(results))
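The rewritten related_docs() above now walks forward 'replaces'/'obs' relations and reverse 'became_rfc' relations directly on Document objects. A minimal sketch of that traversal with plain objects; the class and field names stand in for the ORM, and the .related/.relation annotations set by the real function are omitted.

from dataclasses import dataclass, field
from typing import List

@dataclass(eq=False)  # identity-based hashing keeps the objects usable in a set
class Doc:
    name: str
    outgoing: List["Rel"] = field(default_factory=list)  # relations where this doc is the source
    incoming: List["Rel"] = field(default_factory=list)  # relations where this doc is the target

@dataclass(eq=False)
class Rel:
    relationship: str
    source: Doc
    target: Doc

def related_docs(doc, relationship=("replaces", "obs"), reverse_relationship=("became_rfc",)):
    results = [doc]
    results += [r.target for r in doc.outgoing if r.relationship in relationship]
    results += [r.source for r in doc.incoming if r.relationship in reverse_relationship]
    return list(set(results))

# draft-foo replaces draft-bar and later became rfc9999 (example data, illustrative only)
bar, foo, rfc = Doc("draft-bar"), Doc("draft-foo"), Doc("rfc9999")
rel1 = Rel("replaces", source=foo, target=bar); foo.outgoing.append(rel1); bar.incoming.append(rel1)
rel2 = Rel("became_rfc", source=foo, target=rfc); foo.outgoing.append(rel2); rfc.incoming.append(rel2)
print(sorted(d.name for d in related_docs(rfc)))   # ['draft-foo', 'rfc9999']
print(sorted(d.name for d in related_docs(foo)))   # ['draft-bar', 'draft-foo']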
@ -67,17 +65,16 @@ def generate_draft_recursive_txt():
docipr = {}
for o in IprDocRel.objects.filter(disclosure__state='posted').select_related('document'):
alias = o.document
name = alias.name
for document in alias.docs.all():
related = set(document.docalias.all()) | set(document.all_related_that_doc(('obs', 'replaces')))
for alias in related:
name = alias.name
if name.startswith("rfc"):
name = name.upper()
if not name in docipr:
docipr[name] = []
docipr[name].append(o.disclosure_id)
doc = o.document
name = doc.name
related_set = set([doc]) | set(doc.all_related_that_doc(('obs', 'replaces')))
for related in related_set:
name = related.name
if name.startswith("rfc"):
name = name.upper()
if not name in docipr:
docipr[name] = []
docipr[name].append(o.disclosure_id)
lines = [ "# Machine-readable list of IPR disclosures by Internet-Draft name" ]
for name, iprs in docipr.items():

View file

@ -18,7 +18,7 @@ from django.utils.html import escape
import debug # pyflakes:ignore
from ietf.doc.models import DocAlias
from ietf.doc.models import Document
from ietf.group.models import Role, Group
from ietf.ietfauth.utils import role_required, has_role
from ietf.ipr.mail import (message_from_message, get_reply_to, get_update_submitter_emails)
@ -38,7 +38,7 @@ from ietf.message.models import Message
from ietf.message.utils import infer_message
from ietf.name.models import IprLicenseTypeName
from ietf.person.models import Person
from ietf.secr.utils.document import get_rfc_num, is_draft
from ietf.utils import log
from ietf.utils.draft_search import normalize_draftname
from ietf.utils.mail import send_mail, send_mail_message
from ietf.utils.response import permission_denied
@ -69,12 +69,15 @@ def get_document_emails(ipr):
has been posted"""
messages = []
for rel in ipr.iprdocrel_set.all():
doc = rel.document.document
doc = rel.document
if is_draft(doc):
if doc.type_id=="draft":
doc_info = 'Internet-Draft entitled "{}" ({})'.format(doc.title,doc.name)
elif doc.type_id=="rfc":
doc_info = 'RFC entitled "{}" (RFC{})'.format(doc.title, doc.rfc_number)
else:
doc_info = 'RFC entitled "{}" (RFC{})'.format(doc.title,get_rfc_num(doc))
log.unreachable("2023-08-15")
return ""
addrs = gather_address_lists('ipr_posted_on_doc',doc=doc).as_strings(compact=False)
@ -674,17 +677,18 @@ def search(request):
doc = q
if docid:
start = DocAlias.objects.filter(name__iexact=docid)
elif search_type == "draft":
q = normalize_draftname(q)
start = DocAlias.objects.filter(name__icontains=q, name__startswith="draft")
else: # search_type == "rfc"
start = DocAlias.objects.filter(name="rfc%s" % q.lstrip("0"))
start = Document.objects.filter(name__iexact=docid)
else:
if search_type == "draft":
q = normalize_draftname(q)
start = Document.objects.filter(name__icontains=q, name__startswith="draft")
elif search_type == "rfc":
start = Document.objects.filter(name="rfc%s" % q.lstrip("0"))
# one match
if len(start) == 1:
first = start[0]
doc = first.document
doc = first
docs = related_docs(first)
iprs = iprs_from_docs(docs,states=states)
template = "ipr/search_doc_result.html"
@ -716,27 +720,27 @@ def search(request):
# Search by wg acronym
# Document list with IPRs
elif search_type == "group":
docs = list(DocAlias.objects.filter(docs__group=q))
docs = list(Document.objects.filter(group=q))
related = []
for doc in docs:
doc.product_of_this_wg = True
related += related_docs(doc)
iprs = iprs_from_docs(list(set(docs+related)),states=states)
docs = [ doc for doc in docs if doc.document.ipr() ]
docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True)
docs = [ doc for doc in docs if doc.ipr() ]
docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.ipr()]), reverse=True)
template = "ipr/search_wg_result.html"
q = Group.objects.get(id=q).acronym # make acronym for use in template
# Search by rfc and id title
# Document list with IPRs
elif search_type == "doctitle":
docs = list(DocAlias.objects.filter(docs__title__icontains=q))
docs = list(Document.objects.filter(title__icontains=q))
related = []
for doc in docs:
related += related_docs(doc)
iprs = iprs_from_docs(list(set(docs+related)),states=states)
docs = [ doc for doc in docs if doc.document.ipr() ]
docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.document.ipr()]), reverse=True)
docs = [ doc for doc in docs if doc.ipr() ]
docs = sorted(docs, key=lambda x: max([ipr.disclosure.time for ipr in x.ipr()]), reverse=True)
template = "ipr/search_doctitle_result.html"
# Search by title of IPR disclosure

View file

@ -31,7 +31,7 @@ from ietf.liaisons.fields import SearchableLiaisonStatementsField
from ietf.group.models import Group
from ietf.person.models import Email
from ietf.person.fields import SearchableEmailField
from ietf.doc.models import Document, DocAlias
from ietf.doc.models import Document
from ietf.utils.fields import DatepickerDateField
from ietf.utils.timezone import date_today, datetime_from_date, DEADLINE_TZINFO
from functools import reduce
@ -375,8 +375,6 @@ class LiaisonModelForm(forms.ModelForm):
uploaded_filename = name + extension,
)
)
if created:
DocAlias.objects.create(name=attach.name).docs.add(attach)
LiaisonStatementAttachment.objects.create(statement=self.instance,document=attach)
attach_file = io.open(os.path.join(settings.LIAISON_ATTACH_PATH, attach.name + extension), 'wb')
attach_file.write(attached_file.read())

View file

@ -0,0 +1,25 @@
# Copyright The IETF Trust 2023, All Rights Reserved
from django.db import migrations
def forward(apps, schema_editor):
Recipient = apps.get_model("mailtrigger", "Recipient")
Recipient.objects.filter(slug="doc_authors").update(
template='{% if doc.type_id == "draft" or doc.type_id == "rfc" %}<{{doc.name}}@ietf.org>{% endif %}'
)
def reverse(apps, schema_editor):
Recipient = apps.get_model("mailtrigger", "Recipient")
Recipient.objects.filter(slug="doc_authors").update(
template='{% if doc.type_id == "draft" %}<{{doc.name}}@ietf.org>{% endif %}'
)
class Migration(migrations.Migration):
dependencies = [
("mailtrigger", "0004_slides_approved"),
]
operations = [migrations.RunPython(forward, reverse)]
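A quick spot-check of this data migration (editor's sketch, not part of the commit), run in a Django shell after applying it; it only asserts that the forward() template update landed.
from ietf.mailtrigger.models import Recipient

recipient = Recipient.objects.get(slug="doc_authors")
assert 'doc.type_id == "rfc"' in recipient.template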

View file

@ -96,35 +96,35 @@ class Recipient(models.Model):
addrs = []
if 'doc' in kwargs:
for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')):
addrs.extend(Recipient.objects.get(slug='doc_authors').gather(**{'doc':reldoc.document}))
addrs.extend(Recipient.objects.get(slug='doc_authors').gather(**{'doc':reldoc}))
return addrs
def gather_doc_affecteddoc_group_chairs(self, **kwargs):
addrs = []
if 'doc' in kwargs:
for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')):
addrs.extend(Recipient.objects.get(slug='doc_group_chairs').gather(**{'doc':reldoc.document}))
addrs.extend(Recipient.objects.get(slug='doc_group_chairs').gather(**{'doc':reldoc}))
return addrs
def gather_doc_affecteddoc_notify(self, **kwargs):
addrs = []
if 'doc' in kwargs:
for reldoc in kwargs['doc'].related_that_doc(('conflrev','tohist','tois','tops')):
addrs.extend(Recipient.objects.get(slug='doc_notify').gather(**{'doc':reldoc.document}))
addrs.extend(Recipient.objects.get(slug='doc_notify').gather(**{'doc':reldoc}))
return addrs
def gather_conflict_review_stream_manager(self, **kwargs):
addrs = []
if 'doc' in kwargs:
for reldoc in kwargs['doc'].related_that_doc(('conflrev',)):
addrs.extend(Recipient.objects.get(slug='doc_stream_manager').gather(**{'doc':reldoc.document}))
addrs.extend(Recipient.objects.get(slug='doc_stream_manager').gather(**{'doc':reldoc}))
return addrs
def gather_conflict_review_steering_group(self,**kwargs):
addrs = []
if 'doc' in kwargs:
for reldoc in kwargs['doc'].related_that_doc(('conflrev',)):
if reldoc.document.stream_id=='irtf':
if reldoc.stream_id=='irtf':
addrs.append('"Internet Research Steering Group" <irsg@irtf.org>')
return addrs
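A hedged sketch (not part of the commit) of the pattern these gatherers now rely on: related_that_doc() yields Document objects, so each result can be handed straight to another Recipient. The conflict-review document name is hypothetical.
from ietf.doc.models import Document
from ietf.mailtrigger.models import Recipient

conflrev = Document.objects.get(name="conflict-review-example-foo")  # hypothetical
for reldoc in conflrev.related_that_doc(("conflrev",)):
    # reldoc is a Document, no longer a DocAlias
    print(Recipient.objects.get(slug="doc_notify").gather(doc=reldoc))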

View file

@ -2,44 +2,53 @@
from collections import namedtuple
import debug # pyflakes:ignore
import debug # pyflakes:ignore
from ietf.mailtrigger.models import MailTrigger, Recipient
from ietf.submit.models import Submission
from ietf.utils.mail import excludeaddrs
class AddrLists(namedtuple('AddrLists',['to','cc'])):
class AddrLists(namedtuple("AddrLists", ["to", "cc"])):
__slots__ = ()
def as_strings(self,compact=True):
def as_strings(self, compact=True):
separator = ", " if compact else ",\n "
to_string = separator.join(self.to)
cc_string = separator.join(self.cc)
return namedtuple('AddrListsAsStrings',['to','cc'])(to=to_string,cc=cc_string)
return namedtuple("AddrListsAsStrings", ["to", "cc"])(
to=to_string, cc=cc_string
)
def gather_address_lists(slug, skipped_recipients=None, create_from_slug_if_not_exists=None,
desc_if_not_exists=None, **kwargs):
mailtrigger = get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists)
def gather_address_lists(
slug,
skipped_recipients=None,
create_from_slug_if_not_exists=None,
desc_if_not_exists=None,
**kwargs
):
mailtrigger = get_mailtrigger(
slug, create_from_slug_if_not_exists, desc_if_not_exists
)
to = set()
for recipient in mailtrigger.to.all():
to.update(recipient.gather(**kwargs))
to.discard('')
to.discard("")
if skipped_recipients:
to = excludeaddrs(to, skipped_recipients)
cc = set()
for recipient in mailtrigger.cc.all():
cc.update(recipient.gather(**kwargs))
cc.discard('')
cc.discard("")
if skipped_recipients:
cc = excludeaddrs(cc, skipped_recipients)
return AddrLists(to=sorted(list(to)),cc=sorted(list(cc)))
return AddrLists(to=sorted(list(to)), cc=sorted(list(cc)))
def get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists):
try:
@ -50,77 +59,99 @@ def get_mailtrigger(slug, create_from_slug_if_not_exists, desc_if_not_exists):
mailtrigger = MailTrigger.objects.create(slug=slug, desc=desc_if_not_exists)
mailtrigger.to.set(template.to.all())
mailtrigger.cc.set(template.cc.all())
if slug.startswith('review_completed') and slug.endswith('early'):
mailtrigger.cc.remove('ietf_last_call')
if slug.startswith("review_completed") and slug.endswith("early"):
mailtrigger.cc.remove("ietf_last_call")
else:
raise
return mailtrigger
def gather_relevant_expansions(**kwargs):
def starts_with(prefix):
return MailTrigger.objects.filter(slug__startswith=prefix).values_list('slug',flat=True)
return MailTrigger.objects.filter(slug__startswith=prefix).values_list(
"slug", flat=True
)
relevant = set()
if 'doc' in kwargs:
relevant = set()
doc = kwargs['doc']
if "doc" in kwargs:
doc = kwargs["doc"]
relevant.add('doc_state_edited')
if not doc.type_id in ['bofreq', 'statement']:
relevant.update(['doc_telechat_details_changed','ballot_deferred','iesg_ballot_saved'])
relevant.add("doc_state_edited")
if doc.type_id in ['draft','statchg']:
relevant.update(starts_with('last_call_'))
if not doc.type_id in ["bofreq", "statement", "rfc"]:
relevant.update(
["doc_telechat_details_changed", "ballot_deferred", "iesg_ballot_saved"]
)
if doc.type_id == 'draft':
relevant.update(starts_with('doc_'))
relevant.update(starts_with('resurrection_'))
relevant.update(['ipr_posted_on_doc',])
if doc.stream_id == 'ietf':
relevant.update(['ballot_approved_ietf_stream','pubreq_iesg'])
if doc.type_id in ["draft", "statchg"]:
relevant.update(starts_with("last_call_"))
if doc.type_id == "rfc":
relevant.update(
[
"doc_added_comment",
"doc_external_resource_change_requested",
"doc_state_edited",
"ipr_posted_on_doc",
]
)
if doc.type_id == "draft":
relevant.update(starts_with("doc_"))
relevant.update(starts_with("resurrection_"))
relevant.update(
[
"ipr_posted_on_doc",
]
)
if doc.stream_id == "ietf":
relevant.update(["ballot_approved_ietf_stream", "pubreq_iesg"])
else:
relevant.update(['pubreq_rfced'])
last_submission = Submission.objects.filter(name=doc.name,state='posted').order_by('-rev').first()
if last_submission and 'submission' not in kwargs:
kwargs['submission'] = last_submission
relevant.update(["pubreq_rfced"])
last_submission = (
Submission.objects.filter(name=doc.name, state="posted")
.order_by("-rev")
.first()
)
if last_submission and "submission" not in kwargs:
kwargs["submission"] = last_submission
if doc.type_id == 'conflrev':
relevant.update(['conflrev_requested','ballot_approved_conflrev'])
if doc.type_id == 'charter':
relevant.update(['charter_external_review','ballot_approved_charter'])
if doc.type_id == "conflrev":
relevant.update(["conflrev_requested", "ballot_approved_conflrev"])
if doc.type_id == "charter":
relevant.update(["charter_external_review", "ballot_approved_charter"])
if doc.type_id == 'bofreq':
relevant.update(starts_with('bofreq'))
if doc.type_id == "bofreq":
relevant.update(starts_with("bofreq"))
if 'group' in kwargs:
relevant.update(starts_with('group_'))
relevant.update(starts_with('milestones_'))
group = kwargs['group']
if "group" in kwargs:
relevant.update(starts_with("group_"))
relevant.update(starts_with("milestones_"))
group = kwargs["group"]
if group.features.acts_like_wg:
relevant.update(starts_with('session_'))
relevant.update(starts_with("session_"))
if group.features.has_chartering_process:
relevant.update(['charter_external_review',])
relevant.update(
[
"charter_external_review",
]
)
if 'submission' in kwargs:
relevant.update(starts_with('sub_'))
if "submission" in kwargs:
relevant.update(starts_with("sub_"))
rule_list = []
for mailtrigger in MailTrigger.objects.filter(slug__in=relevant):
addrs = gather_address_lists(mailtrigger.slug,**kwargs)
addrs = gather_address_lists(mailtrigger.slug, **kwargs)
if addrs.to or addrs.cc:
rule_list.append((mailtrigger.slug,mailtrigger.desc,addrs.to,addrs.cc))
rule_list.append((mailtrigger.slug, mailtrigger.desc, addrs.to, addrs.cc))
return sorted(rule_list)
def get_base_submission_message_address():
return Recipient.objects.get(slug='submission_manualpost_handling').gather()[0]
return Recipient.objects.get(slug="submission_manualpost_handling").gather()[0]
def get_base_ipr_request_address():
return Recipient.objects.get(slug='ipr_requests').gather()[0]
return Recipient.objects.get(slug="ipr_requests").gather()[0]

View file

@ -19,7 +19,7 @@ from django.utils.functional import cached_property
import debug # pyflakes:ignore
from ietf.doc.models import Document, DocAlias, State, NewRevisionDocEvent
from ietf.doc.models import Document, State, NewRevisionDocEvent
from ietf.group.models import Group
from ietf.group.utils import groups_managed_by
from ietf.meeting.models import Session, Meeting, Schedule, countries, timezones, TimeSlot, Room
@ -341,7 +341,6 @@ class InterimSessionModelForm(forms.ModelForm):
# FIXME: What about agendas in html or markdown format?
uploaded_filename='{}-00.txt'.format(filename))
doc.set_state(State.objects.get(type__slug=doc.type.slug, slug='active'))
DocAlias.objects.create(name=doc.name).docs.add(doc)
self.instance.sessionpresentation_set.create(document=doc, rev=doc.rev)
NewRevisionDocEvent.objects.create(
type='new_revision',

View file

@ -26,7 +26,6 @@ from django.conf import settings
from django.urls import reverse as urlreverse
from django.utils import timezone
from django.utils.text import slugify
from django.utils.safestring import mark_safe
from ietf.dbtemplate.models import DBTemplate
from ietf.doc.models import Document
@ -582,19 +581,23 @@ class TimeSlot(models.Model):
self._session_cache = self.sessions.filter(timeslotassignments__schedule__in=[self.meeting.schedule, self.meeting.schedule.base if self.meeting else None]).first()
return self._session_cache
def meeting_date(self):
return self.time.date()
# Unused
#
# def meeting_date(self):
# return self.time.date()
def registration(self):
# below implements a object local cache
# it tries to find a timeslot of type registration which starts at the same time as this slot
# so that it can be shown at the top of the agenda.
if not hasattr(self, '_reg_info'):
try:
self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
except TimeSlot.DoesNotExist:
self._reg_info = None
return self._reg_info
# Unused
#
# def registration(self):
# # below implements a object local cache
# # it tries to find a timeslot of type registration which starts at the same time as this slot
# # so that it can be shown at the top of the agenda.
# if not hasattr(self, '_reg_info'):
# try:
# self._reg_info = TimeSlot.objects.get(meeting=self.meeting, time__month=self.time.month, time__day=self.time.day, type="reg")
# except TimeSlot.DoesNotExist:
# self._reg_info = None
# return self._reg_info
def __str__(self):
location = self.get_location()
@ -621,30 +624,33 @@ class TimeSlot(models.Model):
def get_location(self):
return self.get_hidden_location() if self.show_location else ""
def get_functional_location(self):
name_parts = []
room = self.location
if room and room.functional_name:
name_parts.append(room.functional_name)
location = self.get_hidden_location()
if location:
name_parts.append(location)
return ' - '.join(name_parts)
# Unused
#
# def get_functional_location(self):
# name_parts = []
# room = self.location
# if room and room.functional_name:
# name_parts.append(room.functional_name)
# location = self.get_hidden_location()
# if location:
# name_parts.append(location)
# return ' - '.join(name_parts)
def get_html_location(self):
if not hasattr(self, '_cached_html_location'):
self._cached_html_location = self.get_location()
if len(self._cached_html_location) > 8:
self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
else:
self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
return self._cached_html_location
# def get_html_location(self):
# if not hasattr(self, '_cached_html_location'):
# self._cached_html_location = self.get_location()
# if len(self._cached_html_location) > 8:
# self._cached_html_location = mark_safe(self._cached_html_location.replace('/', '/<wbr>'))
# else:
# self._cached_html_location = mark_safe(self._cached_html_location.replace(' ', '&nbsp;'))
# return self._cached_html_location
def tz(self):
return self.meeting.tz()
def tzname(self):
return self.tz().tzname(self.time)
# Unused
# def tzname(self):
# return self.tz().tzname(self.time)
def utc_start_time(self):
return self.time.astimezone(pytz.utc) # USE_TZ is True, so time is aware
@ -658,30 +664,32 @@ class TimeSlot(models.Model):
def local_end_time(self):
return (self.time.astimezone(pytz.utc) + self.duration).astimezone(self.tz())
@property
def js_identifier(self):
# this returns a unique identifier that is js happy.
# {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}"
# also must match:
# {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
dom_id="ts%u" % (self.pk)
if self.location is not None:
dom_id = self.location.dom_id()
return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))
# Unused
#
# @property
# def js_identifier(self):
# # this returns a unique identifier that is js happy.
# # {{s.timeslot.time|date:'Y-m-d'}}_{{ s.timeslot.time|date:'Hi' }}"
# # also must match:
# # {{r|slugify}}_{{day}}_{{slot.0|date:'Hi'}}
# dom_id="ts%u" % (self.pk)
# if self.location is not None:
# dom_id = self.location.dom_id()
# return "%s_%s_%s" % (dom_id, self.time.strftime('%Y-%m-%d'), self.time.strftime('%H%M'))
def delete_concurrent_timeslots(self):
"""Delete all timeslots which are in the same time as this slot"""
# can not include duration in filter, because there is no support
# for having it a WHERE clause.
# below will delete self as well.
for ts in self.meeting.timeslot_set.filter(time=self.time).all():
if ts.duration!=self.duration:
continue
# def delete_concurrent_timeslots(self):
# """Delete all timeslots which are in the same time as this slot"""
# # can not include duration in filter, because there is no support
# # for having it a WHERE clause.
# # below will delete self as well.
# for ts in self.meeting.timeslot_set.filter(time=self.time).all():
# if ts.duration!=self.duration:
# continue
# now remove any schedule that might have been made to this
# timeslot.
ts.sessionassignments.all().delete()
ts.delete()
# # now remove any schedule that might have been made to this
# # timeslot.
# ts.sessionassignments.all().delete()
# ts.delete()
"""
Find a timeslot that comes next, in the same room. It must be on the same day,

View file

@ -552,7 +552,7 @@ class MeetingTests(BaseMeetingTestCase):
if material.type_id == 'draft':
expected_url = urlreverse(
'ietf.doc.views_doc.document_main',
kwargs={'name': material.canonical_name()},
kwargs={'name': material.name},
)
else:
expected_url = material.get_href(meeting)
@ -563,7 +563,7 @@ class MeetingTests(BaseMeetingTestCase):
if material.type_id == 'draft':
expected_url = urlreverse(
'ietf.doc.views_doc.document_main',
kwargs={'name': material.canonical_name()},
kwargs={'name': material.name},
)
else:
expected_url = material.get_href(meeting)
@ -7773,7 +7773,7 @@ class ProceedingsTests(BaseMeetingTestCase):
if material.type_id == 'draft':
expected_url = urlreverse(
'ietf.doc.views_doc.document_main',
kwargs={'name': material.canonical_name()},
kwargs={'name': material.name},
)
else:
expected_url = material.get_href(meeting)
@ -7784,7 +7784,7 @@ class ProceedingsTests(BaseMeetingTestCase):
if material.type_id == 'draft':
expected_url = urlreverse(
'ietf.doc.views_doc.document_main',
kwargs={'name': material.canonical_name()},
kwargs={'name': material.name},
)
else:
expected_url = material.get_href(meeting)

View file

@ -20,7 +20,7 @@ import debug # pyflakes:ignore
from ietf.dbtemplate.models import DBTemplate
from ietf.meeting.models import (Session, SchedulingEvent, TimeSlot,
Constraint, SchedTimeSessAssignment, SessionPresentation, Attended)
from ietf.doc.models import Document, DocAlias, State, NewRevisionDocEvent
from ietf.doc.models import Document, State, NewRevisionDocEvent
from ietf.doc.models import DocEvent
from ietf.group.models import Group
from ietf.group.utils import can_manage_materials
@ -596,7 +596,6 @@ def save_session_minutes_revision(session, file, ext, request, encoding=None, ap
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='minutes',slug='active'))
if session.sessionpresentation_set.filter(document=doc).exists():
sp = session.sessionpresentation_set.get(document=doc)
@ -720,7 +719,6 @@ def new_doc_for_session(type_id, session):
rev = '00',
)
doc.states.add(State.objects.get(type_id=type_id, slug='active'))
DocAlias.objects.create(name=doc.name).docs.add(doc)
session.sessionpresentation_set.create(document=doc,rev='00')
return doc
@ -753,8 +751,6 @@ def create_recording(session, url, title=None, user=None):
rev='00',
type_id='recording')
doc.set_state(State.objects.get(type='recording', slug='active'))
DocAlias.objects.create(name=doc.name).docs.add(doc)
# create DocEvent
NewRevisionDocEvent.objects.create(type='new_revision',
@ -773,11 +769,11 @@ def get_next_sequence(group, meeting, type):
Returns the next sequence number to use for a document of type = type.
Takes a group=Group object, meeting=Meeting object, type = string
'''
aliases = DocAlias.objects.filter(name__startswith='{}-{}-{}-'.format(type, meeting.number, group.acronym))
if not aliases:
docs = Document.objects.filter(name__startswith='{}-{}-{}-'.format(type, meeting.number, group.acronym))
if not docs:
return 1
aliases = aliases.order_by('name')
sequence = int(aliases.last().name.split('-')[-1]) + 1
docs = docs.order_by('name')
sequence = int(docs.last().name.split('-')[-1]) + 1
return sequence
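A hedged sketch (not part of the commit) of how get_next_sequence() is typically used now that it queries Document names directly; the group acronym and meeting number are illustrative only.
from ietf.group.models import Group
from ietf.meeting.models import Meeting
from ietf.meeting.utils import get_next_sequence

group = Group.objects.get(acronym="mars")    # hypothetical group
meeting = Meeting.objects.get(number="118")  # hypothetical meeting
seq = get_next_sequence(group, meeting, "slides")
print("slides-{}-{}-{}".format(meeting.number, group.acronym, seq))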
def get_activity_stats(sdate, edate):

View file

@ -48,7 +48,7 @@ from django.views.generic import RedirectView
import debug # pyflakes:ignore
from ietf.doc.fields import SearchableDocumentsField
from ietf.doc.models import Document, State, DocEvent, NewRevisionDocEvent, DocAlias
from ietf.doc.models import Document, State, DocEvent, NewRevisionDocEvent
from ietf.group.models import Group
from ietf.group.utils import can_manage_session_materials, can_manage_some_groups, can_manage_group
from ietf.person.models import Person, User
@ -238,7 +238,7 @@ def _get_materials_doc(meeting, name):
docname, rev = name.rsplit("-", 1)
if len(rev) == 2 and rev.isdigit():
doc = Document.objects.get(name=docname) # may raise Document.DoesNotExist
if doc.get_related_meeting() == meeting and rev in doc.revisions():
if doc.get_related_meeting() == meeting and rev in doc.revisions_by_newrevisionevent():
return doc, rev
# give up
raise Document.DoesNotExist
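A small sketch (editor's illustration, not part of the commit) of the name parsing that _get_materials_doc() performs before the Document lookup: the trailing two-digit revision is split off with rsplit. The material name is hypothetical.
name = "slides-118-mars-chair-slides-01"  # hypothetical material name
docname, rev = name.rsplit("-", 1)
assert len(rev) == 2 and rev.isdigit()
print(docname, rev)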
@ -248,7 +248,6 @@ def _get_materials_doc(meeting, name):
def materials_document(request, document, num=None, ext=None):
meeting=get_meeting(num,type_in=['ietf','interim'])
num = meeting.number
# This view does not allow the use of DocAliases. Right now we are probably only creating one (identity) alias, but that may not hold in the future.
try:
doc, rev = _get_materials_doc(meeting=meeting, name=document)
except Document.DoesNotExist:
@ -2595,7 +2594,6 @@ def save_bluesheet(request, session, file, encoding='utf-8'):
rev = '00',
)
doc.states.add(State.objects.get(type_id='bluesheets',slug='active'))
DocAlias.objects.create(name=doc.name).docs.add(doc)
session.sessionpresentation_set.create(document=doc,rev='00')
filename = '%s-%s%s'% ( doc.name, doc.rev, ext)
doc.uploaded_filename = filename
@ -2724,7 +2722,6 @@ def upload_session_agenda(request, session_id, num):
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='agenda',slug='active'))
if session.sessionpresentation_set.filter(document=doc).exists():
sp = session.sessionpresentation_set.get(document=doc)
@ -2817,7 +2814,6 @@ def upload_session_slides(request, session_id, num, name=None):
group = session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='slides',slug='active'))
doc.states.add(State.objects.get(type_id='reuse_policy',slug='single'))
if session.sessionpresentation_set.filter(document=doc).exists():
@ -4551,7 +4547,6 @@ def approve_proposed_slides(request, slidesubmission_id, num):
group = submission.session.group,
rev = '00',
)
DocAlias.objects.create(name=doc.name).docs.add(doc)
doc.states.add(State.objects.get(type_id='slides',slug='active'))
doc.states.add(State.objects.get(type_id='reuse_policy',slug='single'))
if submission.session.sessionpresentation_set.filter(document=doc).exists():

View file

@ -8,7 +8,7 @@ from django.shortcuts import render, redirect, get_object_or_404
import debug # pyflakes:ignore
from ietf.doc.utils import add_state_change_event
from ietf.doc.models import DocAlias, DocEvent, Document, NewRevisionDocEvent, State
from ietf.doc.models import DocEvent, Document, NewRevisionDocEvent, State
from ietf.ietfauth.utils import role_required
from ietf.meeting.forms import FileUploadForm
from ietf.meeting.models import Meeting, MeetingHost
@ -98,10 +98,6 @@ def save_proceedings_material_doc(meeting, material_type, title, request, file=N
)
created = True
# do this even if we did not create the document, just to be sure the alias exists
alias, _ = DocAlias.objects.get_or_create(name=doc.name)
alias.docs.add(doc)
if file:
if not created:
doc.rev = '{:02}'.format(int(doc.rev) + 1)

View file

@ -2565,6 +2565,19 @@
"model": "doc.state",
"pk": 176
},
{
"fields": {
"desc": "",
"name": "Published",
"next_states": [],
"order": 1,
"slug": "published",
"type": "rfc",
"used": true
},
"model": "doc.state",
"pk": 177
},
{
"fields": {
"label": "State"
@ -2572,6 +2585,13 @@
"model": "doc.statetype",
"pk": "agenda"
},
{
"fields": {
"label": "bcp state"
},
"model": "doc.statetype",
"pk": "bcp"
},
{
"fields": {
"label": "State"
@ -2691,6 +2711,13 @@
"model": "doc.statetype",
"pk": "draft-stream-ise"
},
{
"fields": {
"label": "fyi state"
},
"model": "doc.statetype",
"pk": "fyi"
},
{
"fields": {
"label": "State"
@ -2747,6 +2774,13 @@
"model": "doc.statetype",
"pk": "review"
},
{
"fields": {
"label": "State"
},
"model": "doc.statetype",
"pk": "rfc"
},
{
"fields": {
"label": "Shepherd's Writeup State"
@ -2775,6 +2809,13 @@
"model": "doc.statetype",
"pk": "statement"
},
{
"fields": {
"label": "std state"
},
"model": "doc.statetype",
"pk": "std"
},
{
"fields": {
"about_page": "ietf.group.views.group_about",
@ -5903,7 +5944,7 @@
{
"fields": {
"desc": "The document's authors",
"template": "{% if doc.type_id == \"draft\" %}<{{doc.name}}@ietf.org>{% endif %}"
"template": "{% if doc.type_id == \"draft\" or doc.type_id == \"rfc\" %}<{{doc.name}}@ietf.org>{% endif %}"
},
"model": "mailtrigger.recipient",
"pk": "doc_authors"
@ -10052,6 +10093,17 @@
"model": "name.dbtemplatetypename",
"pk": "rst"
},
{
"fields": {
"desc": "",
"name": "became RFC",
"order": 0,
"revname": "came from draft",
"used": true
},
"model": "name.docrelationshipname",
"pk": "became_rfc"
},
{
"fields": {
"desc": "",
@ -10063,6 +10115,17 @@
"model": "name.docrelationshipname",
"pk": "conflrev"
},
{
"fields": {
"desc": "This document contains other documents (e.g., STDs contain RFCs)",
"name": "Contains",
"order": 0,
"revname": "Is part of",
"used": true
},
"model": "name.docrelationshipname",
"pk": "contains"
},
{
"fields": {
"desc": "Approval for downref",
@ -10579,6 +10642,17 @@
"model": "name.doctypename",
"pk": "agenda"
},
{
"fields": {
"desc": "",
"name": "Best Current Practice",
"order": 0,
"prefix": "bcp",
"used": true
},
"model": "name.doctypename",
"pk": "bcp"
},
{
"fields": {
"desc": "",
@ -10645,6 +10719,17 @@
"model": "name.doctypename",
"pk": "draft"
},
{
"fields": {
"desc": "",
"name": "For Your Information",
"order": 0,
"prefix": "fyi",
"used": true
},
"model": "name.doctypename",
"pk": "fyi"
},
{
"fields": {
"desc": "",
@ -10722,6 +10807,17 @@
"model": "name.doctypename",
"pk": "review"
},
{
"fields": {
"desc": "",
"name": "RFC",
"order": 0,
"prefix": "rfc",
"used": true
},
"model": "name.doctypename",
"pk": "rfc"
},
{
"fields": {
"desc": "",
@ -10766,6 +10862,17 @@
"model": "name.doctypename",
"pk": "statement"
},
{
"fields": {
"desc": "",
"name": "Standard",
"order": 0,
"prefix": "std",
"used": true
},
"model": "name.doctypename",
"pk": "std"
},
{
"fields": {
"desc": "",
@ -11796,7 +11903,7 @@
{
"fields": {
"default_offset_days": -57,
"desc": "Cut-off date for BOF proposal requests. To request a BOF, please see instructions at https://www.ietf.org/how/bofs/bof-procedures on Requesting a BOF",
"desc": "Cut-off date for BOF proposal requests. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).",
"name": "Cut-off preliminary BOF requests",
"order": 0,
"used": true
@ -11807,7 +11914,7 @@
{
"fields": {
"default_offset_days": -57,
"desc": "Preliminary BOF proposals requested. To request a BOF, please see instructions on requesting a BOF at https://www.ietf.org/how/bofs/bof-procedures/",
"desc": "Preliminary BOF proposals requested. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).",
"name": "Preliminary BOF proposals requested",
"order": 0,
"used": false
@ -11840,7 +11947,7 @@
{
"fields": {
"default_offset_days": -43,
"desc": "Cut-off date for BOF proposal requests to Area Directors at UTC 23:59",
"desc": "Cut-off date for BOF proposal requests to Area Directors at UTC 23:59. To request a __BoF__ session use the [IETF BoF Request Tool](/doc/bof-requests).",
"name": "Cut-off BOF scheduling Requests",
"order": 0,
"used": false
@ -11884,7 +11991,7 @@
{
"fields": {
"default_offset_days": -43,
"desc": "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59",
"desc": "Cut-off date for requests to schedule Working Group Meetings at UTC 23:59. To request a __Working Group__ session, use the [IETF Meeting Session Request Tool](/secr/sreq/).",
"name": "Cut-off WG scheduling Requests",
"order": 0,
"used": true
@ -11939,7 +12046,7 @@
{
"fields": {
"default_offset_days": -12,
"desc": "Internet-Draft submission cut-off (for all Internet-Drafts, including -00) by UTC 23:59",
"desc": "Internet-Draft submission cut-off (for all Internet-Drafts, including -00) by UTC 23:59. Upload using the [I-D Submission Tool](/submit/).",
"name": "I-D Cutoff",
"order": 0,
"used": true
@ -11972,7 +12079,7 @@
{
"fields": {
"default_offset_days": -82,
"desc": "IETF Online Registration Opens",
"desc": "IETF Online Registration Opens [Register Here](https://www.ietf.org/how/meetings/register/).",
"name": "Registration Opens",
"order": 0,
"used": true
@ -11983,7 +12090,7 @@
{
"fields": {
"default_offset_days": -89,
"desc": "Working Group and BOF scheduling begins",
"desc": "Working Group and BOF scheduling begins. To request a Working Group session, use the [IETF Meeting Session Request Tool](/secr/sreq/). If you are working on a BOF request, it is highly recommended to tell the IESG now by sending an [email to iesg@ietf.org](mailtp:iesg@ietf.org) to get advance help with the request.",
"name": "Scheduling Opens",
"order": 0,
"used": true

Some files were not shown because too many files have changed in this diff