Some cleanup around stats. Things work well except for the time series graphs.

- Legacy-Id: 16035
Robert Sparks 2019-03-15 16:50:59 +00:00
parent 334767fa6f
commit 3678d20b0b
4 changed files with 43 additions and 139 deletions

View file

@@ -98,7 +98,7 @@ from ietf.review.utils import (can_manage_review_requests_for_team,
                                current_unavailable_periods_for_reviewers,
                                email_reviewer_availability_change,
                                reviewer_rotation_list,
-                               latest_review_requests_for_reviewers,
+                               latest_review_assignments_for_reviewers,
                                augment_review_requests_with_events,
                                get_default_filter_re,
                                days_needed_to_fulfill_min_interval_for_reviewers,
@@ -107,7 +107,7 @@ from ietf.doc.models import LastCallDocEvent
-from ietf.name.models import ReviewRequestStateName
+from ietf.name.models import ReviewAssignmentStateName
 from ietf.utils.mail import send_mail_text, parse_preformatted
 from ietf.ietfauth.utils import user_is_person
@@ -1361,8 +1361,8 @@ def reviewer_overview(request, acronym, group_type=None):
     today = datetime.date.today()
-    req_data_for_reviewers = latest_review_requests_for_reviewers(group)
-    review_state_by_slug = { n.slug: n for n in ReviewRequestStateName.objects.all() }
+    req_data_for_reviewers = latest_review_assignments_for_reviewers(group)
+    assignment_state_by_slug = { n.slug: n for n in ReviewAssignmentStateName.objects.all() }
     days_needed = days_needed_to_fulfill_min_interval_for_reviewers(group)
@@ -1383,14 +1383,15 @@ def reviewer_overview(request, acronym, group_type=None):
         person.busy = person.id in days_needed
+        # TODO - What is this MAX_CLOSED_REQS trying to accomplish?
         MAX_CLOSED_REQS = 10
         req_data = req_data_for_reviewers.get(person.pk, [])
-        open_reqs = sum(1 for d in req_data if d.state in ["requested", "accepted"])
+        open_reqs = sum(1 for d in req_data if d.state in ["assigned", "accepted"])
         latest_reqs = []
         for d in req_data:
-            if d.state in ["requested", "accepted"] or len(latest_reqs) < MAX_CLOSED_REQS + open_reqs:
-                latest_reqs.append((d.req_pk, d.doc, d.reviewed_rev, d.assigned_time, d.deadline,
-                                    review_state_by_slug.get(d.state),
+            if d.state in ["assigned", "accepted"] or len(latest_reqs) < MAX_CLOSED_REQS + open_reqs:
+                latest_reqs.append((d.assignment_pk, d.doc, d.reviewed_rev, d.assigned_time, d.deadline,
+                                    assignment_state_by_slug.get(d.state),
                                     int(math.ceil(d.assignment_to_closure_days)) if d.assignment_to_closure_days is not None else None))
         person.latest_reqs = latest_reqs
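
Side note on the hunk above: the loop keeps every open assignment ("assigned" or "accepted") and admits closed ones only while the list is shorter than open_reqs + MAX_CLOSED_REQS, which is exactly what the new TODO questions. A minimal standalone sketch of that selection logic; the Item tuple and select_latest name are illustrative stand-ins, not from this commit:

import collections

Item = collections.namedtuple("Item", ["assignment_pk", "state"])
MAX_CLOSED_REQS = 10  # cap on closed entries shown per reviewer, as above

def select_latest(req_data):
    # open items always pass the test below, so they never consume the cap
    open_count = sum(1 for d in req_data if d.state in ["assigned", "accepted"])
    latest = []
    for d in req_data:
        # closed items are admitted only while the list is shorter than
        # open_count + MAX_CLOSED_REQS
        if d.state in ["assigned", "accepted"] or len(latest) < MAX_CLOSED_REQS + open_count:
            latest.append(d)
    return latest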

View file

@@ -176,11 +176,6 @@ ReviewAssignmentData = namedtuple("ReviewAssignmentData", [
     "late_days",
     "request_to_assignment_days", "assignment_to_closure_days", "request_to_closure_days"])
-# TODO - see if this becomes dead
-ReviewRequestData = namedtuple("ReviewRequestData", [
-    "req_pk", "doc", "doc_pages", "req_time", "state", "assigned_time", "deadline", "reviewed_rev", "result", "team", "reviewer",
-    "late_days",
-    "request_to_assignment_days", "assignment_to_closure_days", "request_to_closure_days"])
 def extract_review_assignment_data(teams=None, reviewers=None, time_from=None, time_to=None, ordering=[]):
     """Yield data on each review assignment, sorted by (*ordering, assigned_on)
@@ -208,7 +203,7 @@ def extract_review_assignment_data(teams=None, reviewers=None, time_from=None, t
         "reviewer__person", "assigned_on", "completed_on"
     )
-    event_qs = event_qs.order_by(*[o.replace("reviewer", "reviewer__person") for o in ordering] + ["assigned_on", "pk", "completed_on"])
+    event_qs = event_qs.order_by(*[o.replace("reviewer", "reviewer__person").replace("team","review_request__team") for o in ordering] + ["assigned_on", "pk", "completed_on"])
     def positive_days(time_from, time_to):
         if time_from is None or time_to is None:
@@ -242,74 +237,10 @@ def extract_review_assignment_data(teams=None, reviewers=None, time_from=None, t
         yield d
-# TODO - see if this is dead code
-def extract_review_request_data(teams=None, reviewers=None, time_from=None, time_to=None, ordering=[]):
-    """Yield data on each review request, sorted by (*ordering, time)
-    for easy use with itertools.groupby. Valid entries in *ordering are "team" and "reviewer"."""
-    filters = Q()
-    if teams:
-        filters &= Q(team__in=teams)
-    if reviewers:
-        filters &= Q(reviewer__person__in=reviewers)
-    if time_from:
-        filters &= Q(time__gte=time_from)
-    if time_to:
-        filters &= Q(time__lte=time_to)
-    # we may be dealing with a big bunch of data, so treat it carefully
-    event_qs = ReviewRequest.objects.filter(filters)
-    # left outer join with RequestRequestDocEvent for request/assign/close time
-    event_qs = event_qs.values_list(
-        "pk", "doc", "doc__pages", "time", "state", "deadline", "reviewassignment__reviewed_rev", "reviewassignment__result", "team",
-        "reviewassignment__reviewer__person", "reviewrequestdocevent__time", "reviewrequestdocevent__type"
-    )
-    event_qs = event_qs.order_by(*[o.replace("reviewer", "reviewassignment__reviewer__person") for o in ordering] + ["time", "pk", "-reviewrequestdocevent__time"])
-    def positive_days(time_from, time_to):
-        if time_from is None or time_to is None:
-            return None
-        delta = time_to - time_from
-        seconds = delta.total_seconds()
-        if seconds > 0:
-            return seconds / float(24 * 60 * 60)
-        else:
-            return 0.0
-    for _, events in itertools.groupby(event_qs.iterator(), lambda t: t[0]):
-        requested_time = assigned_time = closed_time = None
-        for e in events:
-            req_pk, doc, doc_pages, req_time, state, deadline, reviewed_rev, result, team, reviewer, event_time, event_type = e
-            if event_type == "requested_review" and requested_time is None:
-                requested_time = event_time
-            elif event_type == "assigned_review_request" and assigned_time is None:
-                assigned_time = event_time
-            elif event_type == "closed_review_request" and closed_time is None:
-                closed_time = event_time
-        late_days = positive_days(datetime.datetime.combine(deadline, datetime.time.max), closed_time)
-        request_to_assignment_days = positive_days(requested_time, assigned_time)
-        assignment_to_closure_days = positive_days(assigned_time, closed_time)
-        request_to_closure_days = positive_days(requested_time, closed_time)
-        d = ReviewRequestData(req_pk, doc, doc_pages, req_time, state, assigned_time, deadline, reviewed_rev, result, team, reviewer,
-                              late_days, request_to_assignment_days, assignment_to_closure_days,
-                              request_to_closure_days)
-        yield d
-def aggregate_raw_period_review_request_stats(review_request_data, count=None):
+def aggregate_raw_period_review_assignment_stats(review_assignment_data, count=None):
     """Take a sequence of review request data from
-    extract_review_request_data and aggregate them."""
+    extract_review_assignment_data and aggregate them."""
     state_dict = defaultdict(int)
     late_state_dict = defaultdict(int)
@@ -317,8 +248,8 @@ def aggregate_raw_period_review_request_stats(review_request_data, count=None):
     assignment_to_closure_days_list = []
     assignment_to_closure_days_count = 0
-    for (req_pk, doc, doc_pages, req_time, state, assigned_time, deadline, reviewed_rev, result, team, reviewer,
-         late_days, request_to_assignment_days, assignment_to_closure_days, request_to_closure_days) in review_request_data:
+    for (assignment_pk, doc, doc_pages, req_time, state, assigned_time, deadline, reviewed_rev, result, team, reviewer,
+         late_days, request_to_assignment_days, assignment_to_closure_days, request_to_closure_days) in review_assignment_data:
         if count == "pages":
             c = doc_pages
         else:
@@ -337,7 +268,7 @@ def aggregate_raw_period_review_request_stats(review_request_data, count=None):
     return state_dict, late_state_dict, result_dict, assignment_to_closure_days_list, assignment_to_closure_days_count
-def sum_period_review_request_stats(raw_aggregation):
+def sum_period_review_assignment_stats(raw_aggregation):
     """Compute statistics from aggregated review request data for one aggregation point."""
     state_dict, late_state_dict, result_dict, assignment_to_closure_days_list, assignment_to_closure_days_count = raw_aggregation
@@ -345,11 +276,11 @@ def sum_period_review_request_stats(raw_aggregation):
     res["state"] = state_dict
     res["result"] = result_dict
-    res["open"] = sum(state_dict.get(s, 0) for s in ("requested", "accepted"))
+    res["open"] = sum(state_dict.get(s, 0) for s in ("assigned", "accepted"))
     res["completed"] = sum(state_dict.get(s, 0) for s in ("completed", "part-completed"))
     res["not_completed"] = sum(state_dict.get(s, 0) for s in state_dict if s in ("rejected", "withdrawn", "overtaken", "no-response"))
-    res["open_late"] = sum(late_state_dict.get(s, 0) for s in ("requested", "accepted"))
+    res["open_late"] = sum(late_state_dict.get(s, 0) for s in ("assigned", "accepted"))
     res["open_in_time"] = res["open"] - res["open_late"]
     res["completed_late"] = sum(late_state_dict.get(s, 0) for s in ("completed", "part-completed"))
     res["completed_in_time"] = res["completed"] - res["completed_late"]
@@ -358,7 +289,7 @@ def sum_period_review_request_stats(raw_aggregation):
     return res
-def sum_raw_review_request_aggregations(raw_aggregations):
+def sum_raw_review_assignment_aggregations(raw_aggregations):
     """Collapse a sequence of aggregations into one aggregation."""
     state_dict = defaultdict(int)
     late_state_dict = defaultdict(int)
@@ -397,36 +328,6 @@ def latest_review_assignments_for_reviewers(team, days_back=365):
     return assignment_data_for_reviewers
-# TODO - see if this is dead code
-def latest_review_requests_for_reviewers(team, days_back=365):
-    """Collect and return stats for reviewers on latest requests, in
-    extract_review_request_data format."""
-    extracted_data = extract_review_request_data(
-        teams=[team],
-        time_from=datetime.date.today() - datetime.timedelta(days=days_back),
-        ordering=["reviewer"],
-    )
-    req_data_for_reviewers = {
-        reviewer: list(reversed(list(req_data_items)))
-        for reviewer, req_data_items in itertools.groupby(extracted_data, key=lambda data: data.reviewer)
-    }
-    return req_data_for_reviewers
-def make_new_review_request_from_existing(review_req):
-    obj = ReviewRequest()
-    obj.time = review_req.time
-    obj.type = review_req.type
-    obj.doc = review_req.doc
-    obj.team = review_req.team
-    obj.deadline = review_req.deadline
-    obj.requested_rev = review_req.requested_rev
-    obj.requested_by = review_req.requested_by
-    obj.state = ReviewRequestStateName.objects.get(slug="requested")
-    return obj
 def email_review_assignment_change(request, review_assignment, subject, msg, by, notify_secretary, notify_reviewer, notify_requested_by):
     system_email = Person.objects.get(name="(System)").formatted_email()
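
For reference, the nested positive_days helper (deleted above with the dead extract_review_request_data, and still present inside extract_review_assignment_data) clamps negative spans to zero and reports fractional days. A self-contained copy, with a worked usage example whose dates are invented for illustration:

import datetime

def positive_days(time_from, time_to):
    # span in fractional days; 0.0 if time_to precedes time_from;
    # None if either endpoint is missing
    if time_from is None or time_to is None:
        return None
    delta = time_to - time_from
    seconds = delta.total_seconds()
    if seconds > 0:
        return seconds / float(24 * 60 * 60)
    else:
        return 0.0

assigned = datetime.datetime(2019, 3, 1, 12, 0)
closed = assigned + datetime.timedelta(hours=36)
assert positive_days(assigned, closed) == 1.5   # closed 36h after assignment
assert positive_days(closed, assigned) == 0.0   # never negative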

View file

@@ -4,6 +4,8 @@ from mock import patch
 from pyquery import PyQuery
 from requests import Response
+import debug # pyflakes:ignore
 from django.urls import reverse as urlreverse
 from django.contrib.auth.models import User
@@ -157,8 +159,8 @@ class StatisticsTests(TestCase):
     def test_review_stats(self):
         reviewer = PersonFactory()
-        review_req = ReviewRequestFactory()
-        ReviewAssignmentFactory(review_request=review_req, reviewer=reviewer.email_set.first())
+        review_req = ReviewRequestFactory(state_id='assigned')
+        ReviewAssignmentFactory(review_request=review_req, state_id='assigned', reviewer=reviewer.email_set.first())
         RoleFactory(group=review_req.team,name_id='reviewer',person=reviewer)
         ReviewerSettingsFactory(team=review_req.team, person=reviewer)
         PersonFactory(user__username='plain')

View file

@@ -18,15 +18,15 @@ from django.utils.safestring import mark_safe
 import debug # pyflakes:ignore
-from ietf.review.utils import (extract_review_request_data,
-                               aggregate_raw_period_review_request_stats,
-                               ReviewRequestData,
-                               sum_period_review_request_stats,
-                               sum_raw_review_request_aggregations)
+from ietf.review.utils import (extract_review_assignment_data,
+                               aggregate_raw_period_review_assignment_stats,
+                               ReviewAssignmentData,
+                               sum_period_review_assignment_stats,
+                               sum_raw_review_assignment_aggregations)
 from ietf.submit.models import Submission
 from ietf.group.models import Role, Group
 from ietf.person.models import Person
-from ietf.name.models import ReviewRequestStateName, ReviewResultName, CountryName, DocRelationshipName
+from ietf.name.models import ReviewResultName, CountryName, DocRelationshipName, ReviewAssignmentStateName
 from ietf.person.name import plain_name
 from ietf.doc.models import DocAlias, Document, State, DocEvent
 from ietf.meeting.models import Meeting
@@ -1091,7 +1091,7 @@ def review_stats(request, stats_type=None, acronym=None):
             query_reviewers = None
             group_by_objs = { t.pk: t for t in query_teams }
-            group_by_index = ReviewRequestData._fields.index("team")
+            group_by_index = ReviewAssignmentData._fields.index("team")
         elif level == "reviewer":
             for t in teams:
@@ -1102,9 +1102,9 @@ def review_stats(request, stats_type=None, acronym=None):
                 return HttpResponseRedirect(urlreverse(review_stats))
             query_reviewers = list(Person.objects.filter(
-                email__reviewrequest__time__gte=from_time,
-                email__reviewrequest__time__lte=to_time,
-                email__reviewrequest__team=reviewers_for_team,
+                email__reviewassignment__review_request__time__gte=from_time,
+                email__reviewassignment__review_request__time__lte=to_time,
+                email__reviewassignment__review_request__team=reviewers_for_team,
                 **reviewer_filter_args.get(t.pk, {})
             ).distinct())
             query_reviewers.sort(key=lambda p: p.last_name())
@@ -1112,7 +1112,7 @@ def review_stats(request, stats_type=None, acronym=None):
             query_teams = [t]
             group_by_objs = { r.pk: r for r in query_reviewers }
-            group_by_index = ReviewRequestData._fields.index("reviewer")
+            group_by_index = ReviewAssignmentData._fields.index("reviewer")
         # now filter and aggregate the data
         possible_teams = possible_completion_types = possible_results = possible_states = None
@@ -1136,9 +1136,9 @@ def review_stats(request, stats_type=None, acronym=None):
             )
             query_teams = [t for t in query_teams if t.acronym in selected_teams]
-            extracted_data = extract_review_request_data(query_teams, query_reviewers, from_time, to_time)
+            extracted_data = extract_review_assignment_data(query_teams, query_reviewers, from_time, to_time)
-            req_time_index = ReviewRequestData._fields.index("req_time")
+            req_time_index = ReviewAssignmentData._fields.index("req_time")
             def time_key_fn(t):
                 d = t[req_time_index].date()
@@ -1150,8 +1150,8 @@ def review_stats(request, stats_type=None, acronym=None):
             found_states = set()
             aggrs = []
             for d, request_data_items in itertools.groupby(extracted_data, key=time_key_fn):
-                raw_aggr = aggregate_raw_period_review_request_stats(request_data_items, count=count)
-                aggr = sum_period_review_request_stats(raw_aggr)
+                raw_aggr = aggregate_raw_period_review_assignment_stats(request_data_items, count=count)
+                aggr = sum_period_review_assignment_stats(raw_aggr)
                 aggrs.append((d, aggr))
@@ -1161,7 +1161,7 @@ def review_stats(request, stats_type=None, acronym=None):
                     found_states.add(slug)
             results = ReviewResultName.objects.filter(slug__in=found_results)
-            states = ReviewRequestStateName.objects.filter(slug__in=found_states)
+            states = ReviewAssignmentStateName.objects.filter(slug__in=found_states)
             # choice
@@ -1210,7 +1210,7 @@ def review_stats(request, stats_type=None, acronym=None):
                 }])
         else: # tabular data
-            extracted_data = extract_review_request_data(query_teams, query_reviewers, from_time, to_time, ordering=[level])
+            extracted_data = extract_review_assignment_data(query_teams, query_reviewers, from_time, to_time, ordering=[level])
             data = []
@@ -1218,10 +1218,10 @@ def review_stats(request, stats_type=None, acronym=None):
             found_states = set()
             raw_aggrs = []
             for group_pk, request_data_items in itertools.groupby(extracted_data, key=lambda t: t[group_by_index]):
-                raw_aggr = aggregate_raw_period_review_request_stats(request_data_items, count=count)
+                raw_aggr = aggregate_raw_period_review_assignment_stats(request_data_items, count=count)
                 raw_aggrs.append(raw_aggr)
-                aggr = sum_period_review_request_stats(raw_aggr)
+                aggr = sum_period_review_assignment_stats(raw_aggr)
                 # skip zero-valued rows
                 if aggr["open"] == 0 and aggr["completed"] == 0 and aggr["not_completed"] == 0:
@@ -1238,12 +1238,12 @@ def review_stats(request, stats_type=None, acronym=None):
             # add totals row
             if len(raw_aggrs) > 1:
-                totals = sum_period_review_request_stats(sum_raw_review_request_aggregations(raw_aggrs))
+                totals = sum_period_review_assignment_stats(sum_raw_review_assignment_aggregations(raw_aggrs))
                 totals["obj"] = "Totals"
                 data.append(totals)
             results = ReviewResultName.objects.filter(slug__in=found_results)
-            states = ReviewRequestStateName.objects.filter(slug__in=found_states)
+            states = ReviewAssignmentStateName.objects.filter(slug__in=found_states)
             # massage states/results breakdowns for template rendering
             for aggr in data:
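
One design point worth noting in both aggregation branches above: itertools.groupby only merges adjacent rows, which is why extract_review_assignment_data is asked to sort by the same key (ordering=[level]) that the view then groups on. A tiny illustration with made-up rows:

import itertools

rows = [("team-a", 1), ("team-a", 2), ("team-b", 3)]   # already sorted by team
for team, items in itertools.groupby(rows, key=lambda t: t[0]):
    print(team, [n for _, n in items])
# team-a [1, 2]
# team-b [3]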