=== added file 'dashboard_app/filters.py'
@@ -0,0 +1,402 @@
+
+# A test run filter allows a user to produce an ordered list of results of
+# interest.
+
+# The data that makes up a filter are:
+#
+# * A non-empty set of bundle streams
+# * A possibly empty set of (attribute-name, attribute-value) pairs
+# * A possibly empty list of tests, each of which has a possibly empty list of
+# test cases
+# * An optional build number attribute name
+
+# A filter matches a test run if:
+#
+# * It is part of a bundle that is in one of the specified streams
+# * It has all the attribute names with the specified values (or there are no
+# attributes specified)
+# * The test of the test run is one of those specified (or there are no test
+# runs specified)
+# * One of the results of the test run is one of those specified (or there are
+# no test cases specified)
+# * The build number attribute is present, if specified.
+#
+# The test runs matching a filter are grouped, either by the upload date of
+# the bundle or by the value of the build number attribute.
+
+# We define several representations for this data:
+#
+# * One is the TestRunFilter and related tables (the "model representation").
+# These have some representation specific metadata that does not relate to
+# the test runs the filter selects: names, owner, the "public" flag.
+
+# * One is the natural Python data structure for the data (the "in-memory
+# representation"), i.e.
+# {
+# bundle_streams: [<BundleStream objects>],
+# attributes: [(attr-name, attr-value)],
+# tests: [{"test": <Test instance>, "test_cases":[<TestCase instances>]}],
+# build_number_attribute: attr-name-or-None,
+# uploaded_by: <User instance-or-None>,
+# }
+# This is the representation that is used to evaluate a filter (so that
+# previewing new filters can be done without having to create a
+# TestRunFilter instance that we carefully don't save to the database --
+# which doesn't work very well anyway with all the ManyToMany relations
+# involved)
+
+# * The final one is the TRFForm object defined in
+# dashboard_app.views.filters.forms (the "form representation")
+# (pedantically, the rendered form of this is yet another
+# representation...). This representation is the only one other than the
+# model objects to include the name/owner/public metadata.
+
+# evaluate_filter returns a sort of fake QuerySet. Iterating over it returns
+# "FilterMatch" objects, whose attributes are described in the class
+# definition. A FilterMatch also has a serializable representation:
+#
+# {
+# 'tag': either a stringified date (bundle__uploaded_on) or a build number
+# 'test_runs': [{
+# 'test_id': test_id
+# 'link': link-to-test-run,
+#        'pass': int, 'fail': int, 'skip': int, 'unknown': int, 'total': int,
+# # only present if filter specifies cases for this test:
+# 'specific_results': [{
+# 'test_case_id': test_case_id,
+# 'link': link-to-test-result,
+# 'result': pass/fail/skip/unknown,
+# 'measurement': string-containing-decimal-or-None,
+# 'units': units,
+# }],
+# }]
+#    # Only present if the filter does not specify any test cases:
+#    'pass_count': int,
+#    'result_count': int,
+# }
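+
+# For example (an illustrative sketch only -- the stream pathname and
+# attribute pair below are made up, and ``user`` is assumed to be a Django
+# User instance), a new filter can be previewed without ever saving a
+# TestRunFilter:
+#
+#   filter_data = {
+#       'bundle_streams': BundleStream.objects.filter(
+#           pathname='/anonymous/example/'),
+#       'attributes': [('target.device', 'panda')],
+#       'tests': [],
+#       'build_number_attribute': None,
+#       'uploaded_by': None,
+#   }
+#   for match in evaluate_filter(user, filter_data)[:10]:
+#       print match.serializable()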
+
+import datetime
+
+from django.contrib.contenttypes.models import ContentType
+from django.contrib.sites.models import Site
+from django.core.exceptions import ImproperlyConfigured
+from django.db import models
+from django.db.models.sql.aggregates import Aggregate as SQLAggregate
+
+from dashboard_app.models import (
+ BundleStream,
+ NamedAttribute,
+ TestResult,
+ TestRun,
+ )
+
+
+class FilterMatch(object):
+ """A non-database object that represents the way a filter matches a test_run.
+
+ Returned by TestRunFilter.matches_against_bundle and evaluate_filter.
+ """
+
+ filter = None # The model representation of the filter (this is only set
+ # by matches_against_bundle)
+ filter_data = None # The in-memory representation of the filter.
+ tag = None # either a date (bundle__uploaded_on) or a build number
+
+ test_runs = None # Will be all test runs from the bundle if
+ # filter_data['tests'] is empty, will just be the test
+ # runs with matching tests if not.
+
+    specific_results = None # Will stay None unless the filter specifies a test case
+
+    pass_count = None # Only filled out for filters that don't specify any test cases
+    result_count = None # Ditto
+
+ def serializable(self):
+ cases_by_test = {}
+ for test in self.filter_data['tests']:
+ # Not right if filter specifies a test more than once...
+ if test['test_cases']:
+ cases_by_test[test['test']] = test['test_cases']
+ test_runs = []
+
+ domain = '???'
+ try:
+ site = Site.objects.get_current()
+ except (Site.DoesNotExist, ImproperlyConfigured):
+ pass
+ else:
+ domain = site.domain
+ url_prefix = 'http://%s' % domain
+
+ for tr in self.test_runs:
+ d = {
+ 'test_id': tr.test.test_id,
+ 'pass': 0,
+ 'fail': 0,
+ 'skip': 0,
+ 'unknown': 0,
+ 'total': 0,
+ 'link': url_prefix + tr.get_absolute_url(),
+ }
+ if tr.test in cases_by_test:
+ results = d['specific_results'] = []
+ for result in self.specific_results:
+ if result.test_run == tr:
+ result_str = TestResult.RESULT_MAP[result.result]
+ result_data = {
+ 'test_case_id': result.test_case.test_case_id,
+ 'result': result_str,
+ 'link': url_prefix + result.get_absolute_url()
+ }
+ if result.measurement is not None:
+ result_data['measurement'] = str(result.measurement)
+ if result.units is not None:
+ result_data['units'] = str(result.units)
+ results.append(result_data)
+ d[result_str] += 1
+ d['total'] += 1
+ else:
+ d['pass'] = tr.denormalization.count_pass
+ d['fail'] = tr.denormalization.count_fail
+ d['skip'] = tr.denormalization.count_skip
+ d['unknown'] = tr.denormalization.count_unknown
+ d['total'] = tr.denormalization.count_all()
+ test_runs.append(d)
+ r = {
+ 'tag': str(self.tag),
+ 'test_runs': test_runs,
+ }
+ if self.pass_count is not None:
+ r['pass_count'] = self.pass_count
+ if self.result_count is not None:
+ r['result_count'] = self.result_count
+ return r
+
+ def _format_test_result(self, result):
+ prefix = result.test_case.test.test_id + ':' + result.test_case.test_case_id + ' '
+ if result.test_case.units:
+ return prefix + '%s%s' % (result.measurement, result.units)
+ else:
+ return prefix + result.RESULT_MAP[result.result]
+
+ def _format_test_run(self, tr):
+ return "%s %s pass / %s total" % (
+ tr.test.test_id,
+ tr.denormalization.count_pass,
+ tr.denormalization.count_all())
+
+ def _format_many_test_runs(self):
+ return "%s pass / %s total" % (self.pass_count, self.result_count)
+
+ def format_for_mail(self):
+ r = [' ~%s/%s ' % (self.filter.owner.username, self.filter.name)]
+ if not self.filter_data['tests']:
+ r.append(self._format_many_test_runs())
+ else:
+ for test in self.filter_data['tests']:
+ if not test['test_cases']:
+ for tr in self.test_runs:
+                        if tr.test == test['test']:
+ r.append('\n ')
+ r.append(self._format_test_run(tr))
+ for test_case in test['test_cases']:
+ for result in self.specific_results:
+ if result.test_case.id == test_case.id:
+ r.append('\n ')
+ r.append(self._format_test_result(result))
+ r.append('\n')
+ return ''.join(r)
+
+
+class MatchMakingQuerySet(object):
+ """Wrap a QuerySet and construct FilterMatchs from what the wrapped query
+ set returns.
+
+ Just enough of the QuerySet API to work with DataTable (i.e. pretend
+ ordering and real slicing)."""
+
+ model = TestRun
+
+ def __init__(self, queryset, filter_data, prefetch_related):
+ self.queryset = queryset
+ self.filter_data = filter_data
+ self.prefetch_related = prefetch_related
+ if filter_data['build_number_attribute']:
+ self.key = 'build_number'
+ self.key_name = 'Build'
+ else:
+ self.key = 'bundle__uploaded_on'
+ self.key_name = 'Uploaded On'
+
+ def _makeMatches(self, data):
+ test_run_ids = set()
+ for datum in data:
+ test_run_ids.update(datum['id__arrayagg'])
+ r = []
+ trs = TestRun.objects.filter(id__in=test_run_ids).select_related(
+ 'denormalization', 'bundle', 'bundle__bundle_stream', 'test').prefetch_related(
+ *self.prefetch_related)
+ trs_by_id = {}
+ for tr in trs:
+ trs_by_id[tr.id] = tr
+ case_ids = set()
+ for t in self.filter_data['tests']:
+ for case in t['test_cases']:
+ case_ids.add(case.id)
+ if case_ids:
+ result_ids_by_tr_id = {}
+ results_by_tr_id = {}
+ values = TestResult.objects.filter(
+ test_case__id__in=case_ids,
+ test_run__id__in=test_run_ids).values_list(
+ 'test_run__id', 'id')
+ result_ids = set()
+ for v in values:
+ result_ids_by_tr_id.setdefault(v[0], []).append(v[1])
+ result_ids.add(v[1])
+
+ results_by_id = {}
+ for result in TestResult.objects.filter(
+ id__in=list(result_ids)).select_related(
+ 'test', 'test_case', 'test_run__bundle__bundle_stream'):
+ results_by_id[result.id] = result
+
+ for tr_id, result_ids in result_ids_by_tr_id.items():
+ rs = results_by_tr_id[tr_id] = []
+ for result_id in result_ids:
+ rs.append(results_by_id[result_id])
+ for datum in data:
+ trs = []
+ for tr_id in set(datum['id__arrayagg']):
+ trs.append(trs_by_id[tr_id])
+ match = FilterMatch()
+ match.test_runs = trs
+ match.filter_data = self.filter_data
+ match.tag = datum[self.key]
+ if case_ids:
+ match.specific_results = []
+ for tr_id in set(datum['id__arrayagg']):
+ match.specific_results.extend(results_by_tr_id.get(tr_id, []))
+ else:
+ match.pass_count = sum(tr.denormalization.count_pass for tr in trs)
+ match.result_count = sum(tr.denormalization.count_all() for tr in trs)
+ r.append(match)
+ return iter(r)
+
+ def _wrap(self, queryset, **kw):
+ return self.__class__(queryset, self.filter_data, self.prefetch_related, **kw)
+
+ def order_by(self, *args):
+ # the generic tables code calls this even when it shouldn't...
+ return self
+
+ def since(self, since):
+ if self.key == 'build_number':
+ q = self.queryset.extra(
+ where=['convert_to_integer("dashboard_app_namedattribute"."value") > %d' % since]
+ )
+ else:
+ assert isinstance(since, datetime.datetime)
+ q = self.queryset.filter(bundle__uploaded_on__gt=since)
+ return self._wrap(q)
+
+ def count(self):
+ return self.queryset.count()
+
+ def __getitem__(self, item):
+ return self._wrap(self.queryset[item])
+
+ def __iter__(self):
+ data = list(self.queryset)
+ return self._makeMatches(data)
+
+
+class SQLArrayAgg(SQLAggregate):
+ sql_function = 'array_agg'
+
+
+class ArrayAgg(models.Aggregate):
+ name = 'ArrayAgg'
+ def add_to_query(self, query, alias, col, source, is_summary):
+ aggregate = SQLArrayAgg(
+ col, source=source, is_summary=is_summary, **self.extra)
+ # For way more detail than you want about what this next line is for,
+ # see
+ # http://voices.canonical.com/michael.hudson/2012/09/02/using-postgres-array_agg-from-django/
+ aggregate.field = models.DecimalField() # vomit
+ query.aggregates[alias] = aggregate
+
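+
+# ArrayAgg is used below as .values(<key>).annotate(ArrayAgg('id')): on
+# PostgreSQL this yields one row per key value, with the ids of the matching
+# test runs collected into a list, e.g. (illustrative values only):
+#
+#   {'build_number': 42, 'id__arrayagg': [101, 105, 107]}
+#
+# MatchMakingQuerySet._makeMatches relies on the 'id__arrayagg' key of these
+# rows.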
+
+# given filter:
+# select from testrun
+# where testrun.bundle in filter.bundle_streams ^ accessible_bundles
+# and testrun has attribute with key = key1 and value = value1
+# and testrun has attribute with key = key2 and value = value2
+# and ...
+# and testrun has attribute with key = keyN and value = valueN
+# and testrun has any of the tests/testcases requested
+# [and testrun has attribute with key = build_number_attribute]
+# [and testrun.bundle.uploaded_by = uploaded_by]
+def evaluate_filter(user, filter_data, prefetch_related=[], descending=True):
+ accessible_bundle_streams = BundleStream.objects.accessible_by_principal(
+ user)
+ bs_ids = list(
+ accessible_bundle_streams.filter(
+ id__in=[bs.id for bs in filter_data['bundle_streams']]).values_list('id', flat=True))
+ conditions = [models.Q(bundle__bundle_stream__id__in=bs_ids)]
+
+ content_type_id = ContentType.objects.get_for_model(TestRun).id
+
+ for (name, value) in filter_data['attributes']:
+ # We punch through the generic relation abstraction here for 100x
+ # better performance.
+ conditions.append(
+ models.Q(id__in=NamedAttribute.objects.filter(
+ name=name, value=value, content_type_id=content_type_id
+ ).values('object_id')))
+
+ test_condition = None
+ for test in filter_data['tests']:
+ case_ids = set()
+ for test_case in test['test_cases']:
+ case_ids.add(test_case.id)
+ if case_ids:
+ q = models.Q(
+ test__id=test['test'].id,
+ test_results__test_case__id__in=case_ids)
+ else:
+ q = models.Q(test__id=test['test'].id)
+ if test_condition:
+ test_condition = test_condition | q
+ else:
+ test_condition = q
+ if test_condition:
+ conditions.append(test_condition)
+
+ if filter_data['uploaded_by']:
+ conditions.append(models.Q(bundle__uploaded_by=filter_data['uploaded_by']))
+
+ testruns = TestRun.objects.filter(*conditions)
+
+ if filter_data['build_number_attribute']:
+ if descending:
+ ob = ['-build_number']
+ else:
+ ob = ['build_number']
+ testruns = testruns.filter(
+ attributes__name=filter_data['build_number_attribute']).extra(
+ select={
+ 'build_number': 'convert_to_integer("dashboard_app_namedattribute"."value")',
+ },
+ where=['convert_to_integer("dashboard_app_namedattribute"."value") IS NOT NULL']).extra(
+ order_by=ob,
+ ).values('build_number').annotate(ArrayAgg('id'))
+ else:
+ if descending:
+ ob = '-bundle__uploaded_on'
+ else:
+ ob = 'bundle__uploaded_on'
+ testruns = testruns.order_by(ob).values(
+ 'bundle__uploaded_on').annotate(ArrayAgg('id'))
+
+ return MatchMakingQuerySet(testruns, filter_data, prefetch_related)
=== modified file 'dashboard_app/models.py'
@@ -43,7 +43,6 @@
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.signals import post_delete
-from django.db.models.sql.aggregates import Aggregate as SQLAggregate
from django.dispatch import receiver
from django.template import Template, Context
from django.template.defaultfilters import filesizeformat
@@ -1529,148 +1528,6 @@
field.storage.delete(field.path)
-class FilterMatch(object):
- """A non-database object that represents the way a filter matches a test_run.
-
- Returned by TestRunFilter.matches_against_bundle and
- TestRunFilter.get_test_runs.
- """
-
- filter = None
- tag = None # either a date (bundle__uploaded_on) or a build number
- test_runs = None
- specific_results = None # Will stay none unless filter specifies a test case
- pass_count = None # Only filled out for filters that dont specify a test
- result_code = None # Ditto
-
- def _format_test_result(self, result):
- prefix = result.test_case.test.test_id + ':' + result.test_case.test_case_id + ' '
- if result.test_case.units:
- return prefix + '%s%s' % (result.measurement, result.units)
- else:
- return prefix + result.RESULT_MAP[result.result]
-
- def _format_test_run(self, tr):
- return "%s %s pass / %s total" % (
- tr.test.test_id,
- tr.denormalization.count_pass,
- tr.denormalization.count_all())
-
- def _format_many_test_runs(self):
- return "%s pass / %s total" % (self.pass_count, self.result_count)
-
- def format_for_mail(self):
- r = [' ~%s/%s ' % (self.filter.owner.username, self.filter.name)]
- if not self.filter_data['tests']:
- r.append(self._format_many_test_runs())
- else:
- for test in self.filter_data['tests']:
- if not test.all_case_ids():
- for tr in self.test_runs:
- if tr.test == test.test:
- r.append('\n ')
- r.append(self._format_test_run(tr))
- for case_id in test.all_case_ids():
- for result in self.specific_results:
- if result.test_case.id == case_id:
- r.append('\n ')
- r.append(self._format_test_result(result))
- r.append('\n')
- return ''.join(r)
-
-
-class MatchMakingQuerySet(object):
- """Wrap a QuerySet and construct FilterMatchs from what the wrapped query
- set returns.
-
- Just enough of the QuerySet API to work with DataTable (i.e. pretend
- ordering and real slicing)."""
-
- model = TestRun
-
- def __init__(self, queryset, filter_data, prefetch_related):
- self.queryset = queryset
- self.filter_data = filter_data
- self.prefetch_related = prefetch_related
- if filter_data['build_number_attribute']:
- self.key = 'build_number'
- self.key_name = 'Build'
- else:
- self.key = 'bundle__uploaded_on'
- self.key_name = 'Uploaded On'
-
- def _makeMatches(self, data):
- test_run_ids = set()
- for datum in data:
- test_run_ids.update(datum['id__arrayagg'])
- r = []
- trs = TestRun.objects.filter(id__in=test_run_ids).select_related(
- 'denormalization', 'bundle', 'bundle__bundle_stream', 'test').prefetch_related(
- *self.prefetch_related)
- trs_by_id = {}
- for tr in trs:
- trs_by_id[tr.id] = tr
- case_ids = set()
- for t in self.filter_data['tests']:
- case_ids.update(t.all_case_ids())
- if case_ids:
- result_ids_by_tr_id = {}
- results_by_tr_id = {}
- values = TestResult.objects.filter(
- test_case__id__in=case_ids,
- test_run__id__in=test_run_ids).values_list(
- 'test_run__id', 'id')
- result_ids = set()
- for v in values:
- result_ids_by_tr_id.setdefault(v[0], []).append(v[1])
- result_ids.add(v[1])
-
- results_by_id = {}
- for result in TestResult.objects.filter(
- id__in=list(result_ids)).select_related(
- 'test', 'test_case', 'test_run__bundle__bundle_stream'):
- results_by_id[result.id] = result
-
- for tr_id, result_ids in result_ids_by_tr_id.items():
- rs = results_by_tr_id[tr_id] = []
- for result_id in result_ids:
- rs.append(results_by_id[result_id])
- for datum in data:
- trs = []
- for id in set(datum['id__arrayagg']):
- trs.append(trs_by_id[id])
- match = FilterMatch()
- match.test_runs = trs
- match.filter_data = self.filter_data
- match.tag = datum[self.key]
- if case_ids:
- match.specific_results = []
- for id in set(datum['id__arrayagg']):
- match.specific_results.extend(results_by_tr_id.get(id, []))
- else:
- match.pass_count = sum(tr.denormalization.count_pass for tr in trs)
- match.result_count = sum(tr.denormalization.count_all() for tr in trs)
- r.append(match)
- return iter(r)
-
- def _wrap(self, queryset, **kw):
- return self.__class__(queryset, self.filter_data, self.prefetch_related, **kw)
-
- def order_by(self, *args):
- # the generic tables code calls this even when it shouldn't...
- return self
-
- def count(self):
- return self.queryset.count()
-
- def __getitem__(self, item):
- return self._wrap(self.queryset[item])
-
- def __iter__(self):
- data = list(self.queryset)
- return self._makeMatches(data)
-
-
class TestRunFilterAttribute(models.Model):
name = models.CharField(max_length=1024)
@@ -1689,12 +1546,6 @@
index = models.PositiveIntegerField(
help_text = _(u"The index of this test in the filter"))
- def all_case_ids(self):
- return self.cases.all().order_by('index').values_list('test_case__id', flat=True)
-
- def all_case_names(self):
- return self.cases.all().order_by('index').values_list('test_case__test_case_id', flat=True)
-
def __unicode__(self):
return unicode(self.test)
@@ -1710,22 +1561,6 @@
return unicode(self.test_case)
-class SQLArrayAgg(SQLAggregate):
- sql_function = 'array_agg'
-
-
-class ArrayAgg(models.Aggregate):
- name = 'ArrayAgg'
- def add_to_query(self, query, alias, col, source, is_summary):
- aggregate = SQLArrayAgg(
- col, source=source, is_summary=is_summary, **self.extra)
- # For way more detail than you want about what this next line is for,
- # see
- # http://voices.canonical.com/michael.hudson/2012/09/02/using-postgres-array_agg-from-django/
- aggregate.field = models.DecimalField() # vomit
- query.aggregates[alias] = aggregate
-
-
class TestRunFilter(models.Model):
owner = models.ForeignKey(User)
@@ -1757,86 +1592,24 @@
User, null=True, blank=True, related_name='+',
help_text="Only consider bundles uploaded by this user")
- @property
- def summary_data(self):
+ def as_data(self):
+ tests = []
+ for trftest in self.tests.order_by('index').prefetch_related('cases'):
+ tests.append({
+ 'test': trftest.test,
+ 'test_cases': [trftestcase.test_case for trftestcase in trftest.cases.all().select_related('test_case')],
+ })
return {
'bundle_streams': self.bundle_streams.all(),
'attributes': self.attributes.all().values_list('name', 'value'),
- 'tests': self.tests.all().prefetch_related('cases'),
+ 'tests': tests,
'build_number_attribute': self.build_number_attribute,
+ 'uploaded_by': self.uploaded_by,
}
def __unicode__(self):
return "<TestRunFilter ~%s/%s>" % (self.owner.username, self.name)
- # given filter:
- # select from testrun
- # where testrun.bundle in filter.bundle_streams ^ accessible_bundles
- # and testrun has attribute with key = key1 and value = value1
- # and testrun has attribute with key = key2 and value = value2
- # and ...
- # and testrun has attribute with key = keyN and value = valueN
- # and testrun has any of the tests/testcases requested
-
- def get_test_runs_impl(self, user, bundle_streams, attributes, tests, prefetch_related=[]):
- accessible_bundle_streams = BundleStream.objects.accessible_by_principal(
- user)
- bs_ids = [bs.id for bs in set(accessible_bundle_streams) & set(bundle_streams)]
- conditions = [models.Q(bundle__bundle_stream__id__in=bs_ids)]
-
- content_type_id = ContentType.objects.get_for_model(TestRun).id
-
- for (name, value) in attributes:
- # We punch through the generic relation abstraction here for 100x
- # better performance.
- conditions.append(
- models.Q(id__in=NamedAttribute.objects.filter(
- name=name, value=value, content_type_id=content_type_id
- ).values('object_id')))
-
- test_condition = None
- for test in tests:
- cases = list(test.all_case_ids())
- if cases:
- q = models.Q(
- test__id=test.test.id,
- test_results__test_case__id__in=cases)
- else:
- q = models.Q(test__id=test.test.id)
- if test_condition:
- test_condition = test_condition | q
- else:
- test_condition = q
- if test_condition:
- conditions.append(test_condition)
-
- if self.uploaded_by:
- conditions.append(models.Q(bundle__uploaded_by=self.uploaded_by))
-
- testruns = TestRun.objects.filter(*conditions)
-
- if self.build_number_attribute:
- testruns = testruns.filter(
- attributes__name=self.build_number_attribute).extra(
- select={
- 'build_number': 'convert_to_integer("dashboard_app_namedattribute"."value")',
- },
- where=['convert_to_integer("dashboard_app_namedattribute"."value") IS NOT NULL']).extra(
- order_by=['-build_number'],
- ).values('build_number').annotate(ArrayAgg('id'))
- else:
- testruns = testruns.order_by('-bundle__uploaded_on').values(
- 'bundle__uploaded_on').annotate(ArrayAgg('id'))
-
- filter_data = {
- 'bundle_streams': bundle_streams,
- 'attributes': attributes,
- 'tests': tests,
- 'build_number_attribute': self.build_number_attribute,
- }
-
- return MatchMakingQuerySet(testruns, filter_data, prefetch_related)
-
# given bundle:
# select from filter
# where bundle.bundle_stream in filter.bundle_streams
@@ -1848,6 +1621,7 @@
@classmethod
def matches_against_bundle(self, bundle):
+ from dashboard_app.filters import FilterMatch
bundle_filters = bundle.bundle_stream.testrunfilter_set.all()
attribute_filters = bundle_filters.extra(
where=[
@@ -1901,14 +1675,6 @@
matches.append(match)
return matches
- def get_test_runs(self, user, prefetch_related=[]):
- return self.get_test_runs_impl(
- user,
- self.bundle_streams.all(),
- self.attributes.values_list('name', 'value'),
- self.tests.all(),
- prefetch_related)
-
@models.permalink
def get_absolute_url(self):
return (
@@ -1960,7 +1726,7 @@
failure_found = match.pass_count != match.result_count
else:
for t in match.filter_data['tests']:
- if not t.all_case_ids():
+ if not t['test_cases']:
for tr in match.test_runs:
-                            if tr.test == t.test:
+                            if tr.test == t['test']:
if tr.denormalization.count_pass != tr.denormalization.count_all():
=== modified file 'dashboard_app/templates/dashboard_app/filter_detail.html'
@@ -6,7 +6,7 @@
<h1>[BETA] Filter {{ filter.name }}</h1>
-{% include "dashboard_app/filter_summary.html" with summary_data=filter.summary_data %}
+{% include "dashboard_app/filter_summary.html" with filter_data=filter.as_data %}
{% if filter.owner == request.user %}
<p>
=== modified file 'dashboard_app/templates/dashboard_app/filter_preview.html'
@@ -14,7 +14,7 @@
<h1>[BETA] Previewing new filter “{{ form.name.value }}”</h1>
{% endif %}
-{% include "dashboard_app/filter_summary.html" with summary_data=form.summary_data %}
+{% include "dashboard_app/filter_summary.html" with filter_data=filter.as_data %}
<p>
These are the results matched by your filter.
=== modified file 'dashboard_app/templates/dashboard_app/filter_summary.html'
@@ -4,30 +4,30 @@
Bundle streams
</th>
<td>
- {% for stream in summary_data.bundle_streams.all %}
+ {% for stream in filter_data.bundle_streams %}
{{stream.pathname}}{% if not forloop.last %}, {% endif %}
{% endfor %}
</td>
</tr>
-{% if summary_data.attributes %}
+{% if filter_data.attributes %}
<tr>
<th>
Attributes
</th>
<td>
- {% for a in summary_data.attributes %}
+ {% for a in filter_data.attributes %}
{{ a.0 }} == {{ a.1 }} <br />
{% endfor %}
</td>
</tr>
{% endif %}
-{% if summary_data.build_number_attribute %}
+{% if filter_data.build_number_attribute %}
<tr>
<th>
Build Number Attribute
</th>
<td>
- {{ summary_data.build_number_attribute }}
+ {{ filter_data.build_number_attribute }}
</td>
</tr>
{% endif %}
@@ -38,19 +38,21 @@
<td>
<table>
<tbody>
- {% for test in summary_data.tests %}
+ {% for test in filter_data.tests %}
<tr>
<td>
{{ test.test }}
</td>
<td>
- {% for test_case in test.all_case_names %}
+ {% for test_case in test.test_cases %}
{{ test_case }}
{% empty %}
<i>any</i>
{% endfor %}
</td>
</tr>
+ {% empty %}
+        <tr><td><i>any</i></td></tr>
{% endfor %}
</tbody>
</table>
=== modified file 'dashboard_app/views/filters/forms.py'
@@ -207,15 +207,21 @@
self.attributes_formset.full_clean()
self.tests_formset.full_clean()
- @property
- def summary_data(self):
+ def as_data(self):
+ assert self.is_valid(), self.errors
data = self.cleaned_data.copy()
tests = []
for form in self.tests_formset.forms:
- tests.append(FakeTRFTest(form))
+ tests.append({
+ 'test': form.cleaned_data['test'],
+ 'test_cases': [
+ tc_form.cleaned_data['test_case']
+ for tc_form in form.test_case_formset]
+ })
data['attributes'] = [
(d['name'], d['value']) for d in self.attributes_formset.cleaned_data]
data['tests'] = tests
+ data['uploaded_by'] = None
return data
def __init__(self, user, *args, **kwargs):
@@ -251,12 +257,3 @@
BundleStream.objects.accessible_by_principal(user).order_by('pathname')
self.fields['name'].validators.append(self.validate_name)
- def get_test_runs(self, user):
- assert self.is_valid(), self.errors
- filter = self.save(commit=False)
- tests = []
- for form in self.tests_formset.forms:
- tests.append(FakeTRFTest(form))
- return filter.get_test_runs_impl(
- user, self.cleaned_data['bundle_streams'], self.summary_data['attributes'], tests)
-
=== modified file 'dashboard_app/views/filters/tables.py'
@@ -25,6 +25,7 @@
from lava.utils.data_tables.tables import DataTablesTable
+from dashboard_app.filters import evaluate_filter
from dashboard_app.models import (
TestRunFilter,
TestRunFilterSubscription,
@@ -57,14 +58,14 @@
test = TemplateColumn('''
<table style="border-collapse: collapse">
<tbody>
- {% for test in record.tests.all %}
+ {% for trftest in record.tests.all %}
<tr>
<td>
- {{ test.test }}
+ {{ trftest.test }}
</td>
<td>
- {% for test_case in test.all_case_names %}
- {{ test_case }}
+ {% for trftest_case in trftest.cases.all %}
+ {{ trftest_case.test_case.test_case_id }}
{% empty %}
<i>any</i>
{% endfor %}
@@ -119,19 +120,21 @@
class SpecificCaseColumn(Column):
- def __init__(self, verbose_name, test_case_id):
+ def __init__(self, test_case, verbose_name=None):
+ if verbose_name is None:
+ verbose_name = mark_safe(test_case.test_case_id)
super(SpecificCaseColumn, self).__init__(verbose_name)
- self.test_case_id = test_case_id
+ self.test_case = test_case
def render(self, record):
r = []
for result in record.specific_results:
- if result.test_case_id != self.test_case_id:
+ if result.test_case_id != self.test_case.id:
continue
if result.result == result.RESULT_PASS and result.units:
s = '%s %s' % (result.measurement, result.units)
else:
s = result.RESULT_MAP[result.result]
- r.append('<a href="' + result.get_absolute_url() + '">'+s+'</a>')
+ r.append('<a href="' + result.get_absolute_url() + '">'+escape(s)+'</a>')
return mark_safe(', '.join(r))
@@ -154,23 +157,24 @@
del self.base_columns['passes']
del self.base_columns['total']
for i, t in enumerate(reversed(match_maker.filter_data['tests'])):
- if len(t.all_case_names()) == 0:
- col = TestRunColumn(mark_safe(t.test.test_id))
+ if len(t['test_cases']) == 0:
+ col = TestRunColumn(mark_safe(t['test'].test_id))
self.base_columns.insert(0, 'test_run_%s' % i, col)
- elif len(t.all_case_names()) == 1:
- n = t.test.test_id + ':' + t.all_case_names()[0]
- col = SpecificCaseColumn(mark_safe(n), t.all_case_ids()[0])
+ elif len(t['test_cases']) == 1:
+ tc = t['test_cases'][0]
+ n = t['test'].test_id + ':' + tc.test_case_id
+ col = SpecificCaseColumn(tc, n)
self.base_columns.insert(0, 'test_run_%s_case' % i, col)
else:
- col0 = SpecificCaseColumn(mark_safe(t.all_case_names()[0]), t.all_case_ids()[0])
+ col0 = SpecificCaseColumn(t['test_cases'][0])
col0.in_group = True
col0.first_in_group = True
- col0.group_length = len(t.all_case_names())
- col0.group_name = mark_safe(t.test.test_id)
+ col0.group_length = len(t['test_cases'])
+ col0.group_name = mark_safe(t['test'].test_id)
self.complex_header = True
self.base_columns.insert(0, 'test_run_%s_case_%s' % (i, 0), col0)
- for j, n in enumerate(t.all_case_names()[1:], 1):
- col = SpecificCaseColumn(mark_safe(n), t.all_case_ids()[j])
+ for j, tc in enumerate(t['test_cases'][1:], 1):
+ col = SpecificCaseColumn(tc)
col.in_group = True
col.first_in_group = False
self.base_columns.insert(j, 'test_run_%s_case_%s' % (i, j), col)
@@ -204,7 +208,7 @@
total = Column(accessor='result_count')
def get_queryset(self, user, filter):
- return filter.get_test_runs(user)
+ return evaluate_filter(user, filter.as_data())
datatable_opts = {
"sPaginationType": "full_numbers",
@@ -215,7 +219,7 @@
class FilterPreviewTable(FilterTable):
def get_queryset(self, user, form):
- return form.get_test_runs(user)
+ return evaluate_filter(user, form.as_data())
datatable_opts = FilterTable.datatable_opts.copy()
datatable_opts.update({
=== modified file 'dashboard_app/views/images.py'
@@ -27,6 +27,7 @@
BreadCrumbTrail,
)
+from dashboard_app.filters import evaluate_filter
from dashboard_app.models import (
LaunchpadBug,
Image,
@@ -46,9 +47,10 @@
# Migration hack: Image.filter cannot be auto populated, so ignore
# images that have not been migrated to filters for now.
if image.filter:
+ filter_data = image.filter.as_data()
image_data = {
'name': image.name,
- 'bundle_count': image.filter.get_test_runs(request.user).count(),
+ 'bundle_count': evaluate_filter(request.user, filter_data).count(),
'link': image.name,
}
images_data.append(image_data)
@@ -70,7 +72,8 @@
def image_report_detail(request, name):
image = Image.objects.get(name=name)
- matches = image.filter.get_test_runs(request.user, prefetch_related=['launchpad_bugs'])[:50]
+ filter_data = image.filter.as_data()
+ matches = evaluate_filter(request.user, filter_data, prefetch_related=['launchpad_bugs'])[:50]
build_number_to_cols = {}
=== modified file 'dashboard_app/xmlrpc.py'
@@ -20,8 +20,10 @@
XMP-RPC API
"""
+import datetime
import decimal
import logging
+import re
import xmlrpclib
from django.contrib.auth.models import User, Group
@@ -34,11 +36,13 @@
)
from dashboard_app import __version__
+from dashboard_app.filters import evaluate_filter
from dashboard_app.models import (
Bundle,
BundleStream,
DataView,
Test,
+ TestRunFilter,
)
@@ -50,6 +54,8 @@
"""
AUTH_FAILED = 100
AUTH_BLOCKED = 101
+ BAD_REQUEST = 400
+ AUTH_REQUIRED = 401
FORBIDDEN = 403
NOT_FOUND = 404
CONFLICT = 409
@@ -718,6 +724,162 @@
} for item in columns]
}
+ def _get_filter_data(self, filter_name):
+ match = re.match("~([-_A-Za-z0-9]+)/([-_A-Za-z0-9]+)", filter_name)
+ if not match:
+ raise xmlrpclib.Fault(errors.BAD_REQUEST, "filter_name must be of form ~owner/filter-name")
+ owner_name, filter_name = match.groups()
+ try:
+ owner = User.objects.get(username=owner_name)
+        except User.DoesNotExist:
+ raise xmlrpclib.Fault(errors.NOT_FOUND, "user %s not found" % owner_name)
+ try:
+ filter = TestRunFilter.objects.get(owner=owner, name=filter_name)
+        except TestRunFilter.DoesNotExist:
+ raise xmlrpclib.Fault(errors.NOT_FOUND, "filter %s not found" % filter_name)
+ if not filter.public and self.user != owner:
+ if self.user:
+ raise xmlrpclib.Fault(
+ errors.FORBIDDEN, "forbidden")
+ else:
+ raise xmlrpclib.Fault(
+ errors.AUTH_REQUIRED, "authentication required")
+ return filter.as_data()
+
+ def get_filter_results(self, filter_name, count=10, offset=0):
+ """
+ Name
+ ----
+ ::
+
+ get_filter_results(filter_name, count=10, offset=0)
+
+ Description
+ -----------
+
+ Return information about the test runs and results that a given filter
+ matches.
+
+ Arguments
+ ---------
+
+ ``filter_name``:
+ The name of a filter in the format ~owner/name.
+ ``count``:
+ The maximum number of matches to return.
+ ``offset``:
+ Skip over this many results.
+
+ Return value
+ ------------
+
+ A list of "filter matches". A filter match describes the results of
+ matching a filter against one or more test runs::
+
+ {
+ 'tag': either a stringified date (bundle__uploaded_on) or a build number
+ 'test_runs': [{
+ 'test_id': test_id
+ 'link': link-to-test-run,
+            'pass': int, 'fail': int, 'skip': int, 'unknown': int, 'total': int,
+ # only present if filter specifies cases for this test:
+ 'specific_results': [{
+ 'test_case_id': test_case_id,
+ 'link': link-to-test-result,
+ 'result': pass/fail/skip/unknown,
+ 'measurement': string-containing-decimal-or-None,
+ 'units': units,
+ }],
+ }]
+          # Only present if the filter does not specify any test cases:
+          'pass_count': int,
+          'result_count': int,
+ }
+
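+        For example (an illustrative sketch only -- the server URL and the
+        filter name below are placeholders)::
+
+            import xmlrpclib
+            server = xmlrpclib.ServerProxy("http://dashboard.example.com/RPC2/")
+            for match in server.dashboard.get_filter_results("~bob/daily", 25):
+                print match['tag'], [tr['test_id'] for tr in match['test_runs']]
+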
+ """
+ filter_data = self._get_filter_data(filter_name)
+ matches = evaluate_filter(self.user, filter_data, descending=False)
+ matches = matches[offset:offset+count]
+ return [match.serializable() for match in matches]
+
+ def get_filter_results_since(self, filter_name, since=None):
+ """
+ Name
+ ----
+ ::
+
+ get_filter_results_since(filter_name, since=None)
+
+ Description
+ -----------
+
+ Return information about the test runs and results that a given filter
+ matches that are more recent than a previous match -- in more detail,
+ results where the ``tag`` is greater than the value passed in
+ ``since``.
+
+ The idea of this method is that it will be called from a cron job to
+ update previously accessed results. Something like this::
+
+ previous_results = json.load(open('results.json'))
+ results = previous_results + server.dashboard.get_filter_results_since(
+ filter_name, previous_results[-1]['tag'])
+ ... do things with results ...
+          json.dump(results, open('results.json', 'w'))
+
+ If called without passing ``since`` (or with ``since`` set to
+ ``None``), this method returns up to 100 matches from the filter. In
+ fact, the matches are always capped at 100 -- so set your cronjob to
+        execute frequently enough that fewer than 100 matches are
+ generated between calls!
+
+ Arguments
+ ---------
+
+ ``filter_name``:
+ The name of a filter in the format ~owner/name.
+ ``since``:
+            The most recent ``tag`` from a previous call; only matches whose
+            ``tag`` is greater than this are returned.
+
+ Return value
+ ------------
+
+ A list of "filter matches". A filter match describes the results of
+ matching a filter against one or more test runs::
+
+ {
+ 'tag': either a stringified date (bundle__uploaded_on) or a build number
+ 'test_runs': [{
+ 'test_id': test_id
+ 'link': link-to-test-run,
+            'pass': int, 'fail': int, 'skip': int, 'unknown': int, 'total': int,
+ # only present if filter specifies cases for this test:
+ 'specific_results': [{
+ 'test_case_id': test_case_id,
+ 'link': link-to-test-result,
+ 'result': pass/fail/skip/unknown,
+ 'measurement': string-containing-decimal-or-None,
+ 'units': units,
+ }],
+ }]
+          # Only present if the filter does not specify any test cases:
+          'pass_count': int,
+          'result_count': int,
+ }
+
+ """
+ filter_data = self._get_filter_data(filter_name)
+ matches = evaluate_filter(self.user, filter_data, descending=False)
+ if since is not None:
+            # The tag is a stringified datetime only when the filter has no
+            # build number attribute; otherwise it is an integer build number.
+            if filter_data.get('build_number_attribute') is None:
+ try:
+ since = datetime.datetime.strptime(since, "%Y-%m-%d %H:%M:%S.%f")
+ except ValueError:
+ raise xmlrpclib.Fault(
+ errors.BAD_REQUEST, "cannot parse since argument as datetime")
+ matches = matches.since(since)
+ matches = matches[:100]
+ return [match.serializable() for match in matches]
# Mapper used by the legacy URL
legacy_mapper = Mapper()