So is the idea that 'regressions' now shows regressions relative to an initial "golden" run?
I don't like the idea of only changing 'regressions'. These things were previously pretty consistent: fixes + regressions + new + disabled = changes. Now 'regressions' is some weird other thing that isn't part of that taxonomy. I'd rather that invariant remain: if you change one, you change all.

On Mon, Oct 24, 2016 at 10:02 AM, Nicolai Hähnle <[email protected]> wrote:
> From: Nicolai Hähnle <[email protected]>
>
> The idea is that 'regressions' can be useful when a summary is generated
> for a sequence of chronologically sorted runs of the same configuration.
> In that case, what's interesting is the state of the driver in the latest
> run. With this change, 'regressions' now shows those tests that fail (or
> crash etc.) in the latest run but have had a better result in at least one
> earlier run.
>
> In particular, this no longer shows tests that regressed in an earlier
> run but have been fixed again since then.
> ---
>  framework/summary/common.py | 29 +++++++++++++++++++++++------
>  1 file changed, 23 insertions(+), 6 deletions(-)
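To make the semantic change concrete, here is a toy sketch of the two definitions. The integer statuses and test names are made up; they only stand in for so.Status values, which the comparator below treats as "greater is worse", same as the patch:

    # Toy statuses mirroring piglit's ordering: greater compares as worse.
    NOTRUN, SKIP, PASS, FAIL, CRASH = range(5)

    # Three chronological runs of the same configuration (invented names).
    runs = [
        {'a': PASS, 'b': PASS},
        {'a': FAIL, 'b': PASS},   # 'a' regresses here...
        {'a': PASS, 'b': FAIL},   # ...but is fixed again; 'b' regresses.
    ]

    def regressed(x, y):
        # Same comparator as the patch: result got worse, and neither
        # side is NOTRUN/SKIP.
        return x < y and min(x, y) >= PASS

    # Old semantics: pairwise over consecutive runs.
    old = [{n for n in r1 if regressed(r1[n], r2[n])}
           for r1, r2 in zip(runs[:-1], runs[1:])]
    # -> [{'a'}, {'b'}]: 'a' is listed even though the latest run passes.

    # New semantics: every earlier run compared against the latest run.
    new = [{n for n in r if regressed(r[n], runs[-1][n])}
           for r in runs[:-1]]
    # -> [{'b'}, {'b'}]: only 'b', which is still broken in the latest run.

Dropping 'a' is clearly the intent; my taxonomy concern is spelled out below the patch.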
> diff --git a/framework/summary/common.py b/framework/summary/common.py
> index 8c7e2c7..d3a7b31 100644
> --- a/framework/summary/common.py
> +++ b/framework/summary/common.py
> @@ -23,20 +23,21 @@
>  """Shared functions for summary generation."""
>
>  from __future__ import (
>      absolute_import, division, print_function, unicode_literals
>  )
>  import re
>  import operator
>
>  import six
>  from six.moves import zip
> +from itertools import repeat
>
>  # a local variable status exists, prevent accidental overloading by renaming
>  # the module
>  import framework.status as so
>  from framework.core import lazy_property
>  from framework import grouptools
>
>
>  class Results(object):  # pylint: disable=too-few-public-methods
>      """Container object for results.
> @@ -69,28 +70,29 @@ class Names(object):
>      """Class containing names of tests for various statuses.
>
>      Members contain lists of sets of names that have a status.
>
>      Each status is lazily evaluated and cached.
>
>      """
>      def __init__(self, tests):
>          self.__results = tests.results
>
> -    def __diff(self, comparator, handler=None):
> +    def __diff(self, comparator, handler=None, lhs=None, rhs=None):
>          """Helper for simplifying comparators using find_diffs."""
>          ret = ['']
>          if handler is None:
> -            ret.extend(find_diffs(self.__results, self.all, comparator))
> +            ret.extend(find_diffs(self.__results, self.all, comparator,
> +                                  lhs=lhs, rhs=rhs))
>          else:
>              ret.extend(find_diffs(self.__results, self.all, comparator,
> -                                  handler=handler))
> +                                  handler=handler, lhs=lhs, rhs=rhs))
>          return ret
>
>      def __single(self, comparator):
>          """Helper for simplifying comparators using find_single."""
>          return find_single(self.__results, self.all, comparator)
>
>      @lazy_property
>      def all(self):
>          """A set of all tests in all runs."""
>          all_ = set()
> @@ -133,21 +135,22 @@
>      @lazy_property
>      def skips(self):
>          # It is critical to use 'is' not '==' here, otherwise so.NOTRUN
>          # will also be added to this list
>          return self.__single(lambda x: x is so.SKIP)
>
>      @lazy_property
>      def regressions(self):
>          # By ensuring that min(x, y) is >= so.PASS we eliminate NOTRUN
>          # and SKIP from these pages
> -        return self.__diff(lambda x, y: x < y and min(x, y) >= so.PASS)
> +        return self.__diff(lambda x, y: x < y and min(x, y) >= so.PASS,
> +                           rhs=self.__results[-1])
>
>      @lazy_property
>      def fixes(self):
>          # By ensuring that min(x, y) is >= so.PASS we eliminate NOTRUN
>          # and SKIP from these pages
>          return self.__diff(lambda x, y: x > y and min(x, y) >= so.PASS)
>
>      @lazy_property
>      def enabled(self):
>          def handler(names, name, prev, cur):
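The lhs/rhs plumbing the next hunk adds to find_diffs boils down to a pairing change. A minimal standalone illustration of the zip()/repeat() trick, with plain strings standing in for TestrunResult instances:

    from itertools import repeat

    results = ['run0', 'run1', 'run2', 'run3']

    # Default pairing: consecutive runs.
    list(zip(results[:-1], results[1:]))
    # [('run0', 'run1'), ('run1', 'run2'), ('run2', 'run3')]

    # rhs pinned to the latest run: repeat() yields the same object
    # forever, and zip() stops once the finite lhs side is exhausted.
    list(zip(results[:-1], repeat(results[-1])))
    # [('run0', 'run3'), ('run1', 'run3'), ('run2', 'run3')]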
> @@ -285,41 +288,55 @@ def _result_in(name, result):
>      """If a result (or a subtest result) exists return True, else False."""
>      try:
>          # This is a little hacky, but I don't know of a better way where we
>          # ensure the value is truthy
>          _ = result.get_result(name)
>          return True
>      except KeyError:
>          return False
>
>
> -def find_diffs(results, tests, comparator, handler=lambda *a: None):
> +def find_diffs(results, tests, comparator, handler=lambda *a: None,
> +               lhs=None, rhs=None):
>      """Generate diffs between two or more sets of results.
>
>      Arguments:
>      results -- a list of results.TestrunResult instances
>      tests -- an iterable of test names. Must be iterable more than once
>      comparator -- a function with the signature f(x, y) that returns True
>                    when the test should be added to the set of diffs
> +    lhs -- the left-hand-side result for calls to comparator.
> +           If not specified, results from the range results[:-1] will be used.
> +    rhs -- the right-hand-side result for calls to comparator.
> +           If not specified, results from the range results[1:] will be used.
> +    Note that at least one of lhs and rhs must be unspecified.
>
>      Keyword Arguments:
>      handler -- a function with the signature f(names, name, prev, cur). In
>                 the event of a KeyError while comparing the results with
>                 comparator, handler will be passed (<the set of names>,
>                 <the current test name>, <the previous result>, <the current
>                 result>). This can be used to add names even when a KeyError
>                 is expected (ie, enabled tests).
>                 Default: pass
>
>      """
> +    assert (lhs is None) or (rhs is None)
>      diffs = []  # There can't be changes from nil -> 0
> -    for prev, cur in zip(results[:-1], results[1:]):
> +    if lhs is None:
> +        lhs = results[:-1]
> +    else:
> +        lhs = repeat(lhs)
> +    if rhs is None:
> +        rhs = results[1:]
> +    else:
> +        rhs = repeat(rhs)
> +    for prev, cur in zip(lhs, rhs):
>          names = set()
>          for name in tests:
>              try:
>                  if comparator(prev.get_result(name), cur.get_result(name)):
>                      names.add(name)
>              except KeyError:
>                  handler(names, name, prev, cur)
>          diffs.append(names)
>      return diffs
> --
> 2.7.4
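And here is the invariant concern in miniature (same toy integer statuses as above, a sketch rather than the real framework types):

    PASS, FAIL = 2, 3                     # greater is worse, as before
    runs = [{'a': PASS}, {'a': FAIL}, {'a': PASS}]

    def regressed(x, y):
        return x < y and min(x, y) >= PASS

    def fixed(x, y):
        return x > y and min(x, y) >= PASS

    # Pairwise columns, the way every page used to be computed:
    pair_regs = [{n for n in r1 if regressed(r1[n], r2[n])}
                 for r1, r2 in zip(runs[:-1], runs[1:])]   # [{'a'}, set()]
    pair_fixes = [{n for n in r1 if fixed(r1[n], r2[n])}
                  for r1, r2 in zip(runs[:-1], runs[1:])]  # [set(), {'a'}]

    # With this patch, regressions alone is computed against runs[-1]:
    new_regs = [{n for n in r if regressed(r[n], runs[-1][n])}
                for r in runs[:-1]]                        # [set(), set()]

'a' regressed in the first column and was fixed in the second, and the pairwise pages account for each other column by column. Once 'regressions' is computed against the latest run while 'fixes', 'enabled', and 'disabled' stay pairwise, that per-column bookkeeping no longer adds up.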
_______________________________________________
Piglit mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/piglit