Hello community,

here is the log from the commit of package python-os-testr for openSUSE:Factory 
checked in at 2015-12-13 09:39:52
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-os-testr (Old)
 and      /work/SRC/openSUSE:Factory/.python-os-testr.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-os-testr"

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-os-testr/python-os-testr.changes  
2015-10-30 13:42:50.000000000 +0100
+++ /work/SRC/openSUSE:Factory/.python-os-testr.new/python-os-testr.changes     
2015-12-13 09:39:59.000000000 +0100
@@ -1,0 +2,28 @@
+Fri Oct 16 07:16:29 UTC 2015 - [email protected]
+
+- update to 0.4.2:
+  * Better blacklist - tested with Nova
+  * Add whitelist file support
+  * Fix issues with the blacklist file regex generation
+  * Use test_to_run var in no-discover
+  * Minor refactoring to make os_testr more testable
+  * Switch to using autogenerated ChangeLog in docs
+  * Change ignore-errors to ignore_errors
+  * Handle a skipped test without a reason message
+  * Minimize output when --abbreviate is used
+  * Make use of argparse groups and add some tests
+  * Convert file names to regular expressions
+  * Handle incomplete subunit streams
+  * Set min pbr version in setup_requires
+  * update requirements
+  * Add TODO entry for moving away from subprocess in ostestr
+  * Improved docs for os-testr commands
+  * Dogfood things for unit tests
+  * Disable printing percent change on run time by default
+  * Misc Python 3 compatibility fixes
+  * Catch exception trying to extract test time
+  * Fix ValueError in subunit_trace
+  * Add support for having comments in the exclude file
+  * Add TODO file to os-testr
+
+-------------------------------------------------------------------

Old:
----
  os-testr-0.1.0.tar.gz

New:
----
  os-testr-0.4.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-os-testr.spec ++++++
--- /var/tmp/diff_new_pack.msttwq/_old  2015-12-13 09:40:00.000000000 +0100
+++ /var/tmp/diff_new_pack.msttwq/_new  2015-12-13 09:40:00.000000000 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           python-os-testr
-Version:        0.1.0
+Version:        0.4.2
 Release:        0
 Summary:        A testr wrapper to provide functionality for OpenStack projects
 License:        Apache-2.0
@@ -29,7 +29,7 @@
 Requires:       python-Babel >= 1.3
 Requires:       python-python-subunit >= 0.0.18
 Requires:       python-testrepository >= 0.0.18
-Requires:       python-testtools >= 0.9.36
+Requires:       python-testtools >= 1.4.0
 BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 %if 0%{?suse_version} && 0%{?suse_version} <= 1110
 %{!?python_sitelib: %global python_sitelib %(python -c "from 
distutils.sysconfig import get_python_lib; print get_python_lib()")}

++++++ os-testr-0.1.0.tar.gz -> os-testr-0.4.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/.coveragerc 
new/os-testr-0.4.2/.coveragerc
--- old/os-testr-0.1.0/.coveragerc      2015-04-02 22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/.coveragerc      2015-10-01 21:27:31.000000000 +0200
@@ -4,4 +4,4 @@
 omit = os_testr/tests/*,os_testr/openstack/*
 
 [report]
-ignore-errors = True
+ignore_errors = True
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/AUTHORS new/os-testr-0.4.2/AUTHORS
--- old/os-testr-0.1.0/AUTHORS  2015-04-02 22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/AUTHORS  2015-10-01 21:27:57.000000000 +0200
@@ -1 +1,9 @@
+Assaf Muller <[email protected]>
+Davanum Srinivas <[email protected]>
+James Page <[email protected]>
+John Griffith <[email protected]>
+Kun Huang <[email protected]>
 Matthew Treinish <[email protected]>
+Monty Taylor <[email protected]>
+TerryHowe <[email protected]>
+Thomas Bechtold <[email protected]>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/ChangeLog new/os-testr-0.4.2/ChangeLog
--- old/os-testr-0.1.0/ChangeLog        2015-04-02 22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/ChangeLog        2015-10-01 21:27:57.000000000 +0200
@@ -1,6 +1,50 @@
 CHANGES
 =======
 
+0.4.2
+-----
+
+* Force utf8 encoding on subunit attachments output
+
+0.4.1
+-----
+
+* Better blacklist - tested with Nova
+
+0.4.0
+-----
+
+* Add whitelist file support
+* Fix issues with the blacklist file regex generation
+* Use test_to_run var in no-discover
+* Minor refactoring to make os_testr more testable
+* Switch to using autogenerated ChangeLog in docs
+* Change ignore-errors to ignore_errors
+* Handle a skipped test without a reason message
+* Minimize output when --abbreviate is used
+* Make use of argparse groups and add some tests
+
+0.3.0
+-----
+
+* Convert file names to regular expressions
+* Handle incomplete subunit streams
+* Set min pbr version in setup_requires
+* update requirements
+* Add TODO entry for moving away from subprocess in ostestr
+* Improved docs for os-testr commands
+
+0.2.0
+-----
+
+* Dogfood things for unit tests
+* Disable printing percent change on run time by default
+* Misc Python 3 compatibility fixes
+* Catch exception trying to extract test time
+* Fix ValueError in subunit_trace
+* Add support for having comments in the exclude file
+* Add TODO file to os-testr
+
 0.1.0
 -----
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/PKG-INFO new/os-testr-0.4.2/PKG-INFO
--- old/os-testr-0.1.0/PKG-INFO 2015-04-02 22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/PKG-INFO 2015-10-01 21:27:57.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: os-testr
-Version: 0.1.0
+Version: 0.4.2
 Summary: A testr wrapper to provide functionality for OpenStack projects
 Home-page: http://www.openstack.org/
 Author: OpenStack
@@ -26,13 +26,6 @@
                          information about the run
         * subunit2html: generates a test results html page from a subunit 
stream
         
-        Release Notes
-        =============
-        
-        0.1.0
-        -----
-         * First release which includes: ostestr, subunit-trace, and 
subunit2html
-        
         
 Platform: UNKNOWN
 Classifier: Environment :: OpenStack
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/README.rst 
new/os-testr-0.4.2/README.rst
--- old/os-testr-0.1.0/README.rst       2015-04-02 22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/README.rst       2015-10-01 21:27:31.000000000 +0200
@@ -17,10 +17,3 @@
 * subunit-trace: an output filter for a subunit stream which provides useful
                  information about the run
 * subunit2html: generates a test results html page from a subunit stream
-
-Release Notes
-=============
-
-0.1.0
------
- * First release which includes: ostestr, subunit-trace, and subunit2html
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/TODO.rst new/os-testr-0.4.2/TODO.rst
--- old/os-testr-0.1.0/TODO.rst 1970-01-01 01:00:00.000000000 +0100
+++ new/os-testr-0.4.2/TODO.rst 2015-10-01 21:27:31.000000000 +0200
@@ -0,0 +1,19 @@
+Work Items for os-testr
+=======================
+
+Short Term
+----------
+ * Expose all subunit-trace options through ostestr
+ * Add --html option to ostestr to run testr with subunit2html output
+ * Add unit tests
+   * For ostestr test selection api
+   * Response code validation on more argument permutations
+Long Term
+---------
+ * Lock down test selection CLI
+   * When this is done it will become release 1.0.0
+ * Add subunit-trace functional tests
+   ** Sample subunit streams and test output from subunit-trace
+ * Add testing for subunit2html
+ * Stop using subprocess in ostestr, everything it uses is python so there
+   isn't a need to shell out for everything.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/conf.py 
new/os-testr-0.4.2/doc/source/conf.py
--- old/os-testr-0.1.0/doc/source/conf.py       2015-04-02 22:15:38.000000000 
+0200
+++ new/os-testr-0.4.2/doc/source/conf.py       2015-10-01 21:27:31.000000000 
+0200
@@ -22,7 +22,7 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
     'sphinx.ext.autodoc',
-    #'sphinx.ext.intersphinx',
+    # 'sphinx.ext.intersphinx',
     'oslosphinx'
 ]
 
@@ -38,7 +38,7 @@
 
 # General information about the project.
 project = u'os-testr'
-copyright = u'2013, OpenStack Foundation'
+copyright = u'2015, Matthew Treinish'
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
 add_function_parentheses = True
@@ -68,8 +68,15 @@
     ('index',
      '%s.tex' % project,
      u'%s Documentation' % project,
-     u'OpenStack Foundation', 'manual'),
+     u'Matthew Treinish', 'manual'),
 ]
 
+man_pages = [('ostestr', 'ostestr', 'tooling to run OpenStack tests',
+             ['Matthew Treinish'], 1),
+             ('subunit_trace', 'subunit-trace', 'pretty output filter for '
+              'subunit streams', ['Matthew Treinish'], 1),
+             ('subunit2html', 'subunit2html', 'generate a html results page '
+              'from a subunit stream', ['Matthew Treinish'], 1)]
+
 # Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
+# intersphinx_mapping = {'http://docs.python.org/': None}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/history.rst 
new/os-testr-0.4.2/doc/source/history.rst
--- old/os-testr-0.1.0/doc/source/history.rst   1970-01-01 01:00:00.000000000 
+0100
+++ new/os-testr-0.4.2/doc/source/history.rst   2015-10-01 21:27:31.000000000 
+0200
@@ -0,0 +1 @@
+.. include:: ../../ChangeLog
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/index.rst 
new/os-testr-0.4.2/doc/source/index.rst
--- old/os-testr-0.1.0/doc/source/index.rst     2015-04-02 22:15:38.000000000 
+0200
+++ new/os-testr-0.4.2/doc/source/index.rst     2015-10-01 21:27:31.000000000 
+0200
@@ -15,6 +15,8 @@
    installation
    usage
    contributing
+   todo
+   history
 
 Indices and tables
 ==================
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/ostestr.rst 
new/os-testr-0.4.2/doc/source/ostestr.rst
--- old/os-testr-0.1.0/doc/source/ostestr.rst   1970-01-01 01:00:00.000000000 
+0100
+++ new/os-testr-0.4.2/doc/source/ostestr.rst   2015-10-01 21:27:31.000000000 
+0200
@@ -0,0 +1,217 @@
+.. _ostestr:
+
+ostestr
+=======
+
+The ostestr command provides a wrapper around the testr command included in
+the testrepository package. It's designed to build on the functionality
+included in testr and work around several UI bugs in the short term. By default
+it also has output that is much more useful for OpenStack's test suites which
+are lengthy in both runtime and number of tests. Please note that the CLI
+semantics are still a work in progress as the project is quite young, so
+default behavior might change in future versions.
+
+Summary
+-------
+    ostestr [-b|--blacklist_file <blacklist_file>] [-r|--regex REGEX]
+            [-p|--pretty] [--no-pretty] [-s|--subunit] [-l|--list]
+            [-n|--no-discover <test_id>] [--slowest] [--no-slowest]
+            [--pdb <test_id>] [--parallel] [--serial]
+            [-c|--concurrency <workers>] [--until-failure] [--print-exclude]
+
+Options
+-------
+
+  --blacklist_file BLACKLIST_FILE, -b BLACKLIST_FILE
+                        Path to a blacklist file, this file contains a
+                        separate regex exclude on each newline
+  --regex REGEX, -r REGEX
+                        A normal testr selection regex. If a blacklist file is
+                        specified, the regex will be appended to the end of
+                        the generated regex from that file
+  --pretty, -p
+                        Print pretty output from subunit-trace. This is
+                        mutually exclusive with --subunit
+  --no-pretty
+                        Disable the pretty output with subunit-trace
+  --subunit, -s
+                        Output the raw subunit v2 stream from the test run.
+                        This is mutually exclusive with --pretty
+  --list, -l
+                        List all the tests which will be run.
+  --no-discover TEST_ID, -n TEST_ID
+                        Takes in a single test to bypass test discovery and
+                        just execute the test specified
+  --slowest
+                        After the test run print the slowest tests
+  --no-slowest
+                        After the test run don't print the slowest tests
+  --pdb TEST_ID
+                        Run a single test that has pdb traces added
+  --parallel
+                        Run tests in parallel (this is the default)
+  --serial
+                        Run tests serially
+  --concurrency WORKERS, -c WORKERS
+                        The number of workers to use when running in parallel.
+                        By default this is the number of cpus
+  --until-failure
+                        Run the tests in a loop until a failure is
+                        encountered. Running with subunit or pretty output
+                        enabled will force the loop to run tests serially
+  --print-exclude
+                        If an exclude file is used this option will print the
+                        comment from the same line and all skipped tests
+                        before the test run
+
+Running Tests
+-------------
+
+os-testr is primarily for running tests. At its most basic level you just
+invoke ostestr to run a test suite for a project (assuming it is already set
+up to run tests using testr). For example::
+
+    $ ostestr
+
+This will run tests in parallel (with the number of workers matching the number
+of CPUs) and with subunit-trace output. If you need to run tests in serial you
+can use the serial option::
+
+    $ ostestr --serial
+
+Or if you need to adjust the concurrency but still run in parallel you can use
+-c/--concurrency::
+
+    $ ostestr --concurrency 2
+
+If you only want to run an individual test module or something more specific
+(a single class or test) and parallel execution doesn't matter, you can use
+-n/--no-discover to skip test discovery and directly call subunit.run on
+the tests under the covers. Bypassing discovery is desirable when running a
+small subset of tests in a larger test suite because the discovery time can
+often far exceed the total run time of the tests.
+
+For example::
+
+    $ ostestr --no-discover test.test_thing.TestThing.test_thing_method
+
+Additionally, if you need to run a single test module, class, or single test
+with pdb enabled you can use --pdb to directly call testtools.run under the
+covers which works with pdb. For example::
+
+    $ ostestr --pdb tests.test_thing.TestThing.test_thing_method
+
+
+Test Selection
+--------------
+
+ostestr is designed to build on top of the test selection in testr. testr
+only exposes a regex option to select tests. The equivalent functionality is
+exposed via the --regex option. For example::
+
+    $ ostestr --regex 'magic\.regex'
+
+This will do a straight passthrough of the provided regex to testr.
+Additionally, ostestr allows you to specify a blacklist file to define a set
+of regexes to exclude. You can specify a blacklist file with the
+--blacklist_file/-b option, for example::
+
+    $ ostestr --blacklist_file $path_to_file
+
+The format of the file is one regex per line, with '#' used to signify the
+start of a comment on a line. For example::
+
+    # Blacklist File
+    ^regex1 # Excludes these tests
+    .*regex2 # exclude those tests
+
+This will generate a regex to pass to testr which excludes any tests matching
+either '^regex1' or '.*regex2'. If a blacklist file is used in conjunction
+with the --regex option, the regex specified with --regex will be appended to
+the generated output from the --blacklist_file. Also it's worth noting that the
+regex test selection options can not be used in conjunction with the
+--no-discover or --pdb options described in the previous section. This is
+because the regex selection requires using testr under the covers to actually
+do the filtering, and those 2 options do not use testr.
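
To make the regex generation concrete, here is a minimal sketch (not the
ostestr implementation; the input lines are the hypothetical blacklist entries
from the example above) of how such lines could be collapsed into a single
exclusion regex of the shape produced by construct_regex() later in this
change::

    # Minimal sketch, not the ostestr implementation; the input lines are the
    # hypothetical blacklist entries from the example above.
    lines = ["^regex1 # Excludes these tests", ".*regex2 # exclude those tests"]
    patterns = [l.split('#')[0].strip() for l in lines
                if l.split('#')[0].strip()]
    exclude_regex = "^((?!%s).)*$" % '|'.join(patterns)
    print(exclude_regex)  # ^((?!^regex1|.*regex2).)*$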
+
+It's also worth noting that you can use the test list option to dry run any
+selection arguments you are using. You just need to use --list/-l with your
+selection options to do this, for example::
+
+    $ ostestr --regex 'regex3.*' --blacklist_file blacklist.txt --list
+
+This will list all the tests which will be run by ostestr using that
+combination of arguments.
+
+Please note that all of this selection functionality will be expanded on in
+the future, and a default grammar for selecting multiple tests will be chosen
+in a future release. However, as of right now, the current arguments (which
+are guaranteed to remain in place) are still required to perform any
+selection logic while this functionality is still under development.
+
+
+Output Options
+--------------
+
+By default ostestr will use subunit-trace as the output filter on the test
+run. It will also print the slowest tests from the run after the run is
+concluded. You can disable printing the slowest tests with the --no-slowest
+flag, for example::
+
+    $ ostestr --no-slowest
+
+If you'd like to disable the subunit-trace output you can do this using
+--no-pretty::
+
+    $ ostestr --no-pretty
+
+ostestr also provides the option to just output the raw subunit stream on
+STDOUT with --subunit/-s. Note that if you want to use this you also have to
+specify --no-pretty, as the subunit-trace output and the raw subunit output
+are mutually exclusive. For example, to get raw subunit output the arguments
+would be::
+
+    $ ostestr --no-pretty --subunit
+
+An additional option on top of the blacklist file is the --print-exclude
+option. When this option is specified together with a blacklist file, ostestr
+will print all the tests it will be excluding from the blacklist file before
+the tests are run. If a line in the blacklist file has a comment, that comment
+is printed before listing the tests excluded by that line's regex. If no
+comment is present on a line, the regex from that line is used instead. For
+example, if you were using the example blacklist file from the previous
+section, the output before the regular test run output would be::
+
+    $ ostestr -b blacklist.txt --print-exclude
+    Excludes these tests
+    regex1_match
+    regex1_exclude
+
+    exclude those tests
+    regex2_match
+    regex2_exclude
+
+    ...
+
+Notes for running with tox
+--------------------------
+
+.. _tox: https://tox.readthedocs.org/en/latest/
+
+If you use `tox`_ for running your tests and call ostestr as the test command,
+it's recommended that you put {posargs} after ostestr in the commands
+stanza. For example::
+
+    [testenv]
+    commands = ostestr {posargs}
+
+This will enable end users to pass args to configure the output, use the
+selection logic, or any other options directly from the tox CLI. This lets
+tox take care of the venv management and the environment separation but
+enables direct access to all of the ostestr options to easily customize your
+test run. For example, assuming the above posargs usage, you would be able
+to do::
+
+    $ tox -epy34 -- --regex ^regex1
+
+or to skip discovery::
+
+    $ tox -epy34 -- -n test.test_thing.TestThing.test_thing_method
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/subunit2html.rst 
new/os-testr-0.4.2/doc/source/subunit2html.rst
--- old/os-testr-0.1.0/doc/source/subunit2html.rst      1970-01-01 
01:00:00.000000000 +0100
+++ new/os-testr-0.4.2/doc/source/subunit2html.rst      2015-10-01 
21:27:31.000000000 +0200
@@ -0,0 +1,33 @@
+.. _subunit2html:
+
+subunit2html
+============
+
+subunit2html is a tool that takes in a subunit stream file and outputs an
+html page.
+
+Summary
+-------
+
+    subunit2html subunit_stream [output]
+
+Usage
+-----
+
+subunit2html takes one mandatory argument, which specifies the location
+of the subunit stream file. For example::
+
+    $ subunit2html subunit_stream
+
+By default subunit2html will store the generated html results file as
+results.html in the current working directory.
+
+An optional second argument can be provided to set the output path of the html
+results file that is generated. If it is provided, the generated file will be
+saved to that path; otherwise results.html in the current working
+directory will be used. For example::
+
+    $ subunit2html subunit_stream test_results.html
+
+will write the generated html results file to test_results.html in the current
+working directory.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/subunit_trace.rst 
new/os-testr-0.4.2/doc/source/subunit_trace.rst
--- old/os-testr-0.1.0/doc/source/subunit_trace.rst     1970-01-01 
01:00:00.000000000 +0100
+++ new/os-testr-0.4.2/doc/source/subunit_trace.rst     2015-10-01 
21:27:31.000000000 +0200
@@ -0,0 +1,110 @@
+.. _subunit_trace:
+
+subunit-trace
+=============
+
+subunit-trace is an output filter for subunit streams. It is often used in
+conjunction with test runners that emit subunit to enable a consistent and
+useful realtime output from a test run.
+
+Summary
+-------
+
+subunit-trace [--fails|-f] [--failonly] [--perc-diff|-d] [--no-summary]
+              [--diff-threshold|-t <threshold>]
+
+Options
+-------
+
+  --no-failure-debug, -n
+                        Disable printing failure debug information in realtime
+  --fails, -f
+                        Print failure debug information after the stream is
+                        processed
+  --failonly
+                        Don't print success items
+  --perc-diff, -d
+                        Print percent change in run time on each test
+  --diff-threshold THRESHOLD, -t THRESHOLD
+                        Threshold to use for displaying percent change from the
+                        avg run time. If one is not specified the percent
+                        change will always be displayed.
+  --no-summary
+                        Don't print the summary of the test run after it completes
+
+Usage
+-----
+subunit-trace will take a subunit stream in via STDIN. This is the only input
+into the tool. It will then print on STDOUT the formatted test result output
+for the test run information contained in the stream.
+
+A subunit v2 stream must be passed into subunit-trace. If only a subunit v1
+stream is available you must use the subunit-1to2 utility to convert it before
+passing the stream into subunit-trace. For example this can be done by chaining
+pipes::
+
+    $ cat subunit_v1 | subunit-1to2 | subunit-trace
+
+Adjusting per test output
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+subunit-trace provides several options to customize its output. This allows
+users to tailor the output from subunit-trace to suit their needs. The output
+from subunit-trace basically comes in two parts: the per-test output and the
+summary at the end. By default subunit-trace will print failure messages during
+the per-test output, meaning that when a test fails it will also print the
+message and any traceback and other attachments at that time. However, this
+can be disabled by using --no-failure-debug/-n. For example::
+
+    $ testr run --subunit | subunit-trace --no-failure-debug
+
+There is also the option to print all failures together at the end of the test
+run before the summary view. This is done using the --fails/-f option. For
+example::
+
+    $ testr run --subunit | subunit-trace --fails
+
+Often the --fails and --no-failure-debug options are used in conjunction to
+only print failures at the end of a test run. This is useful for large test
+suites where an error message might be lost in the noise. To do this ::
+
+    $ testr run --subunit | subunit-trace --fails --no-failure-debug
+
+By default subunit-trace will print a line for each test after it completes,
+with the test status. However, if you only want to see the run time output for
+failures and not any other test status you can use the --failonly option. For
+example::
+
+     $ testr run --subunit | subunit-trace --failonly
+
+The last output option provided by subunit-trace is to disable the summary view
+of the test run which is normally displayed at the end of a run. You can do
+this using the --no-summary option. For example::
+
+    $ testr run --subunit | subunit-trace --no-summary
+
+
+Show per test run time percent change
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+subunit-trace provides an option to display the percent change in run time
+from the previous run. To do this subunit-trace leverages the testr internals
+a bit: it uses the times.dbm database, which the file repository type in
+testrepository creates, to get the previous run time for a test. If testr
+hasn't ever been used before, or if for whatever reason subunit-trace is unable
+to find the times.dbm file from testr, no percentages will be displayed even if
+the option is enabled. Additionally, a test which does not have an entry in the
+times.dbm file will not have a percentage printed for it.
+
+To enable this feature you use --perc-diff/-d, for example::
+
+    $ testr run --subunit | subunit-trace --perc-diff
+
+You can also set a threshold value for this option. If used, it acts as an
+absolute value and only percentage changes that exceed it will be printed.
+Use the --diff-threshold/-t option to set a threshold, for example::
+
+    $ testr run --subunit | subunit-trace --perc-diff --diff-threshold 45
+
+This will only display percent differences when the change in run time is at
+least 45% in either direction (faster or slower).
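
As a rough illustration of the threshold rule (a sketch only; the helper names
below are hypothetical, not subunit-trace internals), the decision boils down
to comparing the absolute percent change against the absolute threshold::

    # Sketch only: helper names are hypothetical, not subunit-trace internals.
    def perc_diff(run_time, avg_runtime):
        return ((run_time - avg_runtime) / avg_runtime) * 100.0

    def show_diff(run_time, avg_runtime, threshold):
        return abs(perc_diff(run_time, avg_runtime)) >= abs(float(threshold))

    print(show_diff(1.6, 1.0, 45))  # True:  +60% exceeds the 45% threshold
    print(show_diff(1.2, 1.0, 45))  # False: +20% stays below it
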
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/todo.rst 
new/os-testr-0.4.2/doc/source/todo.rst
--- old/os-testr-0.1.0/doc/source/todo.rst      1970-01-01 01:00:00.000000000 
+0100
+++ new/os-testr-0.4.2/doc/source/todo.rst      2015-10-01 21:27:31.000000000 
+0200
@@ -0,0 +1 @@
+.. include:: ../../TODO.rst
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/doc/source/usage.rst 
new/os-testr-0.4.2/doc/source/usage.rst
--- old/os-testr-0.1.0/doc/source/usage.rst     2015-04-02 22:15:38.000000000 
+0200
+++ new/os-testr-0.4.2/doc/source/usage.rst     2015-10-01 21:27:31.000000000 
+0200
@@ -1,7 +1,12 @@
-========
+=====
 Usage
-========
+=====
 
-To use os-testr in a project::
+This section contains the documentation for each of tools packaged in os-testr
 
-    import os_testr
+.. toctree::
+   :maxdepth: 2
+
+   ostestr
+   subunit_trace
+   subunit2html
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr/os_testr.py 
new/os-testr-0.4.2/os_testr/os_testr.py
--- old/os-testr-0.1.0/os_testr/os_testr.py     2015-04-02 22:15:38.000000000 
+0200
+++ new/os-testr-0.4.2/os_testr/os_testr.py     2015-10-01 21:27:31.000000000 
+0200
@@ -23,41 +23,54 @@
 from testtools import run as testtools_run
 
 
-def parse_args():
+def get_parser(args):
     parser = argparse.ArgumentParser(
         description='Tool to run openstack tests')
-    parser.add_argument('--blacklist_file', '-b',
-                        help='Path to a blacklist file, this file contains a'
-                             ' separate regex exclude on each newline')
-    parser.add_argument('--regex', '-r',
-                        help='A normal testr selection regex. If a blacklist '
-                             'file is specified, the regex will be appended '
-                             'to the end of the generated regex from that '
-                             'file')
-    parser.add_argument('--pretty', '-p', dest='pretty', action='store_true',
+    list_files = parser.add_mutually_exclusive_group()
+    list_files.add_argument('--blacklist_file', '-b',
+                            help='Path to a blacklist file, this file '
+                                 'contains a separate regex exclude on each '
+                                 'newline')
+    list_files.add_argument('--whitelist_file', '-w',
+                            help='Path to a whitelist file, this file '
+                                 'contains a separate regex on each newline.')
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('--regex', '-r',
+                       help='A normal testr selection regex. If a blacklist '
+                            'file is specified, the regex will be appended '
+                            'to the end of the generated regex from that '
+                            'file.')
+    group.add_argument('--path', metavar='FILE_OR_DIRECTORY',
+                       help='A file name or directory of tests to run.')
+    group.add_argument('--no-discover', '-n', metavar='TEST_ID',
+                       help="Takes in a single test to bypass test "
+                            "discovery and just execute the test specified. "
+                            "A file name may be used in place of a test "
+                            "name.")
+    pretty = parser.add_mutually_exclusive_group()
+    pretty.add_argument('--pretty', '-p', dest='pretty', action='store_true',
                         help='Print pretty output from subunit-trace. This is '
                              'mutually exclusive with --subunit')
-    parser.add_argument('--no-pretty', dest='pretty', action='store_false',
+    pretty.add_argument('--no-pretty', dest='pretty', action='store_false',
                         help='Disable the pretty output with subunit-trace')
     parser.add_argument('--subunit', '-s', action='store_true',
                         help='output the raw subunit v2 from the test run '
-                             'this is mutuall exclusive with --pretty')
+                             'this is mutually exclusive with --pretty')
     parser.add_argument('--list', '-l', action='store_true',
                         help='List all the tests which will be run.')
-    parser.add_argument('--no-discover', '-n', metavar='TEST_ID',
-                        help="Takes in a single test to bypasses test "
-                             "discover and just excute the test specified")
-    parser.add_argument('--slowest', dest='slowest', action='store_true',
-                        help="after the test run print the slowest tests")
-    parser.add_argument('--no-slowest', dest='slowest', action='store_false',
-                        help="after the test run don't print the slowest "
-                             "tests")
+    slowest = parser.add_mutually_exclusive_group()
+    slowest.add_argument('--slowest', dest='slowest', action='store_true',
+                         help="after the test run print the slowest tests")
+    slowest.add_argument('--no-slowest', dest='slowest', action='store_false',
+                         help="after the test run don't print the slowest "
+                              "tests")
     parser.add_argument('--pdb', metavar='TEST_ID',
                         help='Run a single test that has pdb traces added')
-    parser.add_argument('--parallel', dest='parallel', action='store_true',
-                        help='Run tests in parallel (this is the default)')
-    parser.add_argument('--serial', dest='parallel', action='store_false',
-                        help='Run tests serially')
+    parallel = parser.add_mutually_exclusive_group()
+    parallel.add_argument('--parallel', dest='parallel', action='store_true',
+                          help='Run tests in parallel (this is the default)')
+    parallel.add_argument('--serial', dest='parallel', action='store_false',
+                          help='Run tests serially')
     parser.add_argument('--concurrency', '-c', type=int, metavar='WORKERS',
                         help='The number of workers to use when running in '
                              'parallel. By default this is the number of cpus')
@@ -66,24 +79,89 @@
                              'encountered. Running with subunit or pretty'
                              'output enable will force the loop to run tests'
                              'serially')
+    parser.add_argument('--print-exclude', action='store_true',
+                        help='If an exclude file is used this option will '
+                             'print the comment from the same line and all '
+                             'skipped tests before the test run')
     parser.set_defaults(pretty=True, slowest=True, parallel=True)
-    opts = parser.parse_args()
-    return opts
+    return parser.parse_args(args)
 
 
-def construct_regex(blacklist_file, regex):
+def _get_test_list(regex, env=None):
+    env = env or copy.deepcopy(os.environ)
+    proc = subprocess.Popen(['testr', 'list-tests', regex], env=env,
+                            stdout=subprocess.PIPE)
+    out = proc.communicate()[0]
+    raw_test_list = out.split('\n')
+    bad = False
+    test_list = []
+    exclude_list = ['OS_', 'CAPTURE', 'TEST_TIMEOUT', 'PYTHON',
+                    'subunit.run discover']
+    for line in raw_test_list:
+        for exclude in exclude_list:
+            if exclude in line:
+                bad = True
+                break
+            elif not line:
+                bad = True
+                break
+        if not bad:
+            test_list.append(line)
+        bad = False
+    return test_list
+
+
+def print_skips(regex, message):
+    test_list = _get_test_list(regex)
+    if test_list:
+        if message:
+            print(message)
+        else:
+            print('Skipped because of regex %s:' % regex)
+        for test in test_list:
+            print(test)
+        # Extra whitespace to separate
+        print('\n')
+
+
+def path_to_regex(path):
+    root, _ = os.path.splitext(path)
+    return root.replace('/', '.')
+
+
+def get_regex_from_whitelist_file(file_path):
+    return '|'.join(open(file_path).read().splitlines())
+
+
+def construct_regex(blacklist_file, whitelist_file, regex, print_exclude):
     if not blacklist_file:
         exclude_regex = ''
     else:
         black_file = open(blacklist_file, 'r')
         exclude_regex = ''
         for line in black_file:
-            regex = line.strip()
-            exclude_regex = '|'.join([regex, exclude_regex])
+            raw_line = line.strip()
+            split_line = raw_line.split('#')
+            # Before the # is the regex
+            line_regex = split_line[0].strip()
+            if len(split_line) > 1:
+                # After the # is a comment
+                comment = split_line[1].strip()
+            else:
+                comment = ''
+            if line_regex:
+                if print_exclude:
+                    print_skips(line_regex, comment)
+                if exclude_regex:
+                    exclude_regex = '|'.join([line_regex, exclude_regex])
+                else:
+                    exclude_regex = line_regex
         if exclude_regex:
-            exclude_regex = "'(?!.*" + exclude_regex + ")"
+            exclude_regex = "^((?!" + exclude_regex + ").)*$"
     if regex:
         exclude_regex += regex
+    if whitelist_file:
+        exclude_regex += '%s' % get_regex_from_whitelist_file(whitelist_file)
     return exclude_regex
 
 
@@ -106,25 +184,7 @@
     # This workaround is necessary because of lp bug 1411804 it's super hacky
     # and makes tons of unfounded assumptions, but it works for the most part
     if (subunit or pretty) and until_failure:
-        proc = subprocess.Popen(['testr', 'list-tests', regex], env=env,
-                                stdout=subprocess.PIPE)
-        out = proc.communicate()[0]
-        raw_test_list = out.split('\n')
-        bad = False
-        test_list = []
-        exclude_list = ['CAPTURE', 'TEST_TIMEOUT', 'PYTHON',
-                        'subunit.run discover']
-        for line in raw_test_list:
-            for exclude in exclude_list:
-                if exclude in line:
-                    bad = True
-                    break
-                elif not line:
-                    bad = True
-                    break
-            if not bad:
-                test_list.append(line)
-            bad = False
+        test_list = _get_test_list(regex, env)
         count = 0
         failed = False
         if not test_list:
@@ -193,12 +253,25 @@
         testtools_run.main([sys.argv[0], test_id], sys.stdout)
 
 
-def call_testtools_run(test_id):
-    testtools_run.main([sys.argv[0], test_id], sys.stdout)
+def _select_and_call_runner(opts, exclude_regex):
+    ec = 1
+    if not os.path.isdir('.testrepository'):
+        subprocess.call(['testr', 'init'])
+
+    if not opts.no_discover and not opts.pdb:
+        ec = call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
+                        opts.slowest, opts.parallel, opts.concurrency,
+                        opts.until_failure)
+    else:
+        test_to_run = opts.no_discover or opts.pdb
+        if test_to_run.find('/') != -1:
+            test_to_run = path_to_regex(test_to_run)
+        ec = call_subunit_run(test_to_run, opts.pretty, opts.subunit)
+    return ec
 
 
 def main():
-    opts = parse_args()
+    opts = get_parser(sys.argv[1:])
     if opts.pretty and opts.subunit:
         msg = ('Subunit output and pretty output cannot be specified at the '
                'same time')
@@ -217,17 +290,15 @@
         msg = "You can not use until_failure mode with pdb or no-discover"
         print(msg)
         exit(5)
-    exclude_regex = construct_regex(opts.blacklist_file, opts.regex)
-    if not os.path.isdir('.testrepository'):
-        subprocess.call(['testr', 'init'])
-    if not opts.no_discover and not opts.pdb:
-        exit(call_testr(exclude_regex, opts.subunit, opts.pretty, opts.list,
-                        opts.slowest, opts.parallel, opts.concurrency,
-                        opts.until_failure))
-    elif opts.pdb:
-        exit(call_testtools_run(opts.pdb))
+    if opts.path:
+        regex = path_to_regex(opts.path)
     else:
-        exit(call_subunit_run(opts.no_discover, opts.pretty, opts.subunit))
+        regex = opts.regex
+    exclude_regex = construct_regex(opts.blacklist_file,
+                                    opts.whitelist_file,
+                                    regex,
+                                    opts.print_exclude)
+    exit(_select_and_call_runner(opts, exclude_regex))
 
 if __name__ == '__main__':
     main()
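
For reference, a small sanity check (illustrative only; the test ids and
blacklist patterns below are made up) of how an exclusion regex of the shape
built by construct_regex() above behaves when matched with Python's re
module::

    # Illustrative only; the test ids and blacklist patterns are made up.
    import re

    exclude_regex = "^((?!slow_tests|broken_tests).)*$"
    # A test id containing neither blacklisted pattern is kept:
    print(bool(re.match(exclude_regex,
                        "project.tests.test_api.TestAPI.test_ok")))    # True
    # A test id matching one of the blacklisted patterns is excluded:
    print(bool(re.match(exclude_regex,
                        "project.slow_tests.TestSlow.test_all")))      # False
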
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr/subunit_trace.py 
new/os-testr-0.4.2/os_testr/subunit_trace.py
--- old/os-testr-0.1.0/os_testr/subunit_trace.py        2015-04-02 
22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/os_testr/subunit_trace.py        2015-10-01 
21:27:32.000000000 +0200
@@ -120,6 +120,7 @@
             # indent attachment lines 4 spaces to make them visually
             # offset
             for line in detail.as_text().split('\n'):
+                line = line.encode('utf8')
                 stream.write("    %s\n" % line)
 
 
@@ -131,7 +132,14 @@
             test_times = dbm.open(times_db_path)
         except Exception:
             return False
-        avg_runtime = float(test_times.get(str(test_id), False))
+        try:
+            avg_runtime = float(test_times.get(str(test_id), False))
+        except Exception:
+            try:
+                avg_runtime = float(test_times[str(test_id)])
+            except Exception:
+                avg_runtime = False
+
         if avg_runtime and avg_runtime > 0:
             run_time = float(run_time.rstrip('s'))
             perc_diff = ((run_time - avg_runtime) / avg_runtime) * 100
@@ -140,7 +148,7 @@
 
 
 def show_outcome(stream, test, print_failures=False, failonly=False,
-                 threshold='0'):
+                 enable_diff=False, threshold='0', abbreviate=False):
     global RESULTS
     status = test['status']
     # TODO(sdague): ask lifeless why on this?
@@ -161,29 +169,45 @@
 
     if status == 'fail':
         FAILS.append(test)
-        stream.write('{%s} %s [%s] ... FAILED\n' % (
-            worker, name, duration))
-        if not print_failures:
-            print_attachments(stream, test, all_channels=True)
+        if abbreviate:
+            stream.write('F')
+        else:
+            stream.write('{%s} %s [%s] ... FAILED\n' % (
+                worker, name, duration))
+            if not print_failures:
+                print_attachments(stream, test, all_channels=True)
     elif not failonly:
         if status == 'success':
-            out_string = '{%s} %s [%s' % (worker, name, duration)
-            perc_diff = find_test_run_time_diff(test['id'], duration)
-            if perc_diff and abs(perc_diff) >= abs(float(threshold)):
-                if perc_diff > 0:
-                    out_string = out_string + ' +%.2f%%' % perc_diff
-                else:
-                    out_string = out_string + ' %.2f%%' % perc_diff
-            stream.write(out_string + '] ... ok\n')
-            print_attachments(stream, test)
+            if abbreviate:
+                stream.write('.')
+            else:
+                out_string = '{%s} %s [%s' % (worker, name, duration)
+                perc_diff = find_test_run_time_diff(test['id'], duration)
+                if enable_diff:
+                    if perc_diff and abs(perc_diff) >= abs(float(threshold)):
+                        if perc_diff > 0:
+                            out_string = out_string + ' +%.2f%%' % perc_diff
+                        else:
+                            out_string = out_string + ' %.2f%%' % perc_diff
+                stream.write(out_string + '] ... ok\n')
+                print_attachments(stream, test)
         elif status == 'skip':
-            stream.write('{%s} %s ... SKIPPED: %s\n' % (
-                worker, name, test['details']['reason'].as_text()))
+            if abbreviate:
+                stream.write('S')
+            else:
+                reason = test['details'].get('reason', '')
+                if reason:
+                    reason = ': ' + reason.as_text()
+                stream.write('{%s} %s ... SKIPPED%s\n' % (
+                    worker, name, reason))
         else:
-            stream.write('{%s} %s [%s] ... %s\n' % (
-                worker, name, duration, test['status']))
-            if not print_failures:
-                print_attachments(stream, test, all_channels=True)
+            if abbreviate:
+                stream.write('%s' % test['status'][0])
+            else:
+                stream.write('{%s} %s [%s] ... %s\n' % (
+                    worker, name, duration, test['status']))
+                if not print_failures:
+                    print_attachments(stream, test, all_channels=True)
 
     stream.flush()
 
@@ -220,15 +244,24 @@
     runtime = 0.0
     for k, v in RESULTS.items():
         for test in v:
-            runtime += float(get_duration(test['timestamps']).strip('s'))
+            test_dur = get_duration(test['timestamps']).strip('s')
+            # NOTE(toabctl): get_duration() can return an empty string
+            # which leads to a ValueError when casting to float
+            if test_dur:
+                runtime += float(test_dur)
     return runtime
 
 
 def worker_stats(worker):
     tests = RESULTS[worker]
     num_tests = len(tests)
-    delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
-    return num_tests, delta
+    stop_time = tests[-1]['timestamps'][1]
+    start_time = tests[0]['timestamps'][0]
+    if not start_time or not stop_time:
+        delta = 'N/A'
+    else:
+        delta = stop_time - start_time
+    return num_tests, str(delta)
 
 
 def print_summary(stream, elapsed_time):
@@ -254,8 +287,11 @@
                     "Race in testr accounting.\n" % w)
             else:
                 num, time = worker_stats(w)
-                stream.write(" - Worker %s (%s tests) => %ss\n" %
-                             (w, num, time))
+                out_str = " - Worker %s (%s tests) => %s" % (w, num, time)
+                if time.isdigit():
+                    out_str += 's'
+                out_str += '\n'
+                stream.write(out_str)
 
 
 def parse_args():
@@ -271,6 +307,12 @@
                         default=(
                             os.environ.get('TRACE_FAILONLY', False)
                             is not False))
+    parser.add_argument('--abbreviate', '-a', action='store_true',
+                        dest='abbreviate', help='Print one character status '
+                                                'for each test')
+    parser.add_argument('--perc-diff', '-d', action='store_true',
+                        dest='enable_diff',
+                        help="Print percent change in run time on each test ")
     parser.add_argument('--diff-threshold', '-t', dest='threshold',
                         help="Threshold to use for displaying percent change "
                              "from the avg run time. If one is not specified "
@@ -288,7 +330,9 @@
     outcomes = testtools.StreamToDict(
         functools.partial(show_outcome, sys.stdout,
                           print_failures=args.print_failures,
-                          failonly=args.failonly))
+                          failonly=args.failonly,
+                          enable_diff=args.enable_diff,
+                          abbreviate=args.abbreviate))
     summary = testtools.StreamSummary()
     result = testtools.CopyStreamResult([outcomes, summary])
     result = testtools.StreamResultRouter(result)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr/tests/test_os_testr.py 
new/os-testr-0.4.2/os_testr/tests/test_os_testr.py
--- old/os-testr-0.1.0/os_testr/tests/test_os_testr.py  2015-04-02 
22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/os_testr/tests/test_os_testr.py  2015-10-01 
21:27:31.000000000 +0200
@@ -19,10 +19,238 @@
 Tests for `os_testr` module.
 """
 
+import mock
+import six
+
+from os_testr import os_testr
 from os_testr.tests import base
 
 
-class TestOs_testr(base.TestCase):
+class TestPathToRegex(base.TestCase):
+
+    def test_file_name(self):
+        result = os_testr.path_to_regex("tests/network/v2/test_net.py")
+        self.assertEqual("tests.network.v2.test_net", result)
+        result = os_testr.path_to_regex("openstack/tests/network/v2")
+        self.assertEqual("openstack.tests.network.v2", result)
+
+
+class TestGetParser(base.TestCase):
+    def test_pretty(self):
+        namespace = os_testr.get_parser(['--pretty'])
+        self.assertEqual(True, namespace.pretty)
+        namespace = os_testr.get_parser(['--no-pretty'])
+        self.assertEqual(False, namespace.pretty)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--no-pretty', '--pretty'])
+
+    def test_slowest(self):
+        namespace = os_testr.get_parser(['--slowest'])
+        self.assertEqual(True, namespace.slowest)
+        namespace = os_testr.get_parser(['--no-slowest'])
+        self.assertEqual(False, namespace.slowest)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--no-slowest', '--slowest'])
+
+    def test_parallel(self):
+        namespace = os_testr.get_parser(['--parallel'])
+        self.assertEqual(True, namespace.parallel)
+        namespace = os_testr.get_parser(['--serial'])
+        self.assertEqual(False, namespace.parallel)
+        self.assertRaises(SystemExit, os_testr.get_parser,
+                          ['--parallel', '--serial'])
+
+
+class TestCallers(base.TestCase):
+    def test_no_discover(self):
+        namespace = os_testr.get_parser(['-n', 'project.tests.foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser', 
return_value=namespace), \
+                mock.patch.object(os_testr,
+                                  'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_no_discover_path(self):
+        namespace = os_testr.get_parser(['-n', 'project/tests/foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser', 
return_value=namespace), \
+                mock.patch.object(os_testr,
+                                  'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_pdb(self):
+        namespace = os_testr.get_parser(['--pdb', 'project.tests.foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser', 
return_value=namespace), \
+                mock.patch.object(os_testr,
+                                  'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+    def test_pdb_path(self):
+        namespace = os_testr.get_parser(['--pdb', 'project/tests/foo'])
+
+        def _fake_exit(arg):
+            self.assertTrue(arg)
+
+        def _fake_run(*args, **kwargs):
+            return 'project.tests.foo' in args
+
+        with mock.patch.object(os_testr, 'exit', side_effect=_fake_exit), \
+                mock.patch.object(os_testr, 'get_parser', 
return_value=namespace), \
+                mock.patch.object(os_testr,
+                                  'call_subunit_run',
+                                  side_effect=_fake_run):
+            os_testr.main()
+
+
+class TestConstructRegex(base.TestCase):
+    def test_regex_passthrough(self):
+        result = os_testr.construct_regex(None, None, 'fake_regex', False)
+        self.assertEqual(result, 'fake_regex')
+
+    def test_blacklist_regex_with_comments(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # A Comment\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None, None, False)
+        self.assertEqual(
+            result,
+            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")
+
+    def test_blacklist_regex_without_comments(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None, None, False)
+        self.assertEqual(
+            result,
+            "^((?!fake_regex_3|fake_regex_2|fake_regex_1|fake_regex_0).)*$")
+
+    def test_blacklist_regex_with_comments_and_regex(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # Comments\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              'fake_regex', False)
+
+            expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                              "fake_regex_0).)*$fake_regex")
+            self.assertEqual(result, expected_regex)
+
+    def test_blacklist_regex_without_comments_and_regex(self):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              'fake_regex', False)
+
+            expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                              "fake_regex_0).)*$fake_regex")
+            self.assertEqual(result, expected_regex)
+
+    @mock.patch.object(os_testr, 'print_skips')
+    def test_blacklist_regex_with_comment_print_skips(self, print_mock):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s # Comment\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              None, True)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|fake_regex_1|"
+                          "fake_regex_0).)*$")
+        self.assertEqual(result, expected_regex)
+        calls = print_mock.mock_calls
+        self.assertEqual(len(calls), 4)
+        args = list(map(lambda x: x[1], calls))
+        self.assertIn(('fake_regex_0', 'Comment'), args)
+        self.assertIn(('fake_regex_1', 'Comment'), args)
+        self.assertIn(('fake_regex_2', 'Comment'), args)
+        self.assertIn(('fake_regex_3', 'Comment'), args)
+
+    @mock.patch.object(os_testr, 'print_skips')
+    def test_blacklist_regex_without_comment_print_skips(self, print_mock):
+        blacklist_file = six.StringIO()
+        for i in range(4):
+            blacklist_file.write('fake_regex_%s\n' % i)
+        blacklist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=blacklist_file):
+            result = os_testr.construct_regex('fake_path', None,
+                                              None, True)
+
+        expected_regex = ("^((?!fake_regex_3|fake_regex_2|"
+                          "fake_regex_1|fake_regex_0).)*$")
+        self.assertEqual(result, expected_regex)
+        calls = print_mock.mock_calls
+        self.assertEqual(len(calls), 4)
+        args = list(map(lambda x: x[1], calls))
+        self.assertIn(('fake_regex_0', ''), args)
+        self.assertIn(('fake_regex_1', ''), args)
+        self.assertIn(('fake_regex_2', ''), args)
+        self.assertIn(('fake_regex_3', ''), args)
+
+
+class TestWhitelistFile(base.TestCase):
+    def test_read_whitelist_file(self):
+        file_contents = """regex_a
+regex_b"""
+        whitelist_file = six.StringIO()
+        whitelist_file.write(file_contents)
+        whitelist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=whitelist_file):
+            regex = os_testr.get_regex_from_whitelist_file('/path/to/not_used')
+        self.assertEqual('regex_a|regex_b', regex)
+
+    def test_whitelist_regex_without_comments_and_regex(self):
+        file_contents = """regex_a
+regex_b"""
+        whitelist_file = six.StringIO()
+        whitelist_file.write(file_contents)
+        whitelist_file.seek(0)
+        with mock.patch('six.moves.builtins.open',
+                        return_value=whitelist_file):
+            result = os_testr.construct_regex(None, 'fake_path',
+                                              None, False)
 
-    def test_something(self):
-        pass
+            expected_regex = 'regex_a|regex_b'
+            self.assertEqual(result, expected_regex)
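
The whitelist tests above exercise the behaviour end users get from the new
-w/--whitelist_file option: a file with one regex per line is collapsed into
an OR of its entries before being handed to testr. A minimal sketch (the file
contents here are made up)::

    # Sketch: a whitelist file with one regex per line becomes an OR of its
    # entries; StringIO stands in for a real file with made-up contents.
    import io

    whitelist_file = io.StringIO("regex_a\nregex_b")
    whitelist_regex = '|'.join(whitelist_file.read().splitlines())
    print(whitelist_regex)  # regex_a|regex_b
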
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr/tests/test_return_codes.py 
new/os-testr-0.4.2/os_testr/tests/test_return_codes.py
--- old/os-testr-0.1.0/os_testr/tests/test_return_codes.py      2015-04-02 
22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/os_testr/tests/test_return_codes.py      2015-10-01 
21:27:31.000000000 +0200
@@ -14,13 +14,13 @@
 
 import os
 import shutil
-import StringIO
 import subprocess
 import tempfile
 
 import testtools
 
 from os_testr.tests import base
+from six import StringIO
 
 DEVNULL = open(os.devnull, 'wb')
 
@@ -47,8 +47,8 @@
         shutil.copy('os_testr/tests/files/setup.cfg', self.setup_cfg_file)
         shutil.copy('os_testr/tests/files/__init__.py', self.init_file)
 
-        self.stdout = StringIO.StringIO()
-        self.stderr = StringIO.StringIO()
+        self.stdout = StringIO()
+        self.stderr = StringIO()
         # Change directory, run wrapper and check result
         self.addCleanup(os.chdir, os.path.abspath(os.curdir))
         os.chdir(self.directory)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr/tests/test_subunit_trace.py 
new/os-testr-0.4.2/os_testr/tests/test_subunit_trace.py
--- old/os-testr-0.1.0/os_testr/tests/test_subunit_trace.py     1970-01-01 
01:00:00.000000000 +0100
+++ new/os-testr-0.4.2/os_testr/tests/test_subunit_trace.py     2015-10-01 
21:27:31.000000000 +0200
@@ -0,0 +1,61 @@
+# Copyright 2015 SUSE Linux GmbH
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from datetime import datetime as dt
+
+from ddt import data
+from ddt import ddt
+from ddt import unpack
+from mock import patch
+
+from os_testr import subunit_trace
+from os_testr.tests import base
+
+
+@ddt
+class TestSubunitTrace(base.TestCase):
+
+    @data(([dt(2015, 4, 17, 22, 23, 14, 111111),
+            dt(2015, 4, 17, 22, 23, 14, 111111)],
+           "0.000000s"),
+          ([dt(2015, 4, 17, 22, 23, 14, 111111),
+            dt(2015, 4, 17, 22, 23, 15, 111111)],
+           "1.000000s"),
+          ([dt(2015, 4, 17, 22, 23, 14, 111111),
+            None],
+           ""))
+    @unpack
+    def test_get_durating(self, timestamps, expected_result):
+        self.assertEqual(subunit_trace.get_duration(timestamps),
+                         expected_result)
+
+    @data(([dt(2015, 4, 17, 22, 23, 14, 111111),
+            dt(2015, 4, 17, 22, 23, 14, 111111)],
+           0.0),
+          ([dt(2015, 4, 17, 22, 23, 14, 111111),
+            dt(2015, 4, 17, 22, 23, 15, 111111)],
+           1.0),
+          ([dt(2015, 4, 17, 22, 23, 14, 111111),
+            None],
+           0.0))
+    @unpack
+    def test_run_time(self, timestamps, expected_result):
+        patched_res = {
+            0: [
+                {'timestamps': timestamps}
+            ]
+        }
+        with patch.dict(subunit_trace.RESULTS, patched_res, clear=True):
+            self.assertEqual(subunit_trace.run_time(), expected_result)
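
The ddt-driven cases above pin down the behaviour of subunit_trace.get_duration and run_time: a complete (start, stop) pair yields the elapsed time formatted with microsecond precision, while a pair whose stop timestamp is missing yields an empty string and contributes nothing to the total run time. A rough sketch of that behaviour, assuming a get_duration-like helper (this is not the actual subunit_trace code):

    from datetime import datetime as dt

    def get_duration_sketch(timestamps):
        # Elapsed time as 'S.UUUUUUs'; an incomplete pair yields ''.
        start, stop = timestamps
        if start is None or stop is None:
            return ''
        delta = stop - start
        return '%d.%06ds' % (delta.seconds, delta.microseconds)

    print(get_duration_sketch([dt(2015, 4, 17, 22, 23, 14, 111111),
                               dt(2015, 4, 17, 22, 23, 15, 111111)]))   # 1.000000s
    print(repr(get_duration_sketch([dt(2015, 4, 17, 22, 23, 14, 111111),
                                    None])))                            # ''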
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr.egg-info/PKG-INFO 
new/os-testr-0.4.2/os_testr.egg-info/PKG-INFO
--- old/os-testr-0.1.0/os_testr.egg-info/PKG-INFO       2015-04-02 
22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/os_testr.egg-info/PKG-INFO       2015-10-01 
21:27:57.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: os-testr
-Version: 0.1.0
+Version: 0.4.2
 Summary: A testr wrapper to provide functionality for OpenStack projects
 Home-page: http://www.openstack.org/
 Author: OpenStack
@@ -26,13 +26,6 @@
                          information about the run
         * subunit2html: generates a test results html page from a subunit 
stream
         
-        Release Notes
-        =============
-        
-        0.1.0
-        -----
-         * First release which includes: ostestr, subunit-trace, and 
subunit2html
-        
         
 Platform: UNKNOWN
 Classifier: Environment :: OpenStack
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr.egg-info/SOURCES.txt 
new/os-testr-0.4.2/os_testr.egg-info/SOURCES.txt
--- old/os-testr-0.1.0/os_testr.egg-info/SOURCES.txt    2015-04-02 
22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/os_testr.egg-info/SOURCES.txt    2015-10-01 
21:27:57.000000000 +0200
@@ -8,6 +8,7 @@
 LICENSE
 MANIFEST.in
 README.rst
+TODO.rst
 babel.cfg
 openstack-common.conf
 requirements.txt
@@ -17,9 +18,14 @@
 tox.ini
 doc/source/conf.py
 doc/source/contributing.rst
+doc/source/history.rst
 doc/source/index.rst
 doc/source/installation.rst
+doc/source/ostestr.rst
 doc/source/readme.rst
+doc/source/subunit2html.rst
+doc/source/subunit_trace.rst
+doc/source/todo.rst
 doc/source/usage.rst
 os_testr/__init__.py
 os_testr/os_testr.py
@@ -37,6 +43,7 @@
 os_testr/tests/base.py
 os_testr/tests/test_os_testr.py
 os_testr/tests/test_return_codes.py
+os_testr/tests/test_subunit_trace.py
 os_testr/tests/files/__init__.py
 os_testr/tests/files/failing-tests
 os_testr/tests/files/passing-tests
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr.egg-info/pbr.json 
new/os-testr-0.4.2/os_testr.egg-info/pbr.json
--- old/os-testr-0.1.0/os_testr.egg-info/pbr.json       2015-04-02 
22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/os_testr.egg-info/pbr.json       2015-10-01 
21:27:57.000000000 +0200
@@ -1 +1 @@
-{"git_version": "78f9371", "is_release": true}
\ No newline at end of file
+{"git_version": "65e1e18", "is_release": true}
\ No newline at end of file
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/os_testr.egg-info/requires.txt 
new/os-testr-0.4.2/os_testr.egg-info/requires.txt
--- old/os-testr-0.1.0/os_testr.egg-info/requires.txt   2015-04-02 
22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/os_testr.egg-info/requires.txt   2015-10-01 
21:27:57.000000000 +0200
@@ -1,5 +1,5 @@
-pbr>=0.6,!=0.7,<1.0
+pbr>=1.3,<2.0
 Babel>=1.3
 testrepository>=0.0.18
 python-subunit>=0.0.18
-testtools>=0.9.36,!=1.2.0
+testtools>=1.4.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/requirements.txt 
new/os-testr-0.4.2/requirements.txt
--- old/os-testr-0.1.0/requirements.txt 2015-04-02 22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/requirements.txt 2015-10-01 21:27:31.000000000 +0200
@@ -2,8 +2,8 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-pbr>=0.6,!=0.7,<1.0
+pbr>=1.3,<2.0
 Babel>=1.3
 testrepository>=0.0.18
 python-subunit>=0.0.18
-testtools>=0.9.36,!=1.2.0
+testtools>=1.4.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/setup.cfg new/os-testr-0.4.2/setup.cfg
--- old/os-testr-0.1.0/setup.cfg        2015-04-02 22:16:01.000000000 +0200
+++ new/os-testr-0.4.2/setup.cfg        2015-10-01 21:27:57.000000000 +0200
@@ -53,7 +53,7 @@
 output_file = os_testr/locale/os-testr.pot
 
 [egg_info]
-tag_date = 0
 tag_svn_revision = 0
 tag_build = 
+tag_date = 0
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/setup.py new/os-testr-0.4.2/setup.py
--- old/os-testr-0.1.0/setup.py 2015-04-02 22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/setup.py 2015-10-01 21:27:31.000000000 +0200
@@ -26,5 +26,5 @@
     pass
 
 setuptools.setup(
-    setup_requires=['pbr'],
+    setup_requires=['pbr>=1.3'],
     pbr=True)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/test-requirements.txt 
new/os-testr-0.4.2/test-requirements.txt
--- old/os-testr-0.1.0/test-requirements.txt    2015-04-02 22:15:38.000000000 
+0200
+++ new/os-testr-0.4.2/test-requirements.txt    2015-10-01 21:27:31.000000000 
+0200
@@ -10,3 +10,5 @@
 oslosphinx>=2.2.0  # Apache-2.0
 oslotest>=1.2.0  # Apache-2.0
 testscenarios>=0.4
+ddt>=0.4.0
+six>=1.9.0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/os-testr-0.1.0/tox.ini new/os-testr-0.4.2/tox.ini
--- old/os-testr-0.1.0/tox.ini  2015-04-02 22:15:38.000000000 +0200
+++ new/os-testr-0.4.2/tox.ini  2015-10-01 21:27:31.000000000 +0200
@@ -10,7 +10,7 @@
    VIRTUAL_ENV={envdir}
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = python setup.py testr --slowest --testr-args='{posargs}'
+commands = ostestr {posargs}
 
 [testenv:pep8]
 commands = flake8
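
The tox change above makes the project run its own unit tests through the ostestr wrapper instead of "python setup.py testr". With tox, anything after "--" is forwarded to ostestr via {posargs}, so a run restricted to a single module would look roughly like "tox -e py27 -- --regex os_testr.tests.test_os_testr" (the --regex selection option is assumed from ostestr's interface, and the exact environment name depends on the local tox setup).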

