# HG changeset patch -- Bitbucket.org
# Project pytest
# URL http://bitbucket.org/hpk42/pytest/overview
# User holger krekel <hol...@merlinux.eu>
# Date 1290285355 -3600
# Node ID f6fe6e272106d390b43fc85a043cd7fa4cd2c449
# Parent  98e74a494cb2d96e45a4bbcf04e764229ccd0b30
merging and refining examples,  also refining skipping documentation.

--- a/example/genhtmlcss.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import py
-html = py.xml.html
-
-class my(html):
-    "a custom style"
-    class body(html.body):
-        style = html.Style(font_size = "120%")
-
-    class h2(html.h2):
-        style = html.Style(background = "grey")
-
-    class p(html.p):
-        style = html.Style(font_weight="bold")
-
-doc = my.html(
-    my.head(),
-    my.body(
-        my.h2("hello world"),
-        my.p("bold as bold can")
-    )
-)
-
-print doc.unicode(indent=2)

--- /dev/null
+++ b/doc/example/costlysetup/sub2/__init__.py
@@ -0,0 +1,1 @@
+#

--- a/example/assertion/test_failures.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-import py
-failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
-
-def test_failure_demo_fails_properly(testdir):
-    target = testdir.tmpdir.join(failure_demo.basename)
-    failure_demo.copy(target)
-    failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
-    result = testdir.runpytest(target)
-    result.stdout.fnmatch_lines([
-        "*35 failed*"
-    ])
-    assert result.ret != 0

--- /dev/null
+++ b/doc/example/pythoncollection.py
@@ -0,0 +1,11 @@
+
+# run this with $ py.test --collectonly test_collectonly.py
+#
+def test_function():
+    pass
+
+class TestClass:
+    def test_method(self):
+        pass
+    def test_anothermethod(self):
+        pass

--- /dev/null
+++ b/doc/example/costlysetup/sub1/test_quick.py
@@ -0,0 +1,3 @@
+
+def test_quick(setup):
+    pass

--- a/doc/example/builtin.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-
-writing well integrated assertion helpers
-========================================================
-
-If you have a test helper function called from a test you can
-use the ``pytest.fail`` marker to fail a test with a certain message.
-The test support function will not show up in the traceback if you
-set the ``__tracebackhide__`` option somewhere in the helper function.
-Example::
-
-    # content of test_checkconfig.py
-    import pytest
-    def checkconfig(x):
-        __tracebackhide__ = True
-        if not hasattr(x, "config"):
-            pytest.fail("not configured: %s" %(x,))
-
-    def test_something():
-        checkconfig(42)
-
-The ``__tracebackhide__`` setting influences py.test showing
-of tracebacks: the ``checkconfig`` function will not be shown
-unless the ``--fulltrace`` command line option is specified.
-Let's run our little function::
-
-    $ py.test -q
-    F
-    ================================= FAILURES =================================
-    ______________________________ test_something ______________________________
-    
-        def test_something():
-    >       checkconfig(42)
-    E       Failed: not configured: 42
-    
-    test_checkconfig.py:8: Failed
-    1 failed in 0.02 seconds

--- a/example/assertion/global_testmodule_config/test_hello.py
+++ /dev/null
@@ -1,5 +0,0 @@
-
-hello = "world"
-
-def test_func():
-    pass

--- a/example/genhtml.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from py.xml import html
-
-paras = "First Para", "Second para"
-
-doc = html.html(
-   html.head(
-        html.meta(name="Content-Type", value="text/html; charset=latin1")),
-   html.body(
-        [html.p(p) for p in paras]))
-
-print unicode(doc).encode('latin1')
-
-

--- /dev/null
+++ b/doc/example/parametrize.txt
@@ -0,0 +1,142 @@
+
+parametrizing tests
+=================================================
+
+py.test allows you to easily implement your own custom
+parametrization scheme for tests.  Here we provide
+some examples for inspiration and re-use.
+
+Parametrizing test methods through per-class configuration
+--------------------------------------------------------------
+
+.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py
+
+Here is an example ``pytest_generate_tests`` hook implementing a
+parametrization scheme similar to Michael Foord's `unittest
+parameterizer`_ in a lot less code::
+
+    # content of ./test_parametrize.py
+    import pytest
+
+    def pytest_generate_tests(metafunc):
+        # called once per each test function
+        for funcargs in metafunc.cls.params[metafunc.function.__name__]:
+            # schedule a new test function run with applied **funcargs
+            metafunc.addcall(funcargs=funcargs)
+
+    class TestClass:
+        # a map specifying multiple argument sets for a test method
+        params = {
+            'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
+            'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
+        }
+
+        def test_equals(self, a, b):
+            assert a == b
+
+        def test_zerodivision(self, a, b):
+            pytest.raises(ZeroDivisionError, "a/b")
+
+Running it means we run two tests for each test function, using
+the respective argument settings::
+
+    $ py.test -q
+    F..F
+    ================================= FAILURES =================================
+    _________________________ TestClass.test_equals[0] _________________________
+    
+    self = <test_parametrize.TestClass instance at 0x128a638>, a = 1, b = 2
+    
+        def test_equals(self, a, b):
+    >       assert a == b
+    E       assert 1 == 2
+    
+    test_parametrize.py:17: AssertionError
+    ______________________ TestClass.test_zerodivision[1] ______________________
+    
+    self = <test_parametrize.TestClass instance at 0x1296440>, a = 3, b = 2
+    
+        def test_zerodivision(self, a, b):
+    >       pytest.raises(ZeroDivisionError, "a/b")
+    E       Failed: DID NOT RAISE
+    
+    test_parametrize.py:20: Failed
+    2 failed, 2 passed in 0.03 seconds
+
+Parametrizing test methods through a decorator
+--------------------------------------------------------------
+
+Modifying the previous example we can also allow decorators
+for parametrizing test methods::
+
+    # content of test_parametrize2.py
+
+    import pytest
+
+    # test support code
+    def params(funcarglist):
+        def wrapper(function):
+            function.funcarglist = funcarglist
+            return function
+        return wrapper
+
+    def pytest_generate_tests(metafunc):
+        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
+            metafunc.addcall(funcargs=funcargs)
+
+    # actual test code
+    class TestClass:
+        @params([dict(a=1, b=2), dict(a=3, b=3), ])
+        def test_equals(self, a, b):
+            assert a == b
+
+        @params([dict(a=1, b=0), dict(a=3, b=2)])
+        def test_zerodivision(self, a, b):
+            pytest.raises(ZeroDivisionError, "a/b")
+
+Running it gives similar results as before::
+
+    $ py.test -q test_parametrize2.py
+    F..F
+    ================================= FAILURES =================================
+    _________________________ TestClass.test_equals[0] _________________________
+    
+    self = <test_parametrize2.TestClass instance at 0x1dbcc68>, a = 1, b = 2
+    
+        @params([dict(a=1, b=2), dict(a=3, b=3), ])
+        def test_equals(self, a, b):
+    >       assert a == b
+    E       assert 1 == 2
+    
+    test_parametrize2.py:19: AssertionError
+    ______________________ TestClass.test_zerodivision[1] ______________________
+    
+    self = <test_parametrize2.TestClass instance at 0x1dd0488>, a = 3, b = 2
+    
+        @params([dict(a=1, b=0), dict(a=3, b=2)])
+        def test_zerodivision(self, a, b):
+    >       pytest.raises(ZeroDivisionError, "a/b")
+    E       Failed: DID NOT RAISE
+    
+    test_parametrize2.py:23: Failed
+    2 failed, 2 passed in 0.03 seconds
+
+checking serialization between Python interpreters
+--------------------------------------------------------------
+
+Here is a stripped-down real-life example of using parametrized
+testing to check serialization between different interpreters.
+We define a ``test_basic_objects`` function which is to be run
+with different sets of values for its three arguments:
+
+* ``python1``: first python interpreter
+* ``python2``: second python interpreter
+* ``obj``: object to be dumped from first interpreter and loaded into second interpreter
+
+.. literalinclude:: multipython.py
+
+Running it (with Python 2.4 through Python 2.7 installed)::
+
+   . $ py.test -q multipython.py
+   ....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
+   48 passed, 27 skipped in 2.55 seconds

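The serialization section of parametrize.txt above only pulls in ``multipython.py`` via ``literalinclude``. As a quick orientation, here is a minimal, hypothetical sketch (not part of this changeset) of the mechanism that file relies on for the pytest version targeted here: ``metafunc.addcall(id=..., param=...)`` attaches a parameter to each generated call, and a ``pytest_funcarg__*`` factory reads it back through ``request.param``.  The ``greeting`` funcarg and its values are made up for illustration::

    # content of test_param_sketch.py (hypothetical example, illustration only)

    def pytest_generate_tests(metafunc):
        # schedule one call per value; the value travels to the factory via request.param
        if "greeting" in metafunc.funcargnames:
            for value in ("hello", "hallo"):
                metafunc.addcall(id=value, param=value)

    def pytest_funcarg__greeting(request):
        # the factory receives the parameter attached by addcall() above
        return request.param

    def test_greeting_is_short(greeting):
        assert len(greeting) <= 5
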
--- a/example/funcarg/mysetup2/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-# XXX this file should not need to be here but is here for proper sys.path mangling

--- a/doc/skipping.txt
+++ b/doc/skipping.txt
@@ -2,18 +2,19 @@
 skip and xfail mechanisms
 =====================================================================
 
-You can mark test functions for a conditional *skip* or as *xfail*,
-expected-to-fail.  Skipping a test avoids running a test.
-Whereas an xfail-marked test usually is run but if it fails it is
-not reported in detail and counted separately. The latter allows
-to keep track of real implementation problems whereas test skips
-are normally tied to a condition, such as a platform or dependency
-requirement without which considering or running the test does
-not make sense.  If a test fails under all conditions then it's
-probably best to mark your test as 'xfail'.
+You can skip or "xfail" test functions, either by marking functions
+through a decorator or by calling the ``pytest.skip|xfail`` helpers.
+A *skip* means that you expect your test to pass unless a certain configuration or condition (e.g. wrong Python interpreter, missing dependency) prevents it from running.  And *xfail* means that you expect your test to fail because there is an
+implementation problem.  Counting and listing *xfailing* tests separately
+helps to maintain a list of implementation problems, and you can attach
+info such as a bug number or a URL to give human-readable context for the problem.
 
-By running ``py.test -rxs`` you will see extra reporting
-information on skips and xfail-run tests at the end of a test run.
+Usually detailed information about skipped/xfailed tests is not shown
+to avoid cluttering the output.  You can use the ``-r`` option to
+see details corresponding to the "short" letters shown in the
+test progress::
+
+    py.test -rxs  # show extra info on skips and xfail tests
 
 .. _skipif:
 
@@ -47,7 +48,7 @@ at module level like this::
         ...
 
 
-skip groups of test functions
+skip test functions of a class
 --------------------------------------
 
 As with all function :ref:`marking` you can do it at
@@ -58,8 +59,7 @@ for skipping all methods of a test class
         pytestmark = pytest.mark.skipif("sys.platform == 'win32'")
 
         def test_function(self):
-            # will not be setup or run under 'win32' platform
-            #
+            "will not be setup or run under 'win32' platform"
 
 The ``pytestmark`` decorator will be applied to each test function.
 If your code targets python2.6 or above you can equivalently use
@@ -69,8 +69,7 @@ the skipif decorator on classes::
     class TestPosixCalls:
 
         def test_function(self):
-            # will not be setup or run under 'win32' platform
-            #
+            "will not be setup or run under 'win32' platform"
 
 It is fine in general to apply multiple "skipif" decorators
 on a single function - this means that if any of the conditions
@@ -94,6 +93,13 @@ This test will be run but no traceback w
 when it fails. Instead terminal reporting will list it in the
 "expected to fail" or "unexpectedly passing" sections.
 
+By specifying on the commandline::
+
+    pytest --runxfail
+
+you can force the running and reporting of an ``xfail`` marked test
+as if it weren't marked at all.
+
 Same as with skipif_ you can also selectively expect a failure
 depending on platform::
 
@@ -101,19 +107,32 @@ depending on platform::
     def test_function():
         ...
 
-To not run a test and still regard it as "xfailed"::
+You can also avoid running an "xfail" test at all or
+specify a reason such as a bug ID or similar.  Here is
+a simple test file with usages:
 
-    @pytest.mark.xfail(..., run=False)
+.. literalinclude:: example/xfail_demo.py
 
-To specify an explicit reason to be shown with xfailure detail::
+Running it with the report-on-xfail option gives this output::
 
-    @pytest.mark.xfail(..., reason="my reason")
-
-By specifying on the commandline::
-
-    pytest --runxfail
-
-you can force the running and reporting of a runnable ``xfail`` marked test.
+    example $ py.test -rx xfail_demo.py
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev31
+    test path 1: xfail_demo.py
+    
+    xfail_demo.py xxxxx
+    ========================= short test summary info ==========================
+    XFAIL xfail_demo.py::test_hello
+    XFAIL xfail_demo.py::test_hello2
+      reason: [NOTRUN]
+    XFAIL xfail_demo.py::test_hello3
+      condition: hasattr(os, 'sep')
+    XFAIL xfail_demo.py::test_hello4
+      bug 110
+    XFAIL xfail_demo.py::test_hello5
+      reason: reason
+    
+    ======================== 5 xfailed in 0.04 seconds =========================
 
 imperative xfail from within a test or setup function
 ------------------------------------------------------

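The revised skipping.txt above distinguishes skips (a configuration or condition prevents the run) from xfails (known implementation problems, optionally annotated with a reason or ``run=False``), and its context lines note that stacking several ``skipif`` decorators on one function is fine.  A small hypothetical module combining those markers (the condition strings and bug number are illustrative only), to be run with ``py.test -rxs``::

    # content of test_skip_xfail_sketch.py (hypothetical example, illustration only)
    import pytest

    @pytest.mark.skipif("sys.platform == 'win32'")
    @pytest.mark.skipif("sys.version_info < (2, 5)")
    def test_posix_only_feature():
        # skipped (never set up or run) if either condition evaluates to True
        import os
        assert hasattr(os, "fork")

    @pytest.mark.xfail(reason="bug 110")
    def test_known_broken():
        # run, but a failure is reported in the "expected to fail" section
        assert 0

    @pytest.mark.xfail(run=False)
    def test_broken_and_not_run():
        # not executed at all, still counted as xfailed
        assert 0
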
--- a/example/funcarg/urloption/conftest.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# conftest.py
-import py
-
-
-def pytest_addoption(parser):
-    grp = parser.getgroup("testserver options")
-    grp.addoption("--url", action="store", default=None,
-        help="url for testserver")
-
-def pytest_funcarg__url(request):
-    url = request.config.getvalue("url")
-    if url is None:
-        py.test.skip("need --url")
-    return url
-

--- a/example/funcarg/test_simpleprovider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# ./test_simpleprovider.py
-def pytest_funcarg__myfuncarg(request):
-    return 42
-
-def test_function(myfuncarg):
-    assert myfuncarg == 17
-

--- a/example/funcarg/costlysetup/sub2/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-#

--- /dev/null
+++ b/doc/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+# just defined to prevent the root level tox.ini to kick in

--- a/example/funcarg/costlysetup/conftest.py
+++ /dev/null
@@ -1,16 +0,0 @@
-
-def pytest_funcarg__setup(request):
-    return request.cached_setup(
-        setup=lambda: CostlySetup(),
-        teardown=lambda costlysetup: costlysetup.finalize(),
-        scope="session",
-    )
-
-class CostlySetup:
-    def __init__(self):
-        import time
-        time.sleep(5)
-        self.timecostly = 1
-
-    def finalize(self):
-        del self.timecostly

--- a/doc/example/xunit_setup.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-Learning by examples
-=====================
-
-adding custom options
-----------------------
-
-py.test supports adding of standard optparse_ Options.
-A plugin may implement the ``addoption`` hook for registering
-custom options::
-
-    def pytest_addoption(parser):
-        parser.addoption("-M", "--myopt", action="store",
-            help="specify string to set myopt")
-
-    def pytest_configure(config):
-        if config.option.myopt:
-            # do action based on option value
-            #
-
-.. _optparse: http://docs.python.org/library/optparse.html
-
-order of setup/teardown module/class/item methods
-====================================================
-
-managing state at module, class and method level
-------------------------------------------------------------
-
-Here is a working example for what goes on when you setup modules,
-classes and methods::
-
-    # [[from py/documentation/example/pytest/test_setup_flow_example.py]]
-
-    def setup_module(module):
-        module.TestStateFullThing.classcount = 0
-
-    class TestStateFullThing:
-        def setup_class(cls):
-            cls.classcount += 1
-
-        def teardown_class(cls):
-            cls.classcount -= 1
-
-        def setup_method(self, method):
-            self.id = eval(method.func_name[5:])
-
-        def test_42(self):
-            assert self.classcount == 1
-            assert self.id == 42
-
-        def test_23(self):
-            assert self.classcount == 1
-            assert self.id == 23
-
-    def teardown_module(module):
-        assert module.TestStateFullThing.classcount == 0
-
-For this example the control flow happens as follows::
-
-    import test_setup_flow_example
-    setup_module(test_setup_flow_example)
-       setup_class(TestStateFullThing)
-           instance = TestStateFullThing()
-           setup_method(instance, instance.test_42)
-              instance.test_42()
-           setup_method(instance, instance.test_23)
-              instance.test_23()
-       teardown_class(TestStateFullThing)
-    teardown_module(test_setup_flow_example)
-
-Note that ``setup_class(TestStateFullThing)`` is called and not
-``TestStateFullThing.setup_class()`` which would require you
-to insert ``setup_class = classmethod(setup_class)`` to make
-your setup function callable. Did we mention that lazyness
-is a virtue?

--- a/example/funcarg/mysetup2/myapp.py
+++ /dev/null
@@ -1,5 +0,0 @@
-
-class MyApp:
-    def question(self):
-        return 6 * 9
-

--- a/example/funcarg/parametrize/test_parametrize.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import py
-
-def pytest_generate_tests(metafunc):
-    for funcargs in metafunc.cls.params[metafunc.function.__name__]:
-        metafunc.addcall(funcargs=funcargs)
-
-class TestClass:
-    params = {
-        'test_equals': [dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)],
-        'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
-    }
-
-    def test_equals(self, a, b):
-        assert a == b
-
-    def test_zerodivision(self, a, b):
-        py.test.raises(ZeroDivisionError, "a/b")

--- /dev/null
+++ b/doc/example/assertion/global_testmodule_config/test_hello.py
@@ -0,0 +1,5 @@
+
+hello = "world"
+
+def test_func():
+    pass

--- a/example/genxml.py
+++ /dev/null
@@ -1,17 +0,0 @@
-
-import py
-class ns(py.xml.Namespace):
-    pass
-
-doc = ns.books(
-    ns.book(
-        ns.author("May Day"),
-        ns.title("python for java programmers"),),
-    ns.book(
-        ns.author("why", class_="somecssclass"),
-        ns.title("Java for Python programmers"),),
-    publisher="N.N",
-    )
-print doc.unicode(indent=2).encode('utf8')
-
-

--- a/doc/plugins.txt
+++ b/doc/plugins.txt
@@ -65,7 +65,7 @@ tool, for example::
 
 If a plugin is installed,  py.test automatically finds and integrates it,
 there is no need to activate it.  If you don't need a plugin anymore simply
-de-install it.  You can find a list of valid plugins through a
+de-install it.  You can find a list of available plugins through a
 `pytest- pypi.python.org search`_.
 
 .. _`available installable plugins`:

--- a/example/xfail_demo.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import py
-
-@py.test.mark.xfail
-def test_hello():
-    assert 0
-
-@py.test.mark.xfail(run=False)
-def test_hello2():
-    assert 0
-
-@py.test.mark.xfail("hasattr(os, 'sep')")
-def test_hello3():
-    assert 0
-
-def test_hello5():
-    py.test.xfail("reason")

--- a/example/funcarg/conftest.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import py
-
-collect_ignore = 'mysetup', 'mysetup2', 'test_simpleprovider.py', 'parametrize'

--- /dev/null
+++ b/doc/example/assertion/test_setup_flow_example.py
@@ -0,0 +1,42 @@
+def setup_module(module):
+    module.TestStateFullThing.classcount = 0
+
+class TestStateFullThing:
+    def setup_class(cls):
+        cls.classcount += 1
+
+    def teardown_class(cls):
+        cls.classcount -= 1
+
+    def setup_method(self, method):
+        self.id = eval(method.__name__[5:])
+
+    def test_42(self):
+        assert self.classcount == 1
+        assert self.id == 42
+
+    def test_23(self):
+        assert self.classcount == 1
+        assert self.id == 23
+
+def teardown_module(module):
+    assert module.TestStateFullThing.classcount == 0
+
+""" For this example the control flow happens as follows::
+    import test_setup_flow_example
+    setup_module(test_setup_flow_example)
+       setup_class(TestStateFullThing)
+           instance = TestStateFullThing()
+           setup_method(instance, instance.test_42)
+              instance.test_42()
+           setup_method(instance, instance.test_23)
+              instance.test_23()
+       teardown_class(TestStateFullThing)
+    teardown_module(test_setup_flow_example)
+
+Note that ``setup_class(TestStateFullThing)`` is called and not
+``TestStateFullThing.setup_class()`` which would require you
+to insert ``setup_class = classmethod(setup_class)`` to make
+your setup function callable.
+"""
+

--- a/example/funcarg/costlysetup/sub1/test_quick.py
+++ /dev/null
@@ -1,3 +0,0 @@
-
-def test_quick():
-    pass

--- a/example/funcarg/mysetup/myapp.py
+++ /dev/null
@@ -1,5 +0,0 @@
-
-class MyApp:
-    def question(self):
-        return 6 * 9
-

--- a/example/funcarg/parametrize/test_parametrize2.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import py
-
-# test support code
-def params(funcarglist):
-    def wrapper(function):
-        function.funcarglist = funcarglist
-        return function
-    return wrapper
-
-def pytest_generate_tests(metafunc):
-    for funcargs in getattr(metafunc.function, 'funcarglist', ()):
-        metafunc.addcall(funcargs=funcargs)
-
-
-# actual test code
-
-class TestClass:
-    @params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], )
-    def test_equals(self, a, b):
-        assert a == b
-
-    @params([dict(a=1, b=0), dict(a=3, b=2)])
-    def test_zerodivision(self, a, b):
-        py.test.raises(ZeroDivisionError, "a/b")
-

--- a/doc/example/detectpytest.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-
-Detect if running from within a py.test run
---------------------------------------------------------------
-
-Usually it is a bad idea to make application code
-behave differently if called from a test.  But if you
-absolutely must find out if your application code is
-running from a test you can do something like this::
-
-    # content of conftest.py in your testing directory
-
-    def pytest_configure(config):
-        import sys   
-        sys._called_from_test = True
-
-    def pytest_unconfigure(config):
-        del sys._called_from_test
-
-and then check for the ``sys._called_from_test`` flag::
-
-    if hasattr(sys, '_called_from_test'):
-        # called from within a test run
-    else:
-        # called "normally"
-    
-accordingly in your application.  It's also a good idea
-to rather use your own application module rather than ``sys``
-for handling flag.
-

--- a/example/funcarg/costlysetup/sub1/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-#

--- a/example/funcarg/parametrize/test_parametrize3.py
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# following hook can be put unchanged into a local or global plugin
-def pytest_generate_tests(metafunc):
-    for scenario in metafunc.cls.scenarios:
-        metafunc.addcall(id=scenario[0], funcargs=scenario[1])
-
-
-scenario1 = ('basic', {'attribute': 'value'})
-scenario2 = ('advanced', {'attribute': 'value2'})
-
-class TestSampleWithScenarios:
-    scenarios = [scenario1, scenario2]
-
-    def test_demo(self, attribute):
-        assert isinstance(attribute, str)

--- a/doc/example/simple.txt
+++ b/doc/example/simple.txt
@@ -1,9 +1,26 @@
 
 .. highlightlang:: python
 
-simple patterns using hooks
+simple hook using patterns
 ==========================================================
 
+adding custom options
+----------------------
+
+py.test supports adding standard optparse_ options.
+A plugin may implement the ``addoption`` hook for registering
+custom options::
+
+    def pytest_addoption(parser):
+        parser.addoption("-M", "--myopt", action="store",
+            help="specify string to set myopt")
+
+    def pytest_configure(config):
+        if config.option.myopt:
+            # do action based on option value
+
+.. _optparse: http://docs.python.org/library/optparse.html
+
 pass different values to a test function, depending on command line options
 ----------------------------------------------------------------------------
 
@@ -134,3 +151,128 @@ let's run the full monty::
 
 As expected when running the full range of ``param1`` values
 we'll get an error on the last one.
+
+
+.. _`retrieved by hooks as item keywords`:
+
+control skipping of tests according to command line option
+--------------------------------------------------------------
+
+Here is a ``conftest.py`` file adding a ``--runslow`` command
+line option to control skipping of ``slow`` marked tests::
+
+    # content of conftest.py
+
+    import pytest
+    def pytest_addoption(parser):
+        parser.addoption("--runslow", action="store_true",
+            help="run slow tests")
+
+    def pytest_runtest_setup(item):
+        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
+            pytest.skip("need --runslow option to run")
+
+We can now write a test module like this::
+
+    # content of test_module.py
+
+    import pytest
+    slow = pytest.mark.slow
+
+    def test_func_fast():
+        pass
+
+    @slow
+    def test_func_slow():
+        pass
+
+and when running it we will see a skipped "slow" test::
+
+    $ py.test test_module.py -rs    # "-rs" means report details on the little 's'
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: test_module.py
+    
+    test_module.py .s
+    ========================= short test summary info ==========================
+    SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
+    
+    =================== 1 passed, 1 skipped in 0.02 seconds ====================
+
+Or run it including the ``slow`` marked test::
+
+    $ py.test test_module.py --runslow
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: test_module.py
+    
+    test_module.py ..
+    
+    ========================= 2 passed in 0.01 seconds =========================
+
+
+writing well integrated assertion helpers
+--------------------------------------------------
+
+If you have a test helper function called from a test you can
+use the ``pytest.fail`` marker to fail a test with a certain message.
+The test support function will not show up in the traceback if you
+set the ``__tracebackhide__`` option somewhere in the helper function.
+Example::
+
+    # content of test_checkconfig.py
+    import pytest
+    def checkconfig(x):
+        __tracebackhide__ = True
+        if not hasattr(x, "config"):
+            pytest.fail("not configured: %s" %(x,))
+
+    def test_something():
+        checkconfig(42)
+
+The ``__tracebackhide__`` setting influences py.test showing
+of tracebacks: the ``checkconfig`` function will not be shown
+unless the ``--fulltrace`` command line option is specified.
+Let's run our little function::
+
+    $ py.test -q
+    F
+    ================================= FAILURES 
=================================
+    ______________________________ test_something 
______________________________
+    
+        def test_something():
+    >       checkconfig(42)
+    E       Failed: not configured: 42
+    
+    test_checkconfig.py:8: Failed
+    1 failed in 0.02 seconds
+
+
+Detect if running from within a py.test run
+--------------------------------------------------------------
+
+Usually it is a bad idea to make application code
+behave differently if called from a test.  But if you
+absolutely must find out if your application code is
+running from a test you can do something like this::
+
+    # content of conftest.py in your testing directory
+
+    def pytest_configure(config):
+        import sys
+        sys._called_from_test = True
+
+    def pytest_unconfigure(config):
+        import sys
+        del sys._called_from_test
+
+and then check for the ``sys._called_from_test`` flag::
+
+    if hasattr(sys, '_called_from_test'):
+        # called from within a test run
+    else:
+        # called "normally"
+    
+accordingly in your application.  It's also a good idea
+to use your own application module rather than ``sys``
+for handling the flag.
+

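The new simple.txt section just above ends by recommending that the "called from a test" flag live on one of your own application modules instead of ``sys``.  A hypothetical sketch of that variant (``myapp.config`` stands in for whatever module your application already provides)::

    # content of conftest.py (hypothetical example, illustration only)
    import myapp.config   # assumed application module; adjust to your project

    def pytest_configure(config):
        myapp.config.called_from_test = True

    def pytest_unconfigure(config):
        myapp.config.called_from_test = False

Application code can then check ``getattr(myapp.config, "called_from_test", False)`` and never needs to touch ``sys``.
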
--- a/example/funcarg/costlysetup/sub2/test_two.py
+++ /dev/null
@@ -1,6 +0,0 @@
-def test_something(setup):
-    assert setup.timecostly == 1
-
-def test_something_more(setup):
-    assert setup.timecostly == 1
-

--- /dev/null
+++ b/doc/example/xfail_demo.py
@@ -0,0 +1,21 @@
+import pytest
+xfail = pytest.mark.xfail
+
+@xfail
+def test_hello():
+    assert 0
+
+@xfail(run=False)
+def test_hello2():
+    assert 0
+
+@xfail("hasattr(os, 'sep')")
+def test_hello3():
+    assert 0
+
+@xfail(reason="bug 110")
+def test_hello4():
+    assert 0
+
+def test_hello5():
+    pytest.xfail("reason")

--- a/example/funcarg/mysetup/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-# XXX this file should not need to be here but is here for proper sys.path mangling

--- /dev/null
+++ b/doc/example/assertion/test_failures.py
@@ -0,0 +1,14 @@
+
+import py
+failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
+pytest_plugins = 'pytester',
+
+def test_failure_demo_fails_properly(testdir):
+    target = testdir.tmpdir.join(failure_demo.basename)
+    failure_demo.copy(target)
+    failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))
+    result = testdir.runpytest(target)
+    result.stdout.fnmatch_lines([
+        "*35 failed*"
+    ])
+    assert result.ret != 0

--- /dev/null
+++ b/doc/example/costlysetup/sub1/__init__.py
@@ -0,0 +1,1 @@
+#

--- a/example/funcarg/test_multi_python.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-
-module containing a parametrized tests testing cross-python
-serialization via the pickle module.
-"""
-import py
-
-pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6']
-# 'jython' 'python3.1']
-
-def pytest_generate_tests(metafunc):
-    if 'python1' in metafunc.funcargnames:
-        assert 'python2' in metafunc.funcargnames
-        for obj in metafunc.function.multiarg.kwargs['obj']:
-            for py1 in pythonlist:
-                for py2 in pythonlist:
-                    metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
-                        param=(py1, py2, obj))
-
-@py.test.mark.multiarg(obj=[42, {}, {1:3},])
-def test_basic_objects(python1, python2, obj):
-    python1.dumps(obj)
-    python2.load_and_is_true("obj == %s" % obj)
-
-def pytest_funcarg__python1(request):
-    tmpdir = request.getfuncargvalue("tmpdir")
-    picklefile = tmpdir.join("data.pickle")
-    return Python(request.param[0], picklefile)
-
-def pytest_funcarg__python2(request):
-    python1 = request.getfuncargvalue("python1")
-    return Python(request.param[1], python1.picklefile)
-
-def pytest_funcarg__obj(request):
-    return request.param[2]
-
-class Python:
-    def __init__(self, version, picklefile):
-        self.pythonpath = py.path.local.sysfind(version)
-        if not self.pythonpath:
-            py.test.skip("%r not found" %(version,))
-        self.picklefile = picklefile
-    def dumps(self, obj):
-        dumpfile = self.picklefile.dirpath("dump.py")
-        dumpfile.write(py.code.Source("""
-            import pickle
-            f = open(%r, 'wb')
-            s = pickle.dump(%r, f)
-            f.close()
-        """ % (str(self.picklefile), obj)))
-        py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
-
-    def load_and_is_true(self, expression):
-        loadfile = self.picklefile.dirpath("load.py")
-        loadfile.write(py.code.Source("""
-            import pickle
-            f = open(%r, 'rb')
-            obj = pickle.load(f)
-            f.close()
-            res = eval(%r)
-            if not res:
-                raise SystemExit(1)
-        """ % (str(self.picklefile), expression)))
-        print (loadfile)
-        py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))

--- /dev/null
+++ b/doc/example/assertion/failure_demo.py
@@ -0,0 +1,193 @@
+from py.test import raises
+import py
+
+def otherfunc(a,b):
+    assert a==b
+
+def somefunc(x,y):
+    otherfunc(x,y)
+
+def otherfunc_multi(a,b):
+    assert (a ==
+            b)
+
+def test_generative(param1, param2):
+    assert param1 * 2 < param2
+
+def pytest_generate_tests(metafunc):
+    if 'param1' in metafunc.funcargnames:
+        metafunc.addcall(funcargs=dict(param1=3, param2=6))
+
+class TestFailing(object):
+    def test_simple(self):
+        def f():
+            return 42
+        def g():
+            return 43
+
+        assert f() == g()
+
+    def test_simple_multiline(self):
+        otherfunc_multi(
+                  42,
+                  6*9)
+
+    def test_not(self):
+        def f():
+            return 42
+        assert not f()
+
+    def test_complex_error(self):
+        def f():
+            return 44
+        def g():
+            return 43
+        somefunc(f(), g())
+
+    def test_z1_unpack_error(self):
+        l = []
+        a,b  = l
+
+    def test_z2_type_error(self):
+        l = 3
+        a,b  = l
+
+    def test_startswith(self):
+        s = "123"
+        g = "456"
+        assert s.startswith(g)
+
+    def test_startswith_nested(self):
+        def f():
+            return "123"
+        def g():
+            return "456"
+        assert f().startswith(g())
+
+    def test_global_func(self):
+        assert isinstance(globf(42), float)
+
+    def test_instance(self):
+        self.x = 6*7
+        assert self.x != 42
+
+    def test_compare(self):
+        assert globf(10) < 5
+
+    def test_try_finally(self):
+        x = 1
+        try:
+            assert x == 0
+        finally:
+            x = 0
+
+    def test_raises(self):
+        s = 'qwe'
+        raises(TypeError, "int(s)")
+
+    def test_raises_doesnt(self):
+        raises(IOError, "int('3')")
+
+    def test_raise(self):
+        raise ValueError("demo error")
+
+    def test_tupleerror(self):
+        a,b = [1]
+
+    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
+        l = [1,2,3]
+        print ("l is %r" % l)
+        a,b = l.pop()
+
+    def test_some_error(self):
+        if namenotexi:
+            pass
+
+    def func1(self):
+        assert 41 == 42
+
+
+# thanks to Matthew Scott for this test
+def test_dynamic_compile_shows_nicely():
+    src = 'def foo():\n assert 1 == 0\n'
+    name = 'abc-123'
+    module = py.std.imp.new_module(name)
+    code = py.code.compile(src, name, 'exec')
+    py.builtin.exec_(code, module.__dict__)
+    py.std.sys.modules[name] = module
+    module.foo()
+
+
+class TestSpecialisedExplanations(object):
+    def test_eq_text(self):
+        assert 'spam' == 'eggs'
+
+    def test_eq_similar_text(self):
+        assert 'foo 1 bar' == 'foo 2 bar'
+
+    def test_eq_multiline_text(self):
+        assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
+
+    def test_eq_long_text(self):
+        a = '1'*100 + 'a' + '2'*100
+        b = '1'*100 + 'b' + '2'*100
+        assert a == b
+
+    def test_eq_long_text_multiline(self):
+        a = '1\n'*100 + 'a' + '2\n'*100
+        b = '1\n'*100 + 'b' + '2\n'*100
+        assert a == b
+
+    def test_eq_list(self):
+        assert [0, 1, 2] == [0, 1, 3]
+
+    def test_eq_list_long(self):
+        a = [0]*100 + [1] + [3]*100
+        b = [0]*100 + [2] + [3]*100
+        assert a == b
+
+    def test_eq_dict(self):
+        assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
+
+    def test_eq_set(self):
+        assert set([0, 10, 11, 12]) == set([0, 20, 21])
+
+    def test_eq_longer_list(self):
+        assert [1,2] == [1,2,3]
+
+    def test_in_list(self):
+        assert 1 in [0, 2, 3, 4, 5]
+
+
+def test_attribute():
+    class Foo(object):
+        b = 1
+    i = Foo()
+    assert i.b == 2
+
+
+def test_attribute_instance():
+    class Foo(object):
+        b = 1
+    assert Foo().b == 2
+
+
+def test_attribute_failure():
+    class Foo(object):
+        def _get_b(self):
+            raise Exception('Failed to get attrib')
+        b = property(_get_b)
+    i = Foo()
+    assert i.b == 2
+
+
+def test_attribute_multiple():
+    class Foo(object):
+        b = 1
+    class Bar(object):
+        b = 2
+    assert Foo().b == Bar().b
+
+
+def globf(x):
+    return x+1

--- /dev/null
+++ b/doc/example/costlysetup/sub2/test_two.py
@@ -0,0 +1,6 @@
+def test_something(setup):
+    assert setup.timecostly == 1
+
+def test_something_more(setup):
+    assert setup.timecostly == 1
+

--- a/doc/example/collectonly.py
+++ /dev/null
@@ -1,11 +0,0 @@
-
-# run this with $ py.test --collectonly test_collectonly.py
-#
-def test_function():
-    pass
-
-class TestClass:
-    def test_method(self):
-        pass
-    def test_anothermethod(self):
-        pass

--- a/example/funcarg/mysetup/conftest.py
+++ /dev/null
@@ -1,9 +0,0 @@
-
-from mysetup.myapp import MyApp
-
-def pytest_funcarg__mysetup(request):
-    return MySetup()
-
-class MySetup:
-    def myapp(self):
-        return MyApp()

--- a/example/funcarg/mysetup2/test_sample.py
+++ /dev/null
@@ -1,6 +0,0 @@
-
-def test_answer(mysetup):
-    app = mysetup.myapp()
-    answer = app.question()
-    assert answer == 42
-

--- /dev/null
+++ b/doc/example/assertion/global_testmodule_config/conftest.py
@@ -0,0 +1,10 @@
+import pytest, py
+mydir = py.path.local(__file__).dirpath()
+
+def pytest_runtest_setup(item):
+    if isinstance(item, pytest.Function):
+        if not item.fspath.relto(mydir):
+            return
+        mod = item.getparent(pytest.Module).obj
+        if hasattr(mod, 'hello'):
+            py.builtin.print_("mod.hello", mod.hello)

--- a/doc/example/index.txt
+++ b/doc/example/index.txt
@@ -10,11 +10,8 @@ need more examples or have questions.
 .. toctree::
    :maxdepth: 2
 
-   builtin.txt
+   simple.txt
    pythoncollection.txt
-   controlskip.txt
    mysetup.txt
-   detectpytest.txt
+   parametrize.txt
    nonpython.txt
-   simple.txt
-   xunit_setup.txt

--- a/doc/example/pythoncollection.txt
+++ b/doc/example/pythoncollection.txt
@@ -21,7 +21,7 @@ their file system path and then running 
 an ini-file and the :confval:`addopts` option you can make
 this change more permanently::
 
-    # content of setup.cfg or tox.ini
+    # content of pytest.ini
     [pytest]
     addopts = --pyargs
 
@@ -30,8 +30,8 @@ finding out what is collected
 
 You can always peek at the collection tree without running tests like this::
 
-    . $ py.test --collectonly collectonly.py
-    <Module 'collectonly.py'>
+    . $ py.test --collectonly pythoncollection.py
+    <Module 'pythoncollection.py'><Function 'test_function'><Class 'TestClass'><Instance '()'>

--- /dev/null
+++ b/doc/example/costlysetup/conftest.py
@@ -0,0 +1,17 @@
+
+def pytest_funcarg__setup(request):
+    return request.cached_setup(
+        setup=lambda: CostlySetup(),
+        teardown=lambda costlysetup: costlysetup.finalize(),
+        scope="session",
+    )
+
+class CostlySetup:
+    def __init__(self):
+        import time
+        print ("performing costly setup")
+        time.sleep(5)
+        self.timecostly = 1
+
+    def finalize(self):
+        del self.timecostly

--- a/doc/example/controlskip.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-
-.. _`retrieved by hooks as item keywords`:
-
-control skipping of tests according to command line option
---------------------------------------------------------------
-
-Here is a ``conftest.py`` file adding a ``--runslow`` command
-line option to control skipping of ``slow`` marked tests::
-
-    # content of conftest.py
-
-    import pytest
-    def pytest_addoption(parser):
-        parser.addoption("--runslow", action="store_true",
-            help="run slow tests")
-
-    def pytest_runtest_setup(item):
-        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
-            pytest.skip("need --runslow option to run")
-
-We can now write a test module like this::
-
-    # content of test_module.py
-
-    import pytest
-    slow = pytest.mark.slow
-
-    def test_func_fast():
-        pass
-
-    @slow
-    def test_func_slow():
-        pass
-
-and when running it will see a skipped "slow" test::
-
-    $ py.test test_module.py -rs    # "-rs" means report details on the little 's'
-    =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
-    test path 1: test_module.py
-    
-    test_module.py .s
-    ========================= short test summary info ==========================
-    SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
-    
-    =================== 1 passed, 1 skipped in 0.02 seconds ====================
-
-Or run it including the ``slow`` marked test::
-
-    $ py.test test_module.py --runslow
-    =========================== test session starts ============================
-    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
-    test path 1: test_module.py
-    
-    test_module.py ..
-    
-    ========================= 2 passed in 0.01 seconds =========================

--- a/example/assertion/global_testmodule_config/conftest.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import pytest, py
-mydir = py.path.local(__file__).dirpath()
-
-def pytest_runtest_setup(item):
-    if isinstance(item, pytest.Function):
-        if not item.fspath.relto(mydir):
-            return
-        mod = item.getparent(pytest.Module).obj
-        if hasattr(mod, 'hello'):
-            py.builtin.print_("mod.hello", mod.hello)

--- /dev/null
+++ b/doc/example/multipython.py
@@ -0,0 +1,63 @@
+"""
+module containing parametrized tests checking cross-python
+serialization via the pickle module.
+"""
+import py
+
+pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']
+
+def pytest_generate_tests(metafunc):
+    if 'python1' in metafunc.funcargnames:
+        assert 'python2' in metafunc.funcargnames
+        for obj in metafunc.function.multiarg.kwargs['obj']:
+            for py1 in pythonlist:
+                for py2 in pythonlist:
+                    metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
+                        param=(py1, py2, obj))
+
+@py.test.mark.multiarg(obj=[42, {}, {1:3},])
+def test_basic_objects(python1, python2, obj):
+    python1.dumps(obj)
+    python2.load_and_is_true("obj == %s" % obj)
+
+def pytest_funcarg__python1(request):
+    tmpdir = request.getfuncargvalue("tmpdir")
+    picklefile = tmpdir.join("data.pickle")
+    return Python(request.param[0], picklefile)
+
+def pytest_funcarg__python2(request):
+    python1 = request.getfuncargvalue("python1")
+    return Python(request.param[1], python1.picklefile)
+
+def pytest_funcarg__obj(request):
+    return request.param[2]
+
+class Python:
+    def __init__(self, version, picklefile):
+        self.pythonpath = py.path.local.sysfind(version)
+        if not self.pythonpath:
+            py.test.skip("%r not found" %(version,))
+        self.picklefile = picklefile
+    def dumps(self, obj):
+        dumpfile = self.picklefile.dirpath("dump.py")
+        dumpfile.write(py.code.Source("""
+            import pickle
+            f = open(%r, 'wb')
+            s = pickle.dump(%r, f)
+            f.close()
+        """ % (str(self.picklefile), obj)))
+        py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
+
+    def load_and_is_true(self, expression):
+        loadfile = self.picklefile.dirpath("load.py")
+        loadfile.write(py.code.Source("""
+            import pickle
+            f = open(%r, 'rb')
+            obj = pickle.load(f)
+            f.close()
+            res = eval(%r)
+            if not res:
+                raise SystemExit(1)
+        """ % (str(self.picklefile), expression)))
+        print (loadfile)
+        py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))

--- a/example/assertion/test_setup_flow_example.py
+++ /dev/null
@@ -1,42 +0,0 @@
-def setup_module(module):
-    module.TestStateFullThing.classcount = 0
-
-class TestStateFullThing:
-    def setup_class(cls):
-        cls.classcount += 1
-
-    def teardown_class(cls):
-        cls.classcount -= 1
-
-    def setup_method(self, method):
-        self.id = eval(method.__name__[5:])
-
-    def test_42(self):
-        assert self.classcount == 1
-        assert self.id == 42
-
-    def test_23(self):
-        assert self.classcount == 1
-        assert self.id == 23
-
-def teardown_module(module):
-    assert module.TestStateFullThing.classcount == 0
-
-""" For this example the control flow happens as follows::
-    import test_setup_flow_example
-    setup_module(test_setup_flow_example)
-       setup_class(TestStateFullThing)
-           instance = TestStateFullThing()
-           setup_method(instance, instance.test_42)
-              instance.test_42()
-           setup_method(instance, instance.test_23)
-              instance.test_23()
-       teardown_class(TestStateFullThing)
-    teardown_module(test_setup_flow_example)
-
-Note that ``setup_class(TestStateFullThing)`` is called and not
-``TestStateFullThing.setup_class()`` which would require you
-to insert ``setup_class = classmethod(setup_class)`` to make
-your setup function callable.
-"""
-

--- a/doc/announce/index.txt
+++ b/doc/announce/index.txt
@@ -6,6 +6,10 @@ Release announcements
    :maxdepth: 2
 
    release-2.0.0
+
+.. toctree::
+   :hidden:
+
    release-1.3.4
    release-1.3.3
    release-1.3.2

--- a/example/funcarg/mysetup2/conftest.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import py
-from mysetup2.myapp import MyApp
-
-def pytest_funcarg__mysetup(request):
-    return MySetup(request)
-
-def pytest_addoption(parser):
-    parser.addoption("--ssh", action="store", default=None,
-        help="specify ssh host to run tests with")
-
-
-class MySetup:
-    def __init__(self, request):
-        self.config = request.config
-
-    def myapp(self):
-        return MyApp()
-
-    def getsshconnection(self):
-        host = self.config.option.ssh
-        if host is None:
-            py.test.skip("specify ssh host with --ssh")
-        return execnet.SshGateway(host)
-

--- a/example/funcarg/mysetup2/test_ssh.py
+++ /dev/null
@@ -1,5 +0,0 @@
-
-class TestClass:
-    def test_function(self, mysetup):
-        conn = mysetup.getsshconnection()
-        # work with conn

--- a/example/assertion/failure_demo.py
+++ /dev/null
@@ -1,193 +0,0 @@
-from py.test import raises
-import py
-
-def otherfunc(a,b):
-    assert a==b
-
-def somefunc(x,y):
-    otherfunc(x,y)
-
-def otherfunc_multi(a,b):
-    assert (a ==
-            b)
-
-def test_generative(param1, param2):
-    assert param1 * 2 < param2
-
-def pytest_generate_tests(metafunc):
-    if 'param1' in metafunc.funcargnames:
-        metafunc.addcall(funcargs=dict(param1=3, param2=6))
-
-class TestFailing(object):
-    def test_simple(self):
-        def f():
-            return 42
-        def g():
-            return 43
-
-        assert f() == g()
-
-    def test_simple_multiline(self):
-        otherfunc_multi(
-                  42,
-                  6*9)
-
-    def test_not(self):
-        def f():
-            return 42
-        assert not f()
-
-    def test_complex_error(self):
-        def f():
-            return 44
-        def g():
-            return 43
-        somefunc(f(), g())
-
-    def test_z1_unpack_error(self):
-        l = []
-        a,b  = l
-
-    def test_z2_type_error(self):
-        l = 3
-        a,b  = l
-
-    def test_startswith(self):
-        s = "123"
-        g = "456"
-        assert s.startswith(g)
-
-    def test_startswith_nested(self):
-        def f():
-            return "123"
-        def g():
-            return "456"
-        assert f().startswith(g())
-
-    def test_global_func(self):
-        assert isinstance(globf(42), float)
-
-    def test_instance(self):
-        self.x = 6*7
-        assert self.x != 42
-
-    def test_compare(self):
-        assert globf(10) < 5
-
-    def test_try_finally(self):
-        x = 1
-        try:
-            assert x == 0
-        finally:
-            x = 0
-
-    def test_raises(self):
-        s = 'qwe'
-        raises(TypeError, "int(s)")
-
-    def test_raises_doesnt(self):
-        raises(IOError, "int('3')")
-
-    def test_raise(self):
-        raise ValueError("demo error")
-
-    def test_tupleerror(self):
-        a,b = [1]
-
-    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
-        l = [1,2,3]
-        print ("l is %r" % l)
-        a,b = l.pop()
-
-    def test_some_error(self):
-        if namenotexi:
-            pass
-
-    def func1(self):
-        assert 41 == 42
-
-
-# thanks to Matthew Scott for this test
-def test_dynamic_compile_shows_nicely():
-    src = 'def foo():\n assert 1 == 0\n'
-    name = 'abc-123'
-    module = py.std.imp.new_module(name)
-    code = py.code.compile(src, name, 'exec')
-    py.builtin.exec_(code, module.__dict__)
-    py.std.sys.modules[name] = module
-    module.foo()
-
-
-class TestSpecialisedExplanations(object):
-    def test_eq_text(self):
-        assert 'spam' == 'eggs'
-
-    def test_eq_similar_text(self):
-        assert 'foo 1 bar' == 'foo 2 bar'
-
-    def test_eq_multiline_text(self):
-        assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
-
-    def test_eq_long_text(self):
-        a = '1'*100 + 'a' + '2'*100
-        b = '1'*100 + 'b' + '2'*100
-        assert a == b
-
-    def test_eq_long_text_multiline(self):
-        a = '1\n'*100 + 'a' + '2\n'*100
-        b = '1\n'*100 + 'b' + '2\n'*100
-        assert a == b
-
-    def test_eq_list(self):
-        assert [0, 1, 2] == [0, 1, 3]
-
-    def test_eq_list_long(self):
-        a = [0]*100 + [1] + [3]*100
-        b = [0]*100 + [2] + [3]*100
-        assert a == b
-
-    def test_eq_dict(self):
-        assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
-
-    def test_eq_set(self):
-        assert set([0, 10, 11, 12]) == set([0, 20, 21])
-
-    def test_eq_longer_list(self):
-        assert [1,2] == [1,2,3]
-
-    def test_in_list(self):
-        assert 1 in [0, 2, 3, 4, 5]
-
-
-def test_attribute():
-    class Foo(object):
-        b = 1
-    i = Foo()
-    assert i.b == 2
-
-
-def test_attribute_instance():
-    class Foo(object):
-        b = 1
-    assert Foo().b == 2
-
-
-def test_attribute_failure():
-    class Foo(object):
-        def _get_b(self):
-            raise Exception('Failed to get attrib')
-        b = property(_get_b)
-    i = Foo()
-    assert i.b == 2
-
-
-def test_attribute_multiple():
-    class Foo(object):
-        b = 1
-    class Bar(object):
-        b = 2
-    assert Foo().b == Bar().b
-
-
-def globf(x):
-    return x+1

--- a/example/funcarg/mysetup/test_sample.py
+++ /dev/null
@@ -1,5 +0,0 @@
-
-def test_answer(mysetup):
-    app = mysetup.myapp()
-    answer = app.question()
-    assert answer == 42