# HG changeset patch -- Bitbucket.org
# Project py-trunk
# URL http://bitbucket.org/hpk42/py-trunk/overview
# User holger krekel <hol...@merlinux.eu>
# Date 1275957291 -7200
# Node ID 23865d3042213f7a6d4a8fd5ce6b5b71ac23a108
# Parent  97febc3fbe6ecf1a3c492030a06e9e690c5bf47b
introduce a new request.applymarker() function and refactor
internally to allow for dynamically adding keywords to test
items.

--- a/py/_plugin/pytest_runner.py
+++ b/py/_plugin/pytest_runner.py
@@ -134,7 +134,7 @@ class ItemTestReport(BaseReport):
         self.item = item 
         self.when = when
         if item and when != "setup":
-            self.keywords = item.readkeywords() 
+            self.keywords = item.keywords
         else:
             # if we fail during setup it might mean 
             # we are not able to access the underlying object

--- a/doc/test/plugin/skipping.txt
+++ b/doc/test/plugin/skipping.txt
@@ -81,6 +81,7 @@ apply the function will be skipped.
 
 .. _`whole class- or module level`: mark.html#scoped-marking
 
+.. _xfail:
 
 mark a test function as **expected to fail**
 -------------------------------------------------------

--- a/testing/plugin/test_pytest_terminal.py
+++ b/testing/plugin/test_pytest_terminal.py
@@ -14,6 +14,7 @@ from py._plugin.pytest_terminal import T
 from py._plugin import pytest_runner as runner 
 
 def basic_run_report(item):
+    runner.call_and_report(item, "setup", log=False)
     return runner.call_and_report(item, "call", log=False)
 
 class Option:

--- a/py/_test/funcargs.py
+++ b/py/_test/funcargs.py
@@ -92,6 +92,16 @@ class FuncargRequest:
             if argname not in self._pyfuncitem.funcargs:
                 self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname)
 
+
+    def applymarker(self, marker):
+        """ apply a marker to a test function invocation. 
+
+        The 'marker' must be created with a py.test.mark.* expression.
+        """
+        if not isinstance(marker, py.test.mark.XYZ.__class__):
+            raise ValueError("%r is not a py.test.mark.* object" % (marker,))
+        self._pyfuncitem.keywords[marker.markname] = marker 
+
     def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
         """ cache and return result of calling setup().  
 

--- a/CHANGELOG
+++ b/CHANGELOG
@@ -4,6 +4,17 @@ Changes between 1.3.1 and 1.3.x
 New features 
 ++++++++++++++++++
 
+- Funcarg factories can now dynamically apply a marker to a 
+  test invocation.  This is particularly useful if a factory
+  provides parameters to a test which you expect to fail:
+
+    def pytest_funcarg__arg(request):
+        request.applymarker(py.test.mark.xfail(reason="flaky config"))
+        ...
+
+    def test_function(arg):
+        ...
+
 Bug fixes / Maintenance
 ++++++++++++++++++++++++++
 

--- a/py/_plugin/pytest_skipping.py
+++ b/py/_plugin/pytest_skipping.py
@@ -159,8 +159,10 @@ class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
         self.name = name
-        self.holder = getattr(item.obj, name, None)
 
+    @property
+    def holder(self):
+        return self.item.keywords.get(self.name, None)
     def __bool__(self):
         return bool(self.holder)
     __nonzero__ = __bool__
@@ -204,10 +206,17 @@ def pytest_runtest_setup(item):
     if evalskip.istrue():
         py.test.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
+    check_xfail_no_run(item)
+
+def pytest_pyfunc_call(pyfuncitem):
+    check_xfail_no_run(pyfuncitem)
+
+def check_xfail_no_run(item):
     if not item.config.getvalue("runxfail"):
-        if item._evalxfail.istrue():
-            if not item._evalxfail.get('run', True):
-                py.test.skip("xfail")
+        evalxfail = item._evalxfail
+        if evalxfail.istrue():
+            if not evalxfail.get('run', True):
+                py.test.xfail("[NOTRUN] " + evalxfail.getexplanation())
 
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
@@ -224,16 +233,9 @@ def pytest_runtest_makereport(__multical
             rep.skipped = True
             rep.failed = False
             return rep
-    if call.when == "setup":
+    if call.when == "call":
         rep = __multicall__.execute()
-        if rep.skipped and evalxfail.istrue():
-            expl = evalxfail.getexplanation()
-            if not evalxfail.get("run", True):
-                expl = "[NOTRUN] " + expl
-            rep.keywords['xfail'] = expl
-        return rep
-    elif call.when == "call":
-        rep = __multicall__.execute()
+        evalxfail = getattr(item, '_evalxfail')
         if not item.config.getvalue("runxfail") and evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True

--- a/doc/test/funcargs.txt
+++ b/doc/test/funcargs.txt
@@ -160,6 +160,25 @@ like this:
                  scope="session"
         )
 
+dynamically applying a marker
+---------------------------------------------
+
+.. sourcecode:: python 
+
+    def applymarker(self, marker):
+        """ apply a marker to a test function invocation. 
+
+        The 'marker' must be created with a py.test.mark.* expression.
+        """
+
+``request.applymarker(marker)`` will mark the test invocation
+with the given marker.  For example, if your funcarg factory provides 
+values which may cause a test function to fail you can call
+``request.applymarker(py.test.mark.xfail(reason='flaky config'))``
+and this will cause the test to not show tracebacks. See xfail_
+for details. 
+
+.. _`xfail`: plugin/skipping.html#xfail
 
 requesting values of other funcargs 
 ---------------------------------------------

--- a/testing/test_funcargs.py
+++ b/testing/test_funcargs.py
@@ -211,6 +211,23 @@ class TestRequest:
         req = funcargs.FuncargRequest(item)
         assert req.fspath == modcol.fspath 
 
+def test_applymarker(testdir):
+    item1,item2 = testdir.getitems("""
+        class TestClass:
+            def test_func1(self, something): 
+                pass
+            def test_func2(self, something): 
+                pass
+    """)
+    req1 = funcargs.FuncargRequest(item1)
+    assert 'xfail' not in item1.keywords 
+    req1.applymarker(py.test.mark.xfail)
+    assert 'xfail' in item1.keywords 
+    assert 'skipif' not in item1.keywords 
+    req1.applymarker(py.test.mark.skipif)
+    assert 'skipif' in item1.keywords 
+    py.test.raises(ValueError, "req1.applymarker(42)")
+
 class TestRequestCachedSetup:
     def test_request_cachedsetup(self, testdir):
         item1,item2 = testdir.getitems("""

--- a/testing/plugin/test_pytest_skipping.py
+++ b/testing/plugin/test_pytest_skipping.py
@@ -188,6 +188,21 @@ class TestXFail:
             "*1 passed*",
         ])
 
+    def test_xfail_not_run_no_setup_run(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail(run=False, reason="hello")
+            def test_this():
+                assert 0
+            def setup_module(mod):
+                raise ValueError(42)
+        """)
+        result = testdir.runpytest(p, '--report=xfailed', )
+        result.stdout.fnmatch_lines([
+            "*test_one*test_this*NOTRUN*hello",
+            "*1 xfailed*",
+        ])
+
     def test_xfail_xpass(self, testdir):
         p = testdir.makepyfile(test_one="""
             import py
@@ -245,8 +260,47 @@ class TestXFail:
             "*py.test.xfail*",
         ])
 
+    def xtest_dynamic_xfail_set_during_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.mark.xfail(function)
+            def test_this():
+                assert 0
+            def test_that():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*XPASS*test_that*",
+        ])
 
+    def test_dynamic_xfail_no_run(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail(run=False))
+            def test_this(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*NOTRUN*",
+        ])
 
+    def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail)
+            def test_this2(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
 
 
 class TestSkipif:

--- a/py/_test/pycollect.py
+++ b/py/_test/pycollect.py
@@ -348,6 +348,7 @@ class Function(FunctionMixin, py.test.co
         if callobj is not _dummy: 
             self._obj = callobj 
         self.function = getattr(self.obj, 'im_func', self.obj)
+        self.keywords.update(py.builtin._getfuncdict(self.obj) or {})
 
     def _getobj(self):
         name = self.name
@@ -359,11 +360,6 @@ class Function(FunctionMixin, py.test.co
     def _isyieldedfunction(self):
         return self._args is not None
 
-    def readkeywords(self):
-        d = super(Function, self).readkeywords()
-        d.update(py.builtin._getfuncdict(self.obj))
-        return d
-
     def runtest(self):
         """ execute the underlying test function. """
         self.ihook.pytest_pyfunc_call(pyfuncitem=self)

--- a/py/_test/collect.py
+++ b/py/_test/collect.py
@@ -31,6 +31,7 @@ class Node(object):
         self.config = config or parent.config
         self.fspath = getattr(parent, 'fspath', None) 
         self.ihook = HookProxy(self)
+        self.keywords = self.readkeywords()
 
     def _reraiseunpicklingproblem(self):
         if hasattr(self, '_unpickle_exc'):
@@ -153,7 +154,7 @@ class Node(object):
     def _matchonekeyword(self, key, chain):
         elems = key.split(".")
         # XXX O(n^2), anyone cares?
-        chain = [item.readkeywords() for item in chain if item._keywords()]
+        chain = [item.keywords for item in chain if item.keywords]
         for start, _ in enumerate(chain):
             if start + len(elems) > len(chain):
                 return False

--- a/py/_plugin/pytest_mark.py
+++ b/py/_plugin/pytest_mark.py
@@ -171,4 +171,6 @@ def pytest_pycollect_makeitem(__multical
                 for mark in marker:
                     if isinstance(mark, MarkDecorator):
                         mark(func)
+                item.keywords.update(py.builtin._getfuncdict(func) or {})
+                        
     return item

--- a/testing/plugin/test_pytest_mark.py
+++ b/testing/plugin/test_pytest_mark.py
@@ -65,7 +65,7 @@ class TestFunctional:
             def test_func():
                 pass
         """)
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
 
     def test_marklist_per_class(self, testdir):
@@ -79,7 +79,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
 
     def test_marklist_per_module(self, testdir):
@@ -93,7 +93,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
         assert 'world' in keywords
 
@@ -108,7 +108,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
 
     @py.test.mark.skipif("sys.version_info < (2,6)")
@@ -124,7 +124,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
         assert 'world' in keywords
 
@@ -141,7 +141,7 @@ class TestFunctional:
         """)
         items, rec = testdir.inline_genitems(p)
         item, = items
-        keywords = item.readkeywords()
+        keywords = item.keywords
         marker = keywords['hello']
         assert marker.args == ["pos0", "pos1"]
         assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
@@ -154,4 +154,22 @@ class TestFunctional:
             def test_func():
                 pass
         """)
-        keywords = item.readkeywords()
+        keywords = item.keywords
+
+    def test_mark_dynamically_in_funcarg(self, testdir):
+        testdir.makeconftest("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.hello)
+            def pytest_terminal_summary(terminalreporter):
+                l = terminalreporter.stats['passed'] 
+                terminalreporter._tw.line("keyword: %s" % l[0].keywords)
+        """)
+        testdir.makepyfile("""
+            def test_func(arg):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "keyword: *hello*"
+        ])
_______________________________________________
py-svn mailing list
py-svn@codespeak.net
http://codespeak.net/mailman/listinfo/py-svn

Reply via email to