2 new commits in pytest-xdist:

https://bitbucket.org/hpk42/pytest-xdist/commits/e95235ad5f8e/
Changeset:   e95235ad5f8e
User:        hpk42
Date:        2014-07-20 16:41:03
Summary:     fix pytest issue503: avoid random re-setup of broad scoped fixtures (anything above function).
Affected #:  5 files
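For background on this change: fixture teardown in pytest is driven by the "nextitem" argument of its runtest protocol, so a node that only ever knows about a single remaining test runs it without a next item and broader-scoped fixtures get finalized after each test; the following test then re-runs session/module-scoped setup. The scheduler and the remote test loop therefore now keep at least two items queued per node and only flush the last item at shutdown. A minimal standalone module that reproduces the symptom, mirroring the regression test added to testing/acceptance_test.py in this changeset (the file name and comments are illustrative, not part of the commit):

    # issue503_sketch.py -- run distributed, e.g.: py.test -n1 issue503_sketch.py
    # Before this fix the session fixture below could be set up once per test,
    # because a node executing items one at a time had no "nextitem" and all
    # active fixture scopes were torn down between tests.
    import pytest

    @pytest.fixture(scope='session')
    def fix():
        # the function attribute doubles as a module-level setup counter
        assert fix.counter == 0, 'session fixture was invoked multiple times'
        fix.counter += 1
    fix.counter = 0

    def test_a(fix):
        pass

    def test_b(fix):
        pass

With the fix both tests pass and the fixture is set up exactly once; without it, the second test could fail the assertion because the session fixture had been created again.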
diff -r 35661a1ed8b5c542fbee90206a0e5ddbefd7f729 -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 CHANGELOG
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,9 +1,14 @@
-XXX
+1.11
 -------------------------
 
-- fix pytest/xdist issue485 (also depends on py-1.4.21.dev1):
+- fix pytest/xdist issue485 (also depends on py-1.4.22):
   attach stdout/stderr on --boxed processes that die.
 
+- fix pytest/xdist issue503: make sure that a node has usually
+  two items to execute to avoid scoped fixtures to be torn down
+  pre-maturely (fixture teardown/setup is "nextitem" sensitive).
+  Thanks to Andreas Pelme for bug analysis and failing test.
+
 1.10
 -------------------------
 

diff -r 35661a1ed8b5c542fbee90206a0e5ddbefd7f729 -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -1,3 +1,4 @@
+import pytest
 import py
 import sys
 
@@ -459,3 +460,29 @@
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
+
+
+def test_fixture_scope_caching_issue503(testdir):
+    p1 = testdir.makepyfile("""
+        import pytest
+
+        @pytest.fixture(scope='session')
+        def fix():
+            assert fix.counter == 0, 'session fixture was invoked multiple times'
+            fix.counter += 1
+        fix.counter = 0
+
+        def test_a(fix):
+            pass
+
+        def test_b(fix):
+            pass
+    """)
+    result = testdir.runpytest(p1, '-v', '-n1')
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*2 passed*",
+    ])
+
+

diff -r 35661a1ed8b5c542fbee90206a0e5ddbefd7f729 -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 testing/test_dsession.py
--- a/testing/test_dsession.py
+++ b/testing/test_dsession.py
@@ -98,15 +98,15 @@
         assert sched.node2collection[node1] == collection
         assert sched.node2collection[node2] == collection
         sched.init_distribute()
+        assert not sched.pending
+        assert not sched.tests_finished()
+        assert len(node1.sent) == 2
+        assert len(node2.sent) == 0
+        assert node1.sent == [0, 1]
+        sched.remove_item(node1, node1.sent[0])
         assert sched.tests_finished()
-        assert len(node1.sent) == 1
-        assert len(node2.sent) == 1
-        x = sorted(node1.sent + node2.sent)
-        assert x == [0, 1]
-        sched.remove_item(node1, node1.sent[0])
-        sched.remove_item(node2, node2.sent[0])
+        sched.remove_item(node1, node1.sent[1])
         assert sched.tests_finished()
-        assert not sched.pending
 
     def test_init_distribute_chunksize(self):
         sched = LoadScheduling(2)
@@ -114,22 +114,25 @@
         node2 = MockNode()
         sched.addnode(node1)
         sched.addnode(node2)
-        col = ["xyz"] * (3)
+        col = ["xyz"] * (6)
         sched.addnode_collection(node1, col)
         sched.addnode_collection(node2, col)
         sched.init_distribute()
         #assert not sched.tests_finished()
         sent1 = node1.sent
         sent2 = node2.sent
-        chunkitems = col[:1]
-        assert (sent1 == [0] and sent2 == [1]) or (
-            sent1 == [1] and sent2 == [0])
+        assert sent1 == [0, 1]
+        assert sent2 == [2, 3]
+        assert sched.pending == [4, 5]
         assert sched.node2pending[node1] == sent1
         assert sched.node2pending[node2] == sent2
-        assert len(sched.pending) == 1
-        for node in (node1, node2):
-            for i in sched.node2pending[node]:
-                sched.remove_item(node, i)
+        assert len(sched.pending) == 2
+        sched.remove_item(node1, 0)
+        assert node1.sent == [0, 1, 4]
+        assert sched.pending == [5]
+        assert node2.sent == [2, 3]
+        sched.remove_item(node1, 1)
+        assert node1.sent == [0, 1, 4, 5]
         assert not sched.pending
 
     def test_add_remove_node(self):

diff -r 35661a1ed8b5c542fbee90206a0e5ddbefd7f729 -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 xdist/dsession.py
--- a/xdist/dsession.py
+++ b/xdist/dsession.py
@@ -49,7 +49,7 @@
         if not pending:
             return
         crashitem = self.node2collection[node][pending.pop(0)]
-        # XXX what about the rest of pending?
+        # XXX do or report something wrt the remaining per-node pending items?
         return crashitem
 
     def init_distribute(self):
@@ -58,11 +58,13 @@
             node.send_runtest_all()
             pending[:] = range(len(self.node2collection[node]))
 
+
 class LoadScheduling:
     def __init__(self, numnodes, log=None):
         self.numnodes = numnodes
         self.node2pending = {}
         self.node2collection = {}
+        self.nodes = []
         self.pending = []
         if log is None:
             self.log = py.log.Producer("loadsched")
@@ -75,13 +77,14 @@
 
     def addnode(self, node):
         self.node2pending[node] = []
+        self.nodes.append(node)
 
     def tests_finished(self):
-        if not self.collection_is_completed or self.pending:
+        if not self.collection_is_completed:
             return False
-        #for items in self.node2pending.values():
-        #    if items:
-        #        return False
+        for pending in self.node2pending.values():
+            if len(pending) >= 2:
+                return False
         return True
 
     def addnode_collection(self, node, collection):
@@ -92,37 +95,46 @@
         self.collection_is_completed = True
 
     def remove_item(self, node, item_index, duration=0):
-        node_pending = self.node2pending[node]
-        node_pending.remove(item_index)
-        # pre-load items-to-test if the node may become ready
+        self.node2pending[node].remove(item_index)
+        self.check_schedule(node, duration=duration)
+
+    def check_schedule(self, node, duration=0):
         if self.pending:
-            if duration >= 0.1 and node_pending:
-                # seems the node is doing long-running tests
-                # so let's rather wait with sending new items
-                return
-            # how many nodes do we have remaining per node roughly?
+            # how many nodes do we have?
             num_nodes = len(self.node2pending)
             # if our node goes below a heuristic minimum, fill it out to
            # heuristic maximum
             items_per_node_min = max(
-                1, len(self.pending) // num_nodes // 4)
+                2, len(self.pending) // num_nodes // 4)
             items_per_node_max = max(
-                1, len(self.pending) // num_nodes // 2)
-            if len(node_pending) <= items_per_node_min:
-                num_send = items_per_node_max - len(node_pending) + 1
+                2, len(self.pending) // num_nodes // 2)
+            node_pending = self.node2pending[node]
+            if len(node_pending) < items_per_node_min:
+                if duration >= 0.1 and len(node_pending) >= 2:
+                    # seems the node is doing long-running tests
+                    # and has enough items to continue
+                    # so let's rather wait with sending new items
+                    return
+                num_send = items_per_node_max - len(node_pending)
                 self._send_tests(node, num_send)
         self.log("num items waiting for node:", len(self.pending))
         #self.log("node2pending:", self.node2pending)
 
     def remove_node(self, node):
+        self.nodes.remove(node)
         pending = self.node2pending.pop(node)
         if not pending:
             return
-        # the node must have crashed on the item if there are pending ones
+        # the node has crashed on the item if there are pending ones
+        # and we are told to remove the node
         crashitem = self.collection[pending.pop(0)]
+
+        # put the remaining items back to the general pending list
         self.pending.extend(pending)
+
+        # see if some nodes can pick the remaining tests up already
+        for node in self.node2pending:
+            self.check_schedule(node)
         return crashitem
 
     def init_distribute(self):
@@ -147,9 +159,9 @@
         # how many items per node do we have about?
         items_per_node = len(self.collection) // len(self.node2pending)
         # take a fraction of tests for initial distribution
-        node_chunksize = max(items_per_node // 4, 1)
+        node_chunksize = max(items_per_node // 4, 2)
         # and initialize each node with a chunk of tests
-        for node in self.node2pending:
+        for node in self.nodes:
             self._send_tests(node, node_chunksize)
         #f = open("/tmp/sent", "w")

diff -r 35661a1ed8b5c542fbee90206a0e5ddbefd7f729 -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 xdist/remote.py
--- a/xdist/remote.py
+++ b/xdist/remote.py
@@ -51,9 +51,12 @@
             elif name == "runtests_all":
                 torun.extend(range(len(session.items)))
             self.log("items to run:", torun)
-            while torun:
+            # only run if we have an item and a next item
+            while len(torun) >= 2:
                 self.run_tests(torun)
             if name == "shutdown":
+                if torun:
+                    self.run_tests(torun)
                 break
         return True


https://bitbucket.org/hpk42/pytest-xdist/commits/4c6c39266031/
Changeset:   4c6c39266031
User:        hpk42
Date:        2014-07-20 16:56:21
Summary:     fix various flakes issues and add "flakes" to tox tests
Affected #:  10 files

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/acceptance_test.py
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -1,6 +1,4 @@
-import pytest
 import py
-import sys
 
 class TestDistribution:
     def test_n1_pass(self, testdir):
@@ -194,7 +192,7 @@
         assert dest.join(subdir.basename).check(dir=1)
 
     def test_data_exchange(self, testdir):
-        c1 = testdir.makeconftest("""
+        testdir.makeconftest("""
            # This hook only called on master.
            def pytest_configure_node(node):
                node.slaveinput['a'] = 42
@@ -251,7 +249,7 @@
 
     def test_keyboard_interrupt_dist(self, testdir):
         # xxx could be refined to check for return code
-        p = testdir.makepyfile("""
+        testdir.makepyfile("""
            def test_sleep():
                import time
                time.sleep(10)
@@ -301,7 +299,7 @@
 
 class TestTerminalReporting:
     def test_pass_skip_fail(self, testdir):
-        p = testdir.makepyfile("""
+        testdir.makepyfile("""
            import py
            def test_ok():
                pass
@@ -323,7 +321,7 @@
         ])
 
     def test_fail_platinfo(self, testdir):
-        p = testdir.makepyfile("""
+        testdir.makepyfile("""
            def test_func():
                assert 0
         """)

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/conftest.py
--- a/testing/conftest.py
+++ b/testing/conftest.py
@@ -7,14 +7,11 @@
 
 def pytest_addoption(parser):
     parser.addoption('--gx',
-        action="append", dest="gspecs", 
+        action="append", dest="gspecs",
         help=("add a global test environment, XSpec-syntax. "))
 
 def pytest_funcarg__specssh(request):
     return getspecssh(request.config)
 
-def getgspecs(config):
-    return [execnet.XSpec(spec)
-              for spec in config.getvalueorskip("gspecs")]
 
 # configuration information for tests
 def getgspecs(config):

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/test_dsession.py
--- a/testing/test_dsession.py
+++ b/testing/test_dsession.py
@@ -4,7 +4,6 @@
     EachScheduling,
     report_collection_diff,
 )
-from _pytest import main as outcome
 import py
 import pytest
 import execnet

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/test_looponfail.py
--- a/testing/test_looponfail.py
+++ b/testing/test_looponfail.py
@@ -42,7 +42,7 @@
 
     def test_dirchange(self, tmpdir):
         tmp = tmpdir
-        hello = tmp.ensure("dir", "hello.py")
+        tmp.ensure("dir", "hello.py")
         sd = StatRecorder([tmp])
         assert not sd.fil(tmp.join("dir"))

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/test_remote.py
--- a/testing/test_remote.py
+++ b/testing/test_remote.py
@@ -3,7 +3,6 @@
 from xdist.remote import serialize_report
 import execnet
 queue = py.builtin._tryimport("queue", "Queue")
-from py.builtin import print_
 import marshal
 
 WAIT_TIMEOUT = 10.0
@@ -26,7 +25,7 @@
     use_callback = False
 
     def __init__(self, request):
-        self.testdir = testdir = request.getfuncargvalue("testdir")
+        self.testdir = request.getfuncargvalue("testdir")
         self.request = request
         self.events = queue.Queue()
@@ -140,7 +139,7 @@
 
 class TestSlaveInteractor:
     def test_basic_collect_and_runtests(self, slave):
-        p = slave.testdir.makepyfile("""
+        slave.testdir.makepyfile("""
            def test_func(): pass
         """)
@@ -170,7 +169,7 @@
         assert 'slaveoutput' in ev.kwargs
 
     def test_remote_collect_skip(self, slave):
-        p = slave.testdir.makepyfile("""
+        slave.testdir.makepyfile("""
            import py
            py.test.skip("hello")
         """)
@@ -187,7 +186,7 @@
         assert not ev.kwargs['ids']
 
     def test_remote_collect_fail(self, slave):
-        p = slave.testdir.makepyfile("""aasd qwe""")
+        slave.testdir.makepyfile("""aasd qwe""")
         slave.setup()
         ev = slave.popevent("collectionstart")
         assert not ev.kwargs
@@ -201,7 +200,7 @@
         assert not ev.kwargs['ids']
 
     def test_runtests_all(self, slave):
-        p = slave.testdir.makepyfile("""
+        slave.testdir.makepyfile("""
            def test_func(): pass
            def test_func2(): pass
         """)
@@ -228,7 +227,7 @@
     def test_happy_run_events_converted(self, testdir, slave):
         py.test.xfail("implement a simple test for event production")
         assert not slave.use_callback
-        p = slave.testdir.makepyfile("""
+        slave.testdir.makepyfile("""
            def test_func():
                pass
         """)

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 testing/test_slavemanage.py
--- a/testing/test_slavemanage.py
+++ b/testing/test_slavemanage.py
@@ -1,5 +1,4 @@
 import py
-import os
 import execnet
 from xdist.slavemanage import HostRSync, NodeManager
 
@@ -108,7 +107,7 @@
         return mysetup(request)
 
     def test_hrsync_filter(self, mysetup):
-        source, dest = mysetup.source, mysetup.dest
+        source, _ = mysetup.source, mysetup.dest # noqa
         source.ensure("dir", "file.txt")
         source.ensure(".svn", "entries")
         source.ensure(".somedotfile", "moreentries")
@@ -139,7 +138,7 @@
     @py.test.mark.xfail
     def test_rsync_roots_no_roots(self, testdir, mysetup):
         mysetup.source.ensure("dir1", "file1").write("hello")
-        config = testdir.parseconfig(source)
+        config = testdir.parseconfig(mysetup.source)
         nodemanager = NodeManager(config, ["popen//chdir=%s" % mysetup.dest])
         #assert nodemanager.config.topdir == source == config.topdir
         nodemanager.makegateways()
@@ -194,11 +193,11 @@
     def test_rsyncignore(self, testdir, mysetup):
         source, dest = mysetup.source, mysetup.dest
         dir2 = source.ensure("dir1", "dir2", dir=1)
-        dir5 = source.ensure("dir5", "dir6", "bogus")
-        dirf = source.ensure("dir5", "file")
+        source.ensure("dir5", "dir6", "bogus")
+        source.ensure("dir5", "file")
         dir2.ensure("hello")
-        dirfoo = source.ensure("foo", "bar")
-        dirbar = source.ensure("bar", "foo")
+        source.ensure("foo", "bar")
+        source.ensure("bar", "foo")
         source.join("tox.ini").write(py.std.textwrap.dedent("""
            [pytest]
            rsyncdirs = dir1 dir5
@@ -217,7 +216,7 @@
         assert not dest.join('bar').check()
 
     def test_optimise_popen(self, testdir, mysetup):
-        source, dest = mysetup.source, mysetup.dest
+        source = mysetup.source
         specs = ["popen"] * 3
         source.join("conftest.py").write("rsyncdirs = ['a']")
         source.ensure('a', dir=1)

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 tox.ini
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist=py26,py32,py33,py34,py27,py27-pexpect,py33-pexpect,py26,py26-old,py33-old
+envlist=py26,py32,py33,py34,py27,py27-pexpect,py33-pexpect,py26,py26-old,py33-old,flakes
 
 [testenv]
 changedir=testing
@@ -13,6 +13,11 @@
 deps={[testenv]deps}
     pexpect
 
+[testenv:flakes]
+changedir=
+deps = pytest-flakes>=0.2
+commands = py.test --flakes -m flakes testing xdist
+
 [testenv:py26-old]
 deps=
     pytest==2.4.2

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 xdist/dsession.py
--- a/xdist/dsession.py
+++ b/xdist/dsession.py
@@ -1,4 +1,3 @@
-import sys
 import difflib
 
 import pytest
@@ -295,7 +294,7 @@
             self.slave_errordown(node, "keyboard-interrupt")
             return
         crashitem = self.sched.remove_node(node)
-        #assert not crashitem, (crashitem, node)
+        assert not crashitem, (crashitem, node)
         if self.shuttingdown and not self.sched.hasnodes():
             self.session_finished = True

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 xdist/looponfail.py
--- a/xdist/looponfail.py
+++ b/xdist/looponfail.py
@@ -87,7 +87,7 @@
             result = self.runsession()
             failures, reports, collection_failed = result
             if collection_failed:
-                reports = ["Collection failed, keeping previous failure set"]
+                pass # "Collection failed, keeping previous failure set"
             else:
                 uniq_failures = []
                 for failure in failures:
@@ -109,7 +109,6 @@
 
 def init_slave_session(channel, args, option_dict):
     import os, sys
-    import py
     outchannel = channel.gateway.newchannel()
     sys.stdout = sys.stderr = outchannel.makefile('w')
     channel.send(outchannel)

diff -r e95235ad5f8e07f3ba0cad07a0d76e899066ffd1 -r 4c6c3926603135015691bcf537ed8dfe15b59874 xdist/remote.py
--- a/xdist/remote.py
+++ b/xdist/remote.py
@@ -128,6 +128,7 @@
 
 
 if __name__ == '__channelexec__':
+    channel = channel # noqa
     # python3.2 is not concurrent import safe, so let's play it safe
     # https://bitbucket.org/hpk42/pytest/issue/347/pytest-xdist-and-python-32
     if sys.version_info[:2] == (3,2):

Repository URL: https://bitbucket.org/hpk42/pytest-xdist/