Author: Martin Matusiak <numero...@gmail.com>
Branch: py3.3
Changeset: r72643:61ea6233fd47
Date: 2014-07-30 21:05 +0200
http://bitbucket.org/pypy/pypy/changeset/61ea6233fd47/

Log:    Merged pypy/pypy/py3.3 into py3.3

diff --git a/lib-python/3/test/test_builtin.py b/lib-python/3/test/test_builtin.py
--- a/lib-python/3/test/test_builtin.py
+++ b/lib-python/3/test/test_builtin.py
@@ -15,7 +15,8 @@
 import unittest
 import warnings
 from operator import neg
-from test.support import TESTFN, unlink,  run_unittest, check_warnings
+from test.support import (
+    TESTFN, unlink,  run_unittest, check_warnings, check_impl_detail)
 try:
     import pty, signal
 except ImportError:
@@ -423,7 +424,9 @@
         try:
             raise IndexError
         except:
-            self.assertEqual(len(dir(sys.exc_info()[2])), 4)
+            methods = [meth for meth in dir(sys.exc_info()[2])
+                       if not meth.startswith('_')]
+            self.assertEqual(len(methods), 4)
 
         # test that object has a __dir__()
         self.assertEqual(sorted([].__dir__()), dir([]))
@@ -558,18 +561,21 @@
         self.assertEqual((g, l), ({'a': 1}, {'b': 2}))
 
     def test_exec_globals(self):
-        code = compile("print('Hello World!')", "", "exec")
-        # no builtin function
-        self.assertRaisesRegex(NameError, "name 'print' is not defined",
-                               exec, code, {'__builtins__': {}})
-        # __builtins__ must be a mapping type
-        self.assertRaises(TypeError,
-                          exec, code, {'__builtins__': 123})
+        if check_impl_detail():
+            # strict __builtins__ compliance (CPython)
+            code = compile("print('Hello World!')", "", "exec")
+            # no builtin function
+            self.assertRaisesRegex(NameError, "name 'print' is not defined",
+                                   exec, code, {'__builtins__': {}})
+            # __builtins__ must be a mapping type
+            self.assertRaises(TypeError,
+                              exec, code, {'__builtins__': 123})
 
-        # no __build_class__ function
-        code = compile("class A: pass", "", "exec")
-        self.assertRaisesRegex(NameError, "__build_class__ not found",
-                               exec, code, {'__builtins__': {}})
+            # no __build_class__ function
+            code = compile("class A: pass", "", "exec")
+            if True:
+                self.assertRaisesRegex(NameError, "__build_class__ not found",
+                                       exec, code, {'__builtins__': {}})
 
         class frozendict_error(Exception):
             pass
@@ -579,7 +585,7 @@
                 raise frozendict_error("frozendict is readonly")
 
         # read-only builtins
-        frozen_builtins = frozendict(__builtins__)
+        frozen_builtins = frozendict(builtins.__dict__)
         code = compile("__builtins__['superglobal']=2; print(superglobal)", "test", "exec")
         self.assertRaises(frozendict_error,
                           exec, code, {'__builtins__': frozen_builtins})
diff --git a/lib-python/3/test/test_concurrent_futures.py b/lib-python/3/test/test_concurrent_futures.py
--- a/lib-python/3/test/test_concurrent_futures.py
+++ b/lib-python/3/test/test_concurrent_futures.py
@@ -295,14 +295,19 @@
         event = threading.Event()
         def future_func():
             event.wait()
-        oldswitchinterval = sys.getswitchinterval()
-        sys.setswitchinterval(1e-6)
+        newgil = hasattr(sys, 'getswitchinterval')
+        if newgil:
+            geti, seti = sys.getswitchinterval, sys.setswitchinterval
+        else:
+            geti, seti = sys.getcheckinterval, sys.setcheckinterval
+        oldinterval = geti()
+        seti(1e-6 if newgil else 1)
         try:
             fs = {self.executor.submit(future_func) for i in range(100)}
             event.set()
             futures.wait(fs, return_when=futures.ALL_COMPLETED)
         finally:
-            sys.setswitchinterval(oldswitchinterval)
+            seti(oldinterval)
 
 
 class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests, unittest.TestCase):
diff --git a/lib-python/3/test/test_imp.py b/lib-python/3/test/test_imp.py
--- a/lib-python/3/test/test_imp.py
+++ b/lib-python/3/test/test_imp.py
@@ -317,7 +317,6 @@
 
     @unittest.skipUnless(sys.implementation.cache_tag is not None,
                          'requires sys.implementation.cache_tag not be None')
-    @support.impl_detail("PyPy ignores the optimize flag", pypy=False)
     def test_cache_from_source(self):
         # Given the path to a .py file, return the path to its PEP 3147
         # defined .pyc file (i.e. under __pycache__).
@@ -339,7 +338,6 @@
                               'file{}.pyc'.format(self.tag))
         self.assertEqual(imp.cache_from_source(path, True), expect)
 
-    @support.impl_detail("PyPy ignores the optimize flag", pypy=False)
     def test_cache_from_source_optimized(self):
         # Given the path to a .py file, return the path to its PEP 3147
         # defined .pyo file (i.e. under __pycache__).
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -37,7 +37,7 @@
     "binascii", "_multiprocessing", '_warnings', "_collections",
     "_multibytecodec", "_continuation", "_cffi_backend",
     "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy"
-    "faulthandler",
+    "faulthandler", "_lzma",
 ])
 
 translation_modules = default_modules.copy()
@@ -106,6 +106,7 @@
     "_hashlib"  : ["pypy.module._ssl.interp_ssl"],
     "_minimal_curses": ["pypy.module._minimal_curses.fficurses"],
     "_continuation": ["rpython.rlib.rstacklet"],
+    "_lzma"     : ["pypy.module._lzma.interp_lzma"],
     }
 
 def get_module_validator(modname):
diff --git a/pypy/doc/config/objspace.usemodules._lzma.txt b/pypy/doc/config/objspace.usemodules._lzma.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.usemodules._lzma.txt
@@ -0,0 +1,2 @@
+Use the '_lzma' module. 
+This module is expected to be working and is included by default.
diff --git a/pypy/interpreter/astcompiler/validate.py b/pypy/interpreter/astcompiler/validate.py
--- a/pypy/interpreter/astcompiler/validate.py
+++ b/pypy/interpreter/astcompiler/validate.py
@@ -11,8 +11,7 @@
 
 
 class ValidationError(Exception):
-    def __init__(self, message):
-        self.message = message
+    """Signals an invalid AST"""
 
 
 def expr_context_name(ctx):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1012,13 +1012,14 @@
             else:
                 w_retval = space.call_method(w_gen, "send", w_value)
         except OperationError as e:
-            if not e.match(self.space, self.space.w_StopIteration):
+            if not e.match(space, space.w_StopIteration):
                 raise
             self.popvalue()  # Remove iter from stack
+            e.normalize_exception(space)
             try:
                 w_value = space.getattr(e.get_w_value(space), space.wrap("value"))
             except OperationError as e:
-                if not e.match(self.space, self.space.w_AttributeError):
+                if not e.match(space, space.w_AttributeError):
                     raise
                 w_value = space.w_None
             self.pushvalue(w_value)
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -268,10 +268,7 @@
 
     def test_return_in_generator(self):
         code = 'def f():\n return None\n yield 19\n'
-        e = py.test.raises(OperationError, self.compiler.compile, code, '', 'single', 0)
-        ex = e.value
-        ex.normalize_exception(self.space)
-        assert ex.match(self.space, self.space.w_SyntaxError)
+        self.compiler.compile(code, '', 'single', 0)
 
     def test_yield_in_finally(self):
         code ='def f():\n try:\n  yield 19\n finally:\n  pass\n'
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -316,6 +316,24 @@
             assert False, 'Expected StopIteration'
             """
 
+    def test_yield_from_return(self):
+        """
+        def f1():
+            result = yield from f2()
+            return result
+        def f2():
+            yield 1
+            return 2
+        g = f1()
+        assert next(g) == 1
+        try:
+            next(g)
+        except StopIteration as e:
+            assert e.value == 2
+        else:
+            assert False, 'Expected StopIteration'
+            """
+
 
 def test_should_not_inline(space):
     from pypy.interpreter.generator import should_not_inline
diff --git a/pypy/module/_lzma/__init__.py b/pypy/module/_lzma/__init__.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_lzma/__init__.py
@@ -0,0 +1,20 @@
+from pypy.interpreter.mixedmodule import MixedModule
+
+class Module(MixedModule):
+    # The private part of the lzma module.
+
+    applevel_name = '_lzma'
+
+    interpleveldefs = {
+        'LZMACompressor': 'interp_lzma.W_LZMACompressor',
+        'LZMADecompressor': 'interp_lzma.W_LZMADecompressor',
+        '_encode_filter_properties': 'interp_lzma.encode_filter_properties',
+        '_decode_filter_properties': 'interp_lzma.decode_filter_properties',
+        'FORMAT_AUTO': 'space.wrap(interp_lzma.FORMAT_AUTO)',
+        'FORMAT_XZ': 'space.wrap(interp_lzma.FORMAT_XZ)',
+        'FORMAT_ALONE': 'space.wrap(interp_lzma.FORMAT_ALONE)',
+        'FORMAT_RAW': 'space.wrap(interp_lzma.FORMAT_RAW)',
+    }
+
+    appleveldefs = {
+    }
diff --git a/pypy/module/_lzma/interp_lzma.py b/pypy/module/_lzma/interp_lzma.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_lzma/interp_lzma.py
@@ -0,0 +1,359 @@
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.interpreter.typedef import (
+    TypeDef, interp_attrproperty_bytes, interp_attrproperty)
+from pypy.interpreter.error import oefmt
+from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
+from pypy.module.thread.os_lock import Lock
+from rpython.rlib.objectmodel import specialize
+from rpython.rlib.rarithmetic import LONGLONG_MASK, r_ulonglong
+from rpython.rtyper.tool import rffi_platform as platform
+from rpython.rtyper.lltypesystem import rffi
+from rpython.rtyper.lltypesystem import lltype
+from rpython.translator.tool.cbuild import ExternalCompilationInfo
+
+
+FORMAT_AUTO, FORMAT_XZ, FORMAT_ALONE, FORMAT_RAW = range(4)
+
+
+eci = ExternalCompilationInfo(
+    includes = ['lzma.h'],
+    libraries = ['lzma'],
+    )
+eci = platform.configure_external_library(
+    'lzma', eci,
+    [dict(prefix='lzma-')])
+if not eci:
+    raise ImportError("Could not find lzma library")
+
+
+class CConfig:
+    _compilation_info_ = eci
+    calling_conv = 'c'
+
+    BUFSIZ = platform.ConstantInteger("BUFSIZ")
+
+    lzma_stream = platform.Struct(
+        'lzma_stream',
+        [('next_in', rffi.CCHARP),
+         ('avail_in', rffi.UINT),
+         ('total_in', rffi.UINT),
+         ('next_out', rffi.CCHARP),
+         ('avail_out', rffi.UINT),
+         ('total_out', rffi.UINT),
+         ])
+
+    lzma_options_lzma = platform.Struct(
+        'lzma_options_lzma',
+        [])
+
+constant_names = '''
+    LZMA_RUN LZMA_FINISH
+    LZMA_OK LZMA_GET_CHECK LZMA_NO_CHECK LZMA_STREAM_END
+    LZMA_PRESET_DEFAULT
+    LZMA_CHECK_ID_MAX
+    LZMA_TELL_ANY_CHECK LZMA_TELL_NO_CHECK
+    '''.split()
+for name in constant_names:
+    setattr(CConfig, name, platform.ConstantInteger(name))
+
+class cConfig(object):
+    pass
+for k, v in platform.configure(CConfig).items():
+    setattr(cConfig, k, v)
+
+for name in constant_names:
+    globals()[name] = getattr(cConfig, name)
+lzma_stream = lltype.Ptr(cConfig.lzma_stream)
+lzma_options_lzma = lltype.Ptr(cConfig.lzma_options_lzma)
+BUFSIZ = cConfig.BUFSIZ
+LZMA_CHECK_UNKNOWN = LZMA_CHECK_ID_MAX + 1
+
+def external(name, args, result, **kwds):
+    return rffi.llexternal(name, args, result, compilation_info=
+                           CConfig._compilation_info_, **kwds)
+
+lzma_ret = rffi.INT
+lzma_action = rffi.INT
+lzma_bool = rffi.INT
+
+lzma_lzma_preset = external('lzma_lzma_preset', [lzma_options_lzma, rffi.UINT], lzma_bool)
+lzma_alone_encoder = external('lzma_alone_encoder', [lzma_stream, lzma_options_lzma], lzma_ret)
+lzma_end = external('lzma_end', [lzma_stream], lltype.Void, releasegil=False)
+
+lzma_auto_decoder = external('lzma_auto_decoder', [lzma_stream, rffi.LONG, rffi.INT], lzma_ret)
+lzma_get_check = external('lzma_get_check', [lzma_stream], rffi.INT)
+
+lzma_code = external('lzma_code', [lzma_stream, lzma_action], rffi.INT)
+
+
+@specialize.arg(1)
+def raise_error(space, fmt, *args):
+    raise oefmt(space.w_RuntimeError, fmt, *args)
+
+
+def _catch_lzma_error(space, lzret):
+    if (lzret == LZMA_OK or lzret == LZMA_GET_CHECK or
+        lzret == LZMA_NO_CHECK or lzret == LZMA_STREAM_END):
+        return
+    raise raise_error(space, "Unrecognized error from liblzma: %d", lzret)
+
+
+if BUFSIZ < 8192:
+    SMALLCHUNK = 8192
+else:
+    SMALLCHUNK = BUFSIZ
+if rffi.sizeof(rffi.INT) > 4:
+    BIGCHUNK = 512 * 32
+else:
+    BIGCHUNK = 512 * 1024
+
+
+def _new_buffer_size(current_size):
+    # keep doubling until we reach BIGCHUNK; then the buffer size is no
+    # longer increased
+    if current_size < BIGCHUNK:
+        return current_size + current_size
+    return current_size
+
+
+class OutBuffer(object):
+    """Handler for the output buffer.  A bit custom code trying to
+    encapsulate the logic of setting up the fields of 'lzs' and
+    allocating raw memory as needed.
+    """
+    def __init__(self, lzs, initial_size=SMALLCHUNK):
+        # when the constructor is called, allocate a piece of memory
+        # of length 'piece_size' and make lzs ready to dump there.
+        self.temp = []
+        self.lzs = lzs
+        self._allocate_chunk(initial_size)
+
+    def _allocate_chunk(self, size):
+        self.raw_buf, self.gc_buf = rffi.alloc_buffer(size)
+        self.current_size = size
+        self.lzs.c_next_out = self.raw_buf
+        rffi.setintfield(self.lzs, 'c_avail_out', size)
+
+    def _get_chunk(self, chunksize):
+        assert 0 <= chunksize <= self.current_size
+        raw_buf = self.raw_buf
+        gc_buf = self.gc_buf
+        s = rffi.str_from_buffer(raw_buf, gc_buf, self.current_size, chunksize)
+        rffi.keep_buffer_alive_until_here(raw_buf, gc_buf)
+        self.current_size = 0
+        return s
+
+    def prepare_next_chunk(self):
+        size = self.current_size
+        self.temp.append(self._get_chunk(size))
+        self._allocate_chunk(_new_buffer_size(size))
+
+    def make_result_string(self):
+        count_unoccupied = rffi.getintfield(self.lzs, 'c_avail_out')
+        s = self._get_chunk(self.current_size - count_unoccupied)
+        if self.temp:
+            self.temp.append(s)
+            return ''.join(self.temp)
+        else:
+            return s
+
+    def free(self):
+        if self.current_size > 0:
+            rffi.keep_buffer_alive_until_here(self.raw_buf, self.gc_buf)
+
+    def __enter__(self):
+        return self
+    def __exit__(self, *args):
+        self.free()
+
+
+class W_LZMACompressor(W_Root):
+    def __init__(self, space, format):
+        self.format = format
+        self.lock = Lock(space)
+        self.flushed = False
+        self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True)
+
+    def __del__(self):
+        lzma_end(self.lzs)
+        lltype.free(self.lzs, flavor='raw')
+
+    def _init_alone(self, space, preset, w_filters):
+        if space.is_none(w_filters):
+            with lltype.scoped_alloc(lzma_options_lzma.TO) as options:
+                if lzma_lzma_preset(options, preset):
+                    raise_error(space, "Invalid compression preset: %d", preset)
+                lzret = lzma_alone_encoder(self.lzs, options)
+        else:
+            raise NotImplementedError
+        _catch_lzma_error(space, lzret)
+
+    @staticmethod
+    @unwrap_spec(format=int,
+                 w_check=WrappedDefault(None),
+                 w_preset=WrappedDefault(None), 
+                 w_filters=WrappedDefault(None))
+    def descr_new_comp(space, w_subtype, format=FORMAT_XZ, 
+                       w_check=None, w_preset=None, w_filters=None):
+        w_self = space.allocate_instance(W_LZMACompressor, w_subtype)
+        self = space.interp_w(W_LZMACompressor, w_self)
+        W_LZMACompressor.__init__(self, space, format)
+
+        if space.is_none(w_preset):
+            preset = LZMA_PRESET_DEFAULT
+        else:
+            preset = space.int_w(w_preset)
+
+        if format == FORMAT_ALONE:
+            self._init_alone(space, preset, w_filters)
+        else:
+            raise NotImplementedError
+
+        return w_self
+
+    @unwrap_spec(data='bufferstr')
+    def compress_w(self, space, data):
+        with self.lock:
+            if self.flushed:
+                raise oefmt(space.w_ValueError, "Compressor has been flushed")
+            result = self._compress(space, data, LZMA_RUN)
+        return space.wrapbytes(result)
+
+    def flush_w(self, space):
+        with self.lock:
+            if self.flushed:
+                raise oefmt(space.w_ValueError, "Repeated call to flush()")
+            result = self._compress(space, "", LZMA_FINISH)
+        return space.wrapbytes(result)
+
+    def _compress(self, space, data, action):
+        datasize = len(data)
+        with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf:
+            for i in range(datasize):
+                in_buf[i] = data[i]
+
+            with OutBuffer(self.lzs) as out:
+                self.lzs.c_next_in = in_buf
+                rffi.setintfield(self.lzs, 'c_avail_in', datasize)
+
+                while True:
+                    lzret = lzma_code(self.lzs, action)
+                    _catch_lzma_error(space, lzret)
+
+                    if (action == LZMA_RUN and
+                        rffi.getintfield(self.lzs, 'c_avail_in') == 0):
+                        break
+                    if action == LZMA_FINISH and lzret == LZMA_STREAM_END:
+                        break
+                    elif rffi.getintfield(self.lzs, 'c_avail_out') == 0:
+                        out.prepare_next_chunk()
+
+                return out.make_result_string()
+
+
+W_LZMACompressor.typedef = TypeDef("LZMACompressor",
+    __new__ = interp2app(W_LZMACompressor.descr_new_comp),
+    compress = interp2app(W_LZMACompressor.compress_w),
+    flush = interp2app(W_LZMACompressor.flush_w),
+)
+
+
+class W_LZMADecompressor(W_Root):
+    def __init__(self, space, format):
+        self.format = format
+        self.lock = Lock(space)
+        self.eof = False
+        self.lzs = lltype.malloc(lzma_stream.TO, flavor='raw', zero=True)
+        self.check = LZMA_CHECK_UNKNOWN
+        self.unused_data = ''
+
+    def __del__(self):
+        lzma_end(self.lzs)
+        lltype.free(self.lzs, flavor='raw')
+
+    @staticmethod
+    @unwrap_spec(format=int,
+                 w_memlimit=WrappedDefault(None),
+                 w_filters=WrappedDefault(None))
+    def descr_new_dec(space, w_subtype, format=FORMAT_AUTO,
+                      w_memlimit=None, w_filters=None):
+        w_self = space.allocate_instance(W_LZMADecompressor, w_subtype)
+        self = space.interp_w(W_LZMADecompressor, w_self)
+        W_LZMADecompressor.__init__(self, space, format)
+
+        if space.is_none(w_memlimit):
+            memlimit = r_ulonglong(LONGLONG_MASK)
+        else:
+            memlimit = space.r_ulonglong_w(w_memlimit)
+
+        decoder_flags = LZMA_TELL_ANY_CHECK | LZMA_TELL_NO_CHECK
+
+        if format == FORMAT_AUTO:
+            lzret = lzma_auto_decoder(self.lzs, memlimit, decoder_flags)
+            _catch_lzma_error(space, lzret)
+        else:
+            raise NotImplementedError
+
+        return w_self
+
+    @unwrap_spec(data='bufferstr')
+    def decompress_w(self, space, data):
+        with self.lock:
+            if self.eof:
+                raise oefmt(space.w_EOFError, "Already at end of stream")
+            result = self._decompress(space, data)
+        return space.wrapbytes(result)
+
+    def _decompress(self, space, data):
+        datasize = len(data)
+
+        with lltype.scoped_alloc(rffi.CCHARP.TO, datasize) as in_buf:
+            for i in range(datasize):
+                in_buf[i] = data[i]
+
+            with OutBuffer(self.lzs) as out:
+                self.lzs.c_next_in = in_buf
+                rffi.setintfield(self.lzs, 'c_avail_in', datasize)
+
+                while True:
+                    lzret = lzma_code(self.lzs, LZMA_RUN)
+                    _catch_lzma_error(space, lzret)
+                    if lzret == LZMA_GET_CHECK or lzret == LZMA_NO_CHECK:
+                        self.check = lzma_get_check(self.lzs)
+                    if lzret == LZMA_STREAM_END:
+                        self.eof = True
+                        if rffi.getintfield(self.lzs, 'c_avail_in') > 0:
+                            unused = [self.lzs.c_next_in[i]
+                                      for i in range(
+                                    rffi.getintfield(self.lzs,
+                                                     'c_avail_in'))]
+                            self.unused_data = "".join(unused)
+                            break
+                    if rffi.getintfield(self.lzs, 'c_avail_in') == 0:
+                        break
+                    elif rffi.getintfield(self.lzs, 'c_avail_out') == 0:
+                        out.prepare_next_chunk()
+
+                return out.make_result_string()
+
+
+W_LZMADecompressor.typedef = TypeDef("LZMADecompressor",
+    __new__ = interp2app(W_LZMADecompressor.descr_new_dec),
+    decompress = interp2app(W_LZMADecompressor.decompress_w),
+    eof = interp_attrproperty("eof", W_LZMADecompressor),
+    unused_data = interp_attrproperty_bytes("unused_data", W_LZMADecompressor),
+)
+
+
+def encode_filter_properties(space, w_filter):
+    """Return a bytes object encoding the options (properties) of the filter
+       specified by *filter* (a dict).
+
+    The result does not include the filter ID itself, only the options.
+    """
+
+def decode_filter_properties(space, w_filter_id, w_encoded_props):
+    """Return a dict describing a filter with ID *filter_id*, and options
+       (properties) decoded from the bytes object *encoded_props*.
+    """
+    
diff --git a/pypy/module/_lzma/test/test_lzma.py b/pypy/module/_lzma/test/test_lzma.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_lzma/test/test_lzma.py
@@ -0,0 +1,17 @@
+class AppTestBZ2File:
+    spaceconfig = {
+        "usemodules": ["_lzma"]
+    }
+
+    def test_module(self):
+        import lzma
+
+    def test_simple_compress(self):
+        import lzma
+        compressed = lzma.compress(b'Insert Data Here', format=lzma.FORMAT_ALONE)
+        assert compressed == (b']\x00\x00\x80\x00\xff\xff\xff\xff\xff'
+                              b'\xff\xff\xff\x00$\x9b\x8afg\x91'
+                              b'(\xcb\xde\xfa\x03\r\x1eQT\xbe'
+                              b't\x9e\xdfI]\xff\xf4\x9d\x80\x00')
+        decompressed = lzma.decompress(compressed)
+        assert decompressed == b'Insert Data Here'
diff --git a/pypy/module/_lzma/test/test_ztranslation.py b/pypy/module/_lzma/test/test_ztranslation.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_lzma/test/test_ztranslation.py
@@ -0,0 +1,4 @@
+from pypy.objspace.fake.checkmodule import checkmodule
+
+def test_lzma_translates():
+    checkmodule('_lzma')
diff --git a/pypy/module/_posixsubprocess/_posixsubprocess.c b/pypy/module/_posixsubprocess/_posixsubprocess.c
--- a/pypy/module/_posixsubprocess/_posixsubprocess.c
+++ b/pypy/module/_posixsubprocess/_posixsubprocess.c
@@ -67,7 +67,7 @@
  * that properly supports /dev/fd.
  */
 static int
-_is_fdescfs_mounted_on_dev_fd()
+_is_fdescfs_mounted_on_dev_fd(void)
 {
     struct stat dev_stat;
     struct stat dev_fd_stat;
@@ -142,17 +142,11 @@
  * This structure is very old and stable: It will not change unless the kernel
  * chooses to break compatibility with all existing binaries.  Highly Unlikely.
  */
-struct linux_dirent {
-#if defined(__x86_64__) && defined(__ILP32__)
-   /* Support the wacky x32 ABI (fake 32-bit userspace speaking to x86_64
-    * kernel interfaces) - https://sites.google.com/site/x32abi/ */
+struct linux_dirent64 {
    unsigned long long d_ino;
-   unsigned long long d_off;
-#else
-   unsigned long  d_ino;        /* Inode number */
-   unsigned long  d_off;        /* Offset to next linux_dirent */
-#endif
+   long long d_off;
    unsigned short d_reclen;     /* Length of this linux_dirent */
+   unsigned char  d_type;
    char           d_name[256];  /* Filename (null-terminated) */
 };
 
@@ -196,16 +190,16 @@
                                  num_fds_to_keep);
         return;
     } else {
-        char buffer[sizeof(struct linux_dirent)];
+        char buffer[sizeof(struct linux_dirent64)];
         int bytes;
-        while ((bytes = syscall(SYS_getdents, fd_dir_fd,
-                                (struct linux_dirent *)buffer,
+        while ((bytes = syscall(SYS_getdents64, fd_dir_fd,
+                                (struct linux_dirent64 *)buffer,
                                 sizeof(buffer))) > 0) {
-            struct linux_dirent *entry;
+            struct linux_dirent64 *entry;
             int offset;
             for (offset = 0; offset < bytes; offset += entry->d_reclen) {
                 int fd;
-                entry = (struct linux_dirent *)(buffer + offset);
+                entry = (struct linux_dirent64 *)(buffer + offset);
                 if ((fd = _pos_int_from_ascii(entry->d_name)) < 0)
                     continue;  /* Not a number. */
                 if (fd != fd_dir_fd && fd >= start_fd && fd < end_fd &&
@@ -299,6 +293,7 @@
 
 #endif  /* else NOT (defined(__linux__) && defined(HAVE_SYS_SYSCALL_H)) */
 
+
 /*
  * This function is code executed in the child process immediately after fork
  * to set things up and call exec().
@@ -389,17 +384,6 @@
         POSIX_CALL(close(errwrite));
     }
 
-    if (close_fds) {
-        int local_max_fd = max_fd;
-#if defined(__NetBSD__)
-        local_max_fd = fcntl(0, F_MAXFD);
-        if (local_max_fd < 0)
-            local_max_fd = max_fd;
-#endif
-        /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */
-        _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep);
-    }
-
     if (cwd)
         POSIX_CALL(chdir(cwd));
 
@@ -428,6 +412,18 @@
         }
     }
 
+    /* close FDs after executing preexec_fn, which might open FDs */
+    if (close_fds) {
+        int local_max_fd = max_fd;
+#if defined(__NetBSD__)
+        local_max_fd = fcntl(0, F_MAXFD);
+        if (local_max_fd < 0)
+            local_max_fd = max_fd;
+#endif
+        /* TODO HP-UX could use pstat_getproc() if anyone cares about it. */
+        _close_open_fd_range(3, local_max_fd, py_fds_to_keep, num_fds_to_keep);
+    }
+
     /* This loop matches the Lib/os.py _execvpe()'s PATH search when */
     /* given the executable_list generated by Lib/subprocess.py.     */
     saved_errno = 0;
@@ -478,20 +474,18 @@
 int
 pypy_subprocess_cloexec_pipe(int *fds)
 {
-    int res;
+    int res, saved_errno;
+    long oldflags;
 #ifdef HAVE_PIPE2
     Py_BEGIN_ALLOW_THREADS
     res = pipe2(fds, O_CLOEXEC);
     Py_END_ALLOW_THREADS
     if (res != 0 && errno == ENOSYS)
     {
-        {
 #endif
         /* We hold the GIL which offers some protection from other code calling
          * fork() before the CLOEXEC flags have been set but we can't guarantee
          * anything without pipe2(). */
-        long oldflags;
-
         res = pipe(fds);
 
         if (res == 0) {
@@ -508,9 +502,47 @@
         if (res == 0)
             res = fcntl(fds[1], F_SETFD, oldflags | FD_CLOEXEC);
 #ifdef HAVE_PIPE2
-        }
     }
 #endif
+    if (res == 0 && fds[1] < 3) {
+        /* We always want the write end of the pipe to avoid fds 0, 1 and 2
+         * as our child may claim those for stdio connections. */
+        int write_fd = fds[1];
+        int fds_to_close[3] = {-1, -1, -1};
+        int fds_to_close_idx = 0;
+#ifdef F_DUPFD_CLOEXEC
+        fds_to_close[fds_to_close_idx++] = write_fd;
+        write_fd = fcntl(write_fd, F_DUPFD_CLOEXEC, 3);
+        if (write_fd < 0)  /* We don't support F_DUPFD_CLOEXEC / other error */
+#endif
+        {
+            /* Use dup a few times until we get a desirable fd. */
+            for (; fds_to_close_idx < 3; ++fds_to_close_idx) {
+                fds_to_close[fds_to_close_idx] = write_fd;
+                write_fd = dup(write_fd);
+                if (write_fd >= 3)
+                    break;
+                /* We may dup a few extra times if it returns an error but
+                 * that is okay.  Repeat calls should return the same error. */
+            }
+            if (write_fd < 0) res = write_fd;
+            if (res == 0) {
+                oldflags = fcntl(write_fd, F_GETFD, 0);
+                if (oldflags < 0) res = oldflags;
+                if (res == 0)
+                    res = fcntl(write_fd, F_SETFD, oldflags | FD_CLOEXEC);
+            }
+        }
+        saved_errno = errno;
+        /* Close fds we tried for the write end that were too low. */
+        for (fds_to_close_idx=0; fds_to_close_idx < 3; ++fds_to_close_idx) {
+            int temp_fd = fds_to_close[fds_to_close_idx];
+            while (temp_fd >= 0 && close(temp_fd) < 0 && errno == EINTR);
+        }
+        errno = saved_errno;  /* report dup or fcntl errors, not close. */
+        fds[1] = write_fd;
+    }  /* end if write fd was too small */
+
     if (res != 0)
        return res;
     return 0;
diff --git a/pypy/module/_posixsubprocess/test/test_subprocess.py b/pypy/module/_posixsubprocess/test/test_subprocess.py
--- a/pypy/module/_posixsubprocess/test/test_subprocess.py
+++ b/pypy/module/_posixsubprocess/test/test_subprocess.py
@@ -1,7 +1,8 @@
 from os.path import dirname
 
 class AppTestSubprocess:
-    spaceconfig = dict(usemodules=('_posixsubprocess', 'signal', 'fcntl', 'select'))
+    spaceconfig = dict(usemodules=('_posixsubprocess', 'signal',
+                                   'fcntl', 'select', 'rctime'))
     # XXX write more tests
 
     def setup_class(cls):
@@ -17,6 +18,7 @@
         os.close(fd2)
 
     def test_close_fds_true(self):
+        import traceback  # Work around a recursion limit
         import subprocess
         import os.path
         import os
@@ -43,6 +45,7 @@
         # For code coverage of calling setsid().  We don't care if we get an
         # EPERM error from it depending on the test execution environment, that
         # still indicates that it was called.
+        import traceback  # Work around a recursion limit
         import subprocess
         import os
         try:
diff --git a/pypy/module/bz2/__init__.py b/pypy/module/bz2/__init__.py
--- a/pypy/module/bz2/__init__.py
+++ b/pypy/module/bz2/__init__.py
@@ -1,19 +1,14 @@
-# REVIEWME
 from pypy.interpreter.mixedmodule import MixedModule
 
 class Module(MixedModule):
-    """The python bz2 module provides a comprehensive interface for
-the bz2 compression library. It implements a complete file
-interface, one shot (de)compression functions, and types for
-sequential (de)compression."""
+    # The private part of the bz2 module.
+
+    applevel_name = '_bz2'
 
     interpleveldefs = {
         'BZ2Compressor': 'interp_bz2.W_BZ2Compressor',
         'BZ2Decompressor': 'interp_bz2.W_BZ2Decompressor',
-        'compress': 'interp_bz2.compress',
-        'decompress': 'interp_bz2.decompress',
     }
 
     appleveldefs = {
-        'BZ2File': 'app_bz2file.BZ2File',
     }
diff --git a/pypy/module/bz2/app_bz2file.py b/pypy/module/bz2/app_bz2file.py
deleted file mode 100644
--- a/pypy/module/bz2/app_bz2file.py
+++ /dev/null
@@ -1,370 +0,0 @@
-"""Interface to the libbzip2 compression library.
-
-This file is an almost exact copy of CPython3.3 Lib/bz2.py.
-"""
-
-import io
-
-from bz2 import BZ2Compressor, BZ2Decompressor
-
-
-_MODE_CLOSED   = 0
-_MODE_READ     = 1
-_MODE_READ_EOF = 2
-_MODE_WRITE    = 3
-
-_BUFFER_SIZE = 8192
-
-
-class BZ2File(io.BufferedIOBase):
-
-    """A file object providing transparent bzip2 (de)compression.
-
-    A BZ2File can act as a wrapper for an existing file object, or refer
-    directly to a named file on disk.
-
-    Note that BZ2File provides a *binary* file interface - data read is
-    returned as bytes, and data to be written should be given as bytes.
-    """
-
-    def __init__(self, filename=None, mode="r", buffering=None,
-                 compresslevel=9, fileobj=None):
-        """Open a bzip2-compressed file.
-
-        If filename is given, open the named file. Otherwise, operate on
-        the file object given by fileobj. Exactly one of these two
-        parameters should be provided.
-
-        mode can be 'r' for reading (default), or 'w' for writing.
-
-        buffering is ignored. Its use is deprecated.
-
-        If mode is 'w', compresslevel can be a number between 1 and 9
-        specifying the level of compression: 1 produces the least
-        compression, and 9 (default) produces the most compression.
-        """
-        # This lock must be recursive, so that BufferedIOBase's
-        # readline(), readlines() and writelines() don't deadlock.
-        import threading
-        self._lock = threading.RLock()
-        self._fp = None
-        self._closefp = False
-        self._mode = _MODE_CLOSED
-        self._pos = 0
-        self._size = -1
-
-        if not (1 <= compresslevel <= 9):
-            raise ValueError("compresslevel must be between 1 and 9")
-
-        if mode in ("", "r", "rb"):
-            mode = "rb"
-            mode_code = _MODE_READ
-            self._decompressor = BZ2Decompressor()
-            self._buffer = None
-        elif mode in ("w", "wb"):
-            mode = "wb"
-            mode_code = _MODE_WRITE
-            self._compressor = BZ2Compressor(compresslevel)
-        elif mode in ("a", "ab"):
-            mode = "ab"
-            mode_code = _MODE_WRITE
-            self._compressor = BZ2Compressor(compresslevel)
-        else:
-            raise ValueError("Invalid mode: {!r}".format(mode))
-
-        if filename is not None and fileobj is None:
-            self._fp = open(filename, mode)
-            self._closefp = True
-            self._mode = mode_code
-        elif fileobj is not None and filename is None:
-            self._fp = fileobj
-            self._mode = mode_code
-        else:
-            raise ValueError("Must give exactly one of filename and fileobj")
-
-    def close(self):
-        """Flush and close the file.
-
-        May be called more than once without error. Once the file is
-        closed, any other operation on it will raise a ValueError.
-        """
-        with self._lock:
-            if self._mode == _MODE_CLOSED:
-                return
-            try:
-                if self._mode in (_MODE_READ, _MODE_READ_EOF):
-                    self._decompressor = None
-                elif self._mode == _MODE_WRITE:
-                    self._fp.write(self._compressor.flush())
-                    self._compressor = None
-            finally:
-                try:
-                    if self._closefp:
-                        self._fp.close()
-                finally:
-                    self._fp = None
-                    self._closefp = False
-                    self._mode = _MODE_CLOSED
-                    self._buffer = None
-
-    @property
-    def closed(self):
-        """True if this file is closed."""
-        return self._mode == _MODE_CLOSED
-
-    def fileno(self):
-        """Return the file descriptor for the underlying file."""
-        self._check_not_closed()
-        return self._fp.fileno()
-
-    def seekable(self):
-        """Return whether the file supports seeking."""
-        return self.readable()
-
-    def readable(self):
-        """Return whether the file was opened for reading."""
-        self._check_not_closed()
-        return self._mode in (_MODE_READ, _MODE_READ_EOF)
-
-    def writable(self):
-        """Return whether the file was opened for writing."""
-        self._check_not_closed()
-        return self._mode == _MODE_WRITE
-
-    # Mode-checking helper functions.
-
-    def _check_not_closed(self):
-        if self.closed:
-            raise ValueError("I/O operation on closed file")
-
-    def _check_can_read(self):
-        if not self.readable():
-            raise io.UnsupportedOperation("File not open for reading")
-
-    def _check_can_write(self):
-        if not self.writable():
-            raise io.UnsupportedOperation("File not open for writing")
-
-    def _check_can_seek(self):
-        if not self.seekable():
-            raise io.UnsupportedOperation("Seeking is only supported "
-                                          "on files open for reading")
-
-    # Fill the readahead buffer if it is empty. Returns False on EOF.
-    def _fill_buffer(self):
-        if self._buffer:
-            return True
-
-        if self._decompressor.unused_data:
-            rawblock = self._decompressor.unused_data
-        else:
-            rawblock = self._fp.read(_BUFFER_SIZE)
-
-        if not rawblock:
-            if self._decompressor.eof:
-                self._mode = _MODE_READ_EOF
-                self._size = self._pos
-                return False
-            else:
-                raise EOFError("Compressed file ended before the "
-                               "end-of-stream marker was reached")
-
-        # Continue to next stream.
-        if self._decompressor.eof:
-            self._decompressor = BZ2Decompressor()
-
-        self._buffer = self._decompressor.decompress(rawblock)
-        return True
-
-    # Read data until EOF.
-    # If return_data is false, consume the data without returning it.
-    def _read_all(self, return_data=True):
-        blocks = []
-        while self._fill_buffer():
-            if return_data:
-                blocks.append(self._buffer)
-            self._pos += len(self._buffer)
-            self._buffer = None
-        if return_data:
-            return b"".join(blocks)
-
-    # Read a block of up to n bytes.
-    # If return_data is false, consume the data without returning it.
-    def _read_block(self, n, return_data=True):
-        blocks = []
-        while n > 0 and self._fill_buffer():
-            if n < len(self._buffer):
-                data = self._buffer[:n]
-                self._buffer = self._buffer[n:]
-            else:
-                data = self._buffer
-                self._buffer = None
-            if return_data:
-                blocks.append(data)
-            self._pos += len(data)
-            n -= len(data)
-        if return_data:
-            return b"".join(blocks)
-
-    def peek(self, n=0):
-        """Return buffered data without advancing the file position.
-
-        Always returns at least one byte of data, unless at EOF.
-        The exact number of bytes returned is unspecified.
-        """
-        with self._lock:
-            self._check_can_read()
-            if self._mode == _MODE_READ_EOF or not self._fill_buffer():
-                return b""
-            return self._buffer
-
-    def read(self, size=-1):
-        """Read up to size uncompressed bytes from the file.
-
-        If size is negative or omitted, read until EOF is reached.
-        Returns b'' if the file is already at EOF.
-        """
-        with self._lock:
-            self._check_can_read()
-            if self._mode == _MODE_READ_EOF or size == 0:
-                return b""
-            elif size < 0:
-                return self._read_all()
-            else:
-                return self._read_block(size)
-
-    def read1(self, size=-1):
-        """Read up to size uncompressed bytes with at most one read
-        from the underlying stream.
-
-        Returns b'' if the file is at EOF.
-        """
-        with self._lock:
-            self._check_can_read()
-            if (size == 0 or self._mode == _MODE_READ_EOF or
-                not self._fill_buffer()):
-                return b""
-            if 0 < size < len(self._buffer):
-                data = self._buffer[:size]
-                self._buffer = self._buffer[size:]
-            else:
-                data = self._buffer
-                self._buffer = None
-            self._pos += len(data)
-            return data
-
-    def readinto(self, b):
-        """Read up to len(b) bytes into b.
-
-        Returns the number of bytes read (0 for EOF).
-        """
-        with self._lock:
-            return io.BufferedIOBase.readinto(self, b)
-
-    def readline(self, size=-1):
-        """Read a line of uncompressed bytes from the file.
-
-        The terminating newline (if present) is retained. If size is
-        non-negative, no more than size bytes will be read (in which
-        case the line may be incomplete). Returns b'' if already at EOF.
-        """
-        if not hasattr(size, "__index__"):
-            raise TypeError("Integer argument expected")
-        size = size.__index__()
-        with self._lock:
-            return io.BufferedIOBase.readline(self, size)
-
-    def readlines(self, size=-1):
-        """Read a list of lines of uncompressed bytes from the file.
-
-        size can be specified to control the number of lines read: no
-        further lines will be read once the total size of the lines read
-        so far equals or exceeds size.
-        """
-        if not hasattr(size, "__index__"):
-            raise TypeError("Integer argument expected")
-        size = size.__index__()
-        with self._lock:
-            return io.BufferedIOBase.readlines(self, size)
-
-    def write(self, data):
-        """Write a byte string to the file.
-
-        Returns the number of uncompressed bytes written, which is
-        always len(data). Note that due to buffering, the file on disk
-        may not reflect the data written until close() is called.
-        """
-        with self._lock:
-            self._check_can_write()
-            compressed = self._compressor.compress(data)
-            self._fp.write(compressed)
-            self._pos += len(data)
-            return len(data)
-
-    def writelines(self, seq):
-        """Write a sequence of byte strings to the file.
-
-        Returns the number of uncompressed bytes written.
-        seq can be any iterable yielding byte strings.
-
-        Line separators are not added between the written byte strings.
-        """
-        with self._lock:
-            return io.BufferedIOBase.writelines(self, seq)
-
-    # Rewind the file to the beginning of the data stream.
-    def _rewind(self):
-        self._fp.seek(0, 0)
-        self._mode = _MODE_READ
-        self._pos = 0
-        self._decompressor = BZ2Decompressor()
-        self._buffer = None
-
-    def seek(self, offset, whence=0):
-        """Change the file position.
-
-        The new position is specified by offset, relative to the
-        position indicated by whence. Values for whence are:
-
-            0: start of stream (default); offset must not be negative
-            1: current stream position
-            2: end of stream; offset must not be positive
-
-        Returns the new file position.
-
-        Note that seeking is emulated, so depending on the parameters,
-        this operation may be extremely slow.
-        """
-        with self._lock:
-            self._check_can_seek()
-
-            # Recalculate offset as an absolute file position.
-            if whence == 0:
-                pass
-            elif whence == 1:
-                offset = self._pos + offset
-            elif whence == 2:
-                # Seeking relative to EOF - we need to know the file's size.
-                if self._size < 0:
-                    self._read_all(return_data=False)
-                offset = self._size + offset
-            else:
-                raise ValueError("Invalid value for whence: {}".format(whence))
-
-            # Make it so that offset is the number of bytes to skip forward.
-            if offset < self._pos:
-                self._rewind()
-            else:
-                offset -= self._pos
-
-            # Read and discard data until we reach the desired position.
-            if self._mode != _MODE_READ_EOF:
-                self._read_block(offset, return_data=False)
-
-            return self._pos
-
-    def tell(self):
-        """Return the current file position."""
-        with self._lock:
-            self._check_not_closed()
-            return self._pos
diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py
--- a/pypy/module/bz2/interp_bz2.py
+++ b/pypy/module/bz2/interp_bz2.py
@@ -330,7 +330,7 @@
             res = out.make_result_string()
             return self.space.wrapbytes(res)
 
-W_BZ2Compressor.typedef = TypeDef("BZ2Compressor",
+W_BZ2Compressor.typedef = TypeDef("_bz2.BZ2Compressor",
     __doc__ = W_BZ2Compressor.__doc__,
     __new__ = interp2app(descr_compressor__new__),
     compress = interp2app(W_BZ2Compressor.compress),
@@ -426,98 +426,10 @@
                 return self.space.wrapbytes(res)
 
 
-W_BZ2Decompressor.typedef = TypeDef("BZ2Decompressor",
+W_BZ2Decompressor.typedef = TypeDef("_bz2.BZ2Decompressor",
     __doc__ = W_BZ2Decompressor.__doc__,
     __new__ = interp2app(descr_decompressor__new__),
     unused_data = interp_attrproperty_bytes("unused_data", W_BZ2Decompressor),
     eof = GetSetProperty(W_BZ2Decompressor.eof_w),
     decompress = interp2app(W_BZ2Decompressor.decompress),
 )
-
-
-@unwrap_spec(data='bufferstr', compresslevel=int)
-def compress(space, data, compresslevel=9):
-    """compress(data [, compresslevel=9]) -> string
-
-    Compress data in one shot. If you want to compress data sequentially,
-    use an instance of BZ2Compressor instead. The compresslevel parameter, if
-    given, must be a number between 1 and 9."""
-
-    if compresslevel < 1 or compresslevel > 9:
-        raise OperationError(space.w_ValueError,
-            space.wrap("compresslevel must be between 1 and 9"))
-
-    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
-        in_bufsize = len(data)
-
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
-            bzs.c_next_in = in_buf
-            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
-
-            # conforming to bz2 manual, this is large enough to fit compressed
-            # data in one shot. We will check it later anyway.
-            with OutBuffer(bzs,
-                           in_bufsize + (in_bufsize / 100 + 1) + 600) as out:
-
-                bzerror = BZ2_bzCompressInit(bzs, compresslevel, 0, 0)
-                if bzerror != BZ_OK:
-                    _catch_bz2_error(space, bzerror)
-
-                while True:
-                    bzerror = BZ2_bzCompress(bzs, BZ_FINISH)
-                    if bzerror == BZ_STREAM_END:
-                        break
-                    elif bzerror != BZ_FINISH_OK:
-                        BZ2_bzCompressEnd(bzs)
-                        _catch_bz2_error(space, bzerror)
-
-                    if rffi.getintfield(bzs, 'c_avail_out') == 0:
-                        out.prepare_next_chunk()
-
-                res = out.make_result_string()
-                BZ2_bzCompressEnd(bzs)
-                return space.wrapbytes(res)
-
-@unwrap_spec(data='bufferstr')
-def decompress(space, data):
-    """decompress(data) -> decompressed data
-
-    Decompress data in one shot. If you want to decompress data sequentially,
-    use an instance of BZ2Decompressor instead."""
-
-    in_bufsize = len(data)
-    if in_bufsize == 0:
-        return space.wrapbytes("")
-
-    with lltype.scoped_alloc(bz_stream.TO, zero=True) as bzs:
-        with lltype.scoped_alloc(rffi.CCHARP.TO, in_bufsize) as in_buf:
-            for i in range(in_bufsize):
-                in_buf[i] = data[i]
-            bzs.c_next_in = in_buf
-            rffi.setintfield(bzs, 'c_avail_in', in_bufsize)
-
-            with OutBuffer(bzs) as out:
-                bzerror = BZ2_bzDecompressInit(bzs, 0, 0)
-                if bzerror != BZ_OK:
-                    _catch_bz2_error(space, bzerror)
-
-                while True:
-                    bzerror = BZ2_bzDecompress(bzs)
-                    if bzerror == BZ_STREAM_END:
-                        break
-                    if bzerror != BZ_OK:
-                        BZ2_bzDecompressEnd(bzs)
-                    _catch_bz2_error(space, bzerror)
-
-                    if rffi.getintfield(bzs, 'c_avail_in') == 0:
-                        BZ2_bzDecompressEnd(bzs)
-                        raise OperationError(space.w_ValueError, space.wrap(
-                            "couldn't find end of stream"))
-                    elif rffi.getintfield(bzs, 'c_avail_out') == 0:
-                        out.prepare_next_chunk()
-
-                res = out.make_result_string()
-                BZ2_bzDecompressEnd(bzs)
-                return space.wrapbytes(res)
diff --git a/pypy/module/bz2/test/test_bz2_compdecomp.py b/pypy/module/bz2/test/test_bz2_compdecomp.py
--- a/pypy/module/bz2/test/test_bz2_compdecomp.py
+++ b/pypy/module/bz2/test/test_bz2_compdecomp.py
@@ -41,7 +41,7 @@
     interp_bz2.SMALLCHUNK = mod.OLD_SMALLCHUNK
 
 class AppTestBZ2Compressor(CheckAllocation):
-    spaceconfig = dict(usemodules=('bz2',))
+    spaceconfig = dict(usemodules=('bz2', 'rctime'))
 
     def setup_class(cls):
         cls.w_TEXT = cls.space.wrapbytes(TEXT)
@@ -54,6 +54,8 @@
             cls.w_decompress = cls.space.wrap(gateway.interp2app(decompress_w))
         cls.w_HUGE_OK = cls.space.wrap(HUGE_OK)
 
+        cls.space.appexec([], """(): import warnings""")  # Work around a recursion limit
+
     def test_creation(self):
         from bz2 import BZ2Compressor
 
@@ -108,13 +110,15 @@
 
 
 class AppTestBZ2Decompressor(CheckAllocation):
-    spaceconfig = dict(usemodules=('bz2',))
+    spaceconfig = dict(usemodules=('bz2', 'rctime'))
 
     def setup_class(cls):
         cls.w_TEXT = cls.space.wrapbytes(TEXT)
         cls.w_DATA = cls.space.wrapbytes(DATA)
         cls.w_BUGGY_DATA = cls.space.wrapbytes(BUGGY_DATA)
 
+        cls.space.appexec([], """(): import warnings""")  # Work around a recursion limit
+
     def test_creation(self):
         from bz2 import BZ2Decompressor
 
@@ -184,7 +188,7 @@
 
 
 class AppTestBZ2ModuleFunctions(CheckAllocation):
-    spaceconfig = dict(usemodules=('bz2',))
+    spaceconfig = dict(usemodules=('bz2', 'rctime'))
 
     def setup_class(cls):
         cls.w_TEXT = cls.space.wrapbytes(TEXT)
diff --git a/pypy/module/bz2/test/test_bz2_file.py b/pypy/module/bz2/test/test_bz2_file.py
--- a/pypy/module/bz2/test/test_bz2_file.py
+++ b/pypy/module/bz2/test/test_bz2_file.py
@@ -87,6 +87,8 @@
                 gateway.interp2app(create_broken_temp_file_w))
         cls.w_random_data = cls.space.wrapbytes(RANDOM_DATA)
 
+        cls.space.appexec([], """(): import warnings""")  # Work around a recursion limit
+
     def test_attributes(self):
         from bz2 import BZ2File
 
diff --git a/pypy/module/faulthandler/__init__.py b/pypy/module/faulthandler/__init__.py
--- a/pypy/module/faulthandler/__init__.py
+++ b/pypy/module/faulthandler/__init__.py
@@ -6,5 +6,7 @@
 
     interpleveldefs = {
         'enable': 'interp_faulthandler.enable',
+        'disable': 'interp_faulthandler.disable',
+        'is_enabled': 'interp_faulthandler.is_enabled',
         'register': 'interp_faulthandler.register',
     }
diff --git a/pypy/module/faulthandler/interp_faulthandler.py b/pypy/module/faulthandler/interp_faulthandler.py
--- a/pypy/module/faulthandler/interp_faulthandler.py
+++ b/pypy/module/faulthandler/interp_faulthandler.py
@@ -1,5 +1,15 @@
-def enable(space, __args__):
-    pass
+class FatalErrorState(object):
+    def __init__(self, space):
+        self.enabled = False
+
+def enable(space):
+    space.fromcache(FatalErrorState).enabled = True
+
+def disable(space):
+    space.fromcache(FatalErrorState).enabled = False
+
+def is_enabled(space):
+    return space.wrap(space.fromcache(FatalErrorState).enabled)
 
 def register(space, __args__):
     pass
diff --git a/pypy/module/faulthandler/test/test_faulthander.py b/pypy/module/faulthandler/test/test_faulthander.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/faulthandler/test/test_faulthander.py
@@ -0,0 +1,11 @@
+class AppTestFaultHandler:
+    spaceconfig = {
+        "usemodules": ["faulthandler"]
+    }
+
+    def test_enable(self):
+        import faulthandler
+        faulthandler.enable()
+        assert faulthandler.is_enabled() is True
+        faulthandler.disable()
+        assert faulthandler.is_enabled() is False