Author: Carl Friedrich Bolz <[email protected]>
Branch: space-newtext
Changeset: r89058:66c3c930e1f2
Date: 2016-12-13 16:54 +0100
http://bitbucket.org/pypy/pypy/changeset/66c3c930e1f2/
Log: merge default
diff --git a/lib_pypy/_pypy_wait.py b/lib_pypy/_pypy_wait.py
--- a/lib_pypy/_pypy_wait.py
+++ b/lib_pypy/_pypy_wait.py
@@ -1,3 +1,4 @@
+import os
from resource import ffi, lib, _make_struct_rusage
__all__ = ["wait3", "wait4"]
@@ -7,6 +8,9 @@
status = ffi.new("int *")
ru = ffi.new("struct rusage *")
pid = lib.wait3(status, options, ru)
+ if pid == -1:
+ errno = ffi.errno
+ raise OSError(errno, os.strerror(errno))
rusage = _make_struct_rusage(ru)
@@ -16,6 +20,9 @@
status = ffi.new("int *")
ru = ffi.new("struct rusage *")
pid = lib.wait4(pid, status, options, ru)
+ if pid == -1:
+ errno = ffi.errno
+ raise OSError(errno, os.strerror(errno))
rusage = _make_struct_rusage(ru)
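
As a hedged illustration of the error handling added above (a minimal sketch assuming a POSIX host; not part of the diff): a failing wait3/wait4 call now raises OSError carrying the C-level errno instead of returning -1.

    # illustrative only: wait4(-999, 0) should fail because no such child exists
    from _pypy_wait import wait4
    try:
        wait4(-999, 0)
    except OSError as e:
        print(e)   # e.g. [Errno 10] No child processes (ECHILD)
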
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: greenlet
-Version: 0.4.10
+Version: 0.4.11
Summary: Lightweight in-process concurrent programming
Home-page: https://github.com/python-greenlet/greenlet
Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -1,7 +1,7 @@
import sys
import _continuation
-__version__ = "0.4.10"
+__version__ = "0.4.11"
# ____________________________________________________________
# Exceptions
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -71,8 +71,11 @@
Analyzing performance of applications is always tricky. We have various
tools, for example a `jitviewer`_ that help us analyze performance.
-The jitviewer shows the code generated by the PyPy JIT in a hierarchical way,
-as shown by the screenshot below:
+The old tool was partly rewritten and combined with vmprof. The service is
+hosted at `vmprof.com`_.
+
+The following shows an old image of the jitviewer,
+which displays the code generated by the PyPy JIT in a hierarchical way:
- at the bottom level, it shows the Python source code of the compiled loops
@@ -84,13 +87,17 @@
.. image:: image/jitviewer.png
-The jitviewer is a web application based on flask and jinja2 (and jQuery on
-the client): if you have great web developing skills and want to help PyPy,
+The jitviewer is a web application based on django and angularjs:
+if you have great web developing skills and want to help PyPy,
this is an ideal task to get started, because it does not require any deep
-knowledge of the internals.
+knowledge of the internals. Head over to `vmprof-python`_, `vmprof-server`_ and
+`vmprof-integration`_ to find open issues and documentation.
-.. _jitviewer: http://bitbucket.org/pypy/jitviewer
-
+.. _jitviewer: http://vmprof.com
+.. _vmprof.com: http://vmprof.com
+.. _vmprof-python: https://github.com/vmprof/vmprof-python
+.. _vmprof-server: https://github.com/vmprof/vmprof-server
+.. _vmprof-integration: https://github.com/vmprof/vmprof-integration
Optimized Unicode Representation
--------------------------------
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,15 @@
.. this is a revision shortly after release-pypy2.7-v5.6
.. startrev: 7e9787939641
+
+For a while now, PyPy has preserved the insertion order of dictionaries and sets.
+However, the set literal syntax ``{x, y, z}`` would mistakenly build a
+set with the opposite order, ``set([z, y, x])``. This has been fixed.
+Note that CPython is inconsistent too: in 2.7.12, ``{5, 5.0}`` would be
+``set([5.0])``, but in 2.7.trunk it is ``set([5])``. PyPy's behavior
+changed in exactly the same way because of this fix.
+
+
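As a small illustration of the paragraph above (plain Python, not part of the diff; the shown output assumes a PyPy that includes this fix):

    # dict and set literals now preserve left-to-right insertion order on PyPy
    print(list({1: 'a', 3: 'b', 2: 'c', 4: 'd'}))   # [1, 3, 2, 4]
    print(list({1, 3, 2, 4}))                       # [1, 3, 2, 4] after this fix
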
.. branch: rpython-error-to-systemerror
Any uncaught RPython exception (from a PyPy bug) is turned into an
@@ -29,3 +38,10 @@
.. branch: desc-specialize
Refactor FunctionDesc.specialize() and related code (RPython annotator).
+
+.. branch: raw-calloc
+
+.. branch: issue2446
+
+Assign ``tp_doc`` to the new TypeObject's type dictionary ``__doc__`` key
+so it will be picked up by app-level objects of that type.
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -433,6 +433,8 @@
make_finalizer_queue(W_Root, self)
self._code_of_sys_exc_info = None
+ self._builtin_functions_by_identifier = {'': None}
+
# can be overridden to a subclass
self.initialize()
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -547,6 +547,8 @@
@jit.dont_look_inside
def _run_finalizers(self):
+ # called by perform() when we have to "perform" this action,
+ # and also directly at the end of gc.collect().
while True:
w_obj = self.space.finalizer_queue.next_dead()
if w_obj is None:
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -247,16 +247,15 @@
def descr_function_repr(self):
return self.getrepr(self.space, 'function %s' % (self.name,))
- # delicate
- _all = {'': None}
def _cleanup_(self):
+ # delicate
from pypy.interpreter.gateway import BuiltinCode
if isinstance(self.code, BuiltinCode):
# we have been seen by other means so rtyping should not choke
# on us
identifier = self.code.identifier
- previous = Function._all.get(identifier, self)
+ previous = self.space._builtin_functions_by_identifier.get(identifier, self)
assert previous is self, (
"duplicate function ids with identifier=%r: %r and %r" % (
identifier, previous, self))
@@ -264,10 +263,10 @@
return False
def add_to_table(self):
- Function._all[self.code.identifier] = self
+ self.space._builtin_functions_by_identifier[self.code.identifier] = self
- def find(identifier):
- return Function._all[identifier]
+ def find(space, identifier):
+ return space._builtin_functions_by_identifier[identifier]
find = staticmethod(find)
def descr_function__reduce__(self, space):
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -689,9 +689,9 @@
return space.newtuple([builtin_code,
space.newtuple([space.newtext(self.identifier)])])
- def find(indentifier):
+ def find(space, identifier):
from pypy.interpreter.function import Function
- return Function._all[indentifier].code
+ return Function.find(space, identifier).code
find = staticmethod(find)
def signature(self):
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1292,9 +1292,10 @@
@jit.unroll_safe
def BUILD_SET(self, itemcount, next_instr):
w_set = self.space.newset()
- for i in range(itemcount):
- w_item = self.popvalue()
+ for i in range(itemcount-1, -1, -1):
+ w_item = self.peekvalue(i)
self.space.call_method(w_set, 'add', w_item)
+ self.popvalues(itemcount)
self.pushvalue(w_set)
def STORE_MAP(self, oparg, next_instr):
diff --git a/pypy/interpreter/test/test_compiler.py b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -729,6 +729,10 @@
class AppTestCompiler:
+ def setup_class(cls):
+ cls.w_host_is_pypy = cls.space.wrap(
+ '__pypy__' in sys.builtin_module_names)
+
def test_bom_with_future(self):
s = '\xef\xbb\xbffrom __future__ import division\nx = 1/2'
ns = {}
@@ -771,6 +775,18 @@
assert math.copysign(1., c[0]) == -1.0
assert math.copysign(1., c[1]) == -1.0
+ def test_dict_and_set_literal_order(self):
+ x = 1
+ l1 = list({1:'a', 3:'b', 2:'c', 4:'d'})
+ l2 = list({1, 3, 2, 4})
+ l3 = list({x:'a', 3:'b', 2:'c', 4:'d'})
+ l4 = list({x, 3, 2, 4})
+ if not self.host_is_pypy:
+ # the full test relies on the host Python providing ordered dicts
+ assert set(l1) == set(l2) == set(l3) == set(l4) == {1, 3, 2, 4}
+ else:
+ assert l1 == l2 == l3 == l4 == [1, 3, 2, 4]
+
##class TestPythonAstCompiler(BaseTestCompiler):
## def setup_method(self, method):
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -580,3 +580,25 @@
pass
sys.settrace(None)
assert seen == ['call', 'exception', 'return']
+
+ def test_generator_trace_stopiteration(self):
+ import sys
+ def f():
+ yield 5
+ gen = f()
+ assert next(gen) == 5
+ seen = []
+ def trace_func(frame, event, *args):
+ print('TRACE:', frame, event, args)
+ seen.append(event)
+ return trace_func
+ def g():
+ for x in gen:
+ never_entered
+ sys.settrace(trace_func)
+ g()
+ sys.settrace(None)
+ print 'seen:', seen
+ # on Python 3 we get an extra 'exception' when 'for' catches
+ # StopIteration
+ assert seen == ['call', 'line', 'call', 'return', 'return']
diff --git a/pypy/interpreter/test/test_special.py b/pypy/interpreter/test/test_special.py
--- a/pypy/interpreter/test/test_special.py
+++ b/pypy/interpreter/test/test_special.py
@@ -4,9 +4,11 @@
def test_Ellipsis(self):
assert Ellipsis == Ellipsis
assert repr(Ellipsis) == 'Ellipsis'
+ assert Ellipsis.__class__.__name__ == 'ellipsis'
def test_NotImplemented(self):
def f():
return NotImplemented
assert f() == NotImplemented
assert repr(NotImplemented) == 'NotImplemented'
+ assert NotImplemented.__class__.__name__ == 'NotImplementedType'
diff --git a/pypy/interpreter/test/test_unicodehelper.py b/pypy/interpreter/test/test_unicodehelper.py
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/test/test_unicodehelper.py
@@ -0,0 +1,26 @@
+from pypy.interpreter.unicodehelper import encode_utf8, decode_utf8
+
+class FakeSpace:
+ pass
+
+def test_encode_utf8():
+ space = FakeSpace()
+ assert encode_utf8(space, u"abc") == "abc"
+ assert encode_utf8(space, u"\u1234") == "\xe1\x88\xb4"
+ assert encode_utf8(space, u"\ud800") == "\xed\xa0\x80"
+ assert encode_utf8(space, u"\udc00") == "\xed\xb0\x80"
+ # for the following test, go to lengths to avoid CPython's optimizer
+ # and .pyc file storage, which collapse the two surrogates into one
+ c = u"\udc00"
+ assert encode_utf8(space, u"\ud800" + c) == "\xf0\x90\x80\x80"
+
+def test_decode_utf8():
+ space = FakeSpace()
+ assert decode_utf8(space, "abc") == u"abc"
+ assert decode_utf8(space, "\xe1\x88\xb4") == u"\u1234"
+ assert decode_utf8(space, "\xed\xa0\x80") == u"\ud800"
+ assert decode_utf8(space, "\xed\xb0\x80") == u"\udc00"
+ got = decode_utf8(space, "\xed\xa0\x80\xed\xb0\x80")
+ assert map(ord, got) == [0xd800, 0xdc00]
+ got = decode_utf8(space, "\xf0\x90\x80\x80")
+ assert map(ord, got) == [0x10000]
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -771,12 +771,12 @@
)
assert not Cell.typedef.acceptable_as_base_class # no __new__
-Ellipsis.typedef = TypeDef("Ellipsis",
+Ellipsis.typedef = TypeDef("ellipsis",
__repr__ = interp2app(Ellipsis.descr__repr__),
)
assert not Ellipsis.typedef.acceptable_as_base_class # no __new__
-NotImplemented.typedef = TypeDef("NotImplemented",
+NotImplemented.typedef = TypeDef("NotImplementedType",
__repr__ = interp2app(NotImplemented.descr__repr__),
)
assert not NotImplemented.typedef.acceptable_as_base_class # no __new__
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -51,6 +51,10 @@
return result
def decode_utf8(space, string):
+ # Surrogates are accepted and not treated specially at all.
+ # If there happen to be two 3-bytes encoding a pair of surrogates,
+ # you still get two surrogate unicode characters in the result.
+ # These are the Python2 rules; Python3 differs.
result, consumed = runicode.str_decode_utf_8(
string, len(string), "strict",
final=True, errorhandler=decode_error_handler(space),
@@ -59,8 +63,9 @@
def encode_utf8(space, uni):
# Note that this function never raises UnicodeEncodeError,
- # since surrogate pairs are allowed.
- # This is not the case with Python3.
+ # since surrogates are allowed, either paired or lone.
+ # A paired surrogate is considered like the non-BMP character
+ # it stands for. These are the Python2 rules; Python3 differs.
return runicode.unicode_encode_utf_8(
uni, len(uni), "strict",
errorhandler=raise_unicode_exception_encode,
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -401,7 +401,8 @@
retries += 1
assert retries <= 5
import gc; gc.collect()
- assert seen == [40, 40, raw1, raw2]
+ assert (seen == [40, 40, raw1, raw2] or
+ seen == [40, 40, raw2, raw1])
assert repr(seen[2]) == "<cdata 'char[]' owning 41 bytes>"
assert repr(seen[3]) == "<cdata 'char[]' owning 41 bytes>"
diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py
--- a/pypy/module/_pickle_support/maker.py
+++ b/pypy/module/_pickle_support/maker.py
@@ -77,7 +77,7 @@
def builtin_code(space, identifier):
from pypy.interpreter import gateway
try:
- return gateway.BuiltinCode.find(identifier)
+ return gateway.BuiltinCode.find(space, identifier)
except KeyError:
raise oefmt(space.w_RuntimeError,
"cannot unpickle builtin code: %s", identifier)
@@ -86,7 +86,7 @@
def builtin_function(space, identifier):
from pypy.interpreter import function
try:
- return function.Function.find(identifier)
+ return function.Function.find(space, identifier)
except KeyError:
raise oefmt(space.w_RuntimeError,
"cannot unpickle builtin function: %s", identifier)
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -25,11 +25,9 @@
basestruct = PyObject.TO
W_BaseObject = W_ObjectObject
- def get_dealloc(self, space):
+ def get_dealloc(self):
from pypy.module.cpyext.typeobject import subtype_dealloc
- return llhelper(
- subtype_dealloc.api_func.functype,
- subtype_dealloc.api_func.get_wrapper(space))
+ return subtype_dealloc.api_func
def allocate(self, space, w_type, itemcount=0):
# similar to PyType_GenericAlloc?
@@ -109,10 +107,8 @@
return tp_alloc(space, w_type, itemcount)
if tp_dealloc:
- def get_dealloc(self, space):
- return llhelper(
- tp_dealloc.api_func.functype,
- tp_dealloc.api_func.get_wrapper(space))
+ def get_dealloc(self):
+ return tp_dealloc.api_func
if tp_attach:
def attach(self, space, pyobj, w_obj):
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -8,12 +8,12 @@
cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES,
mangle_name, pypy_decl, Py_buffer, Py_bufferP)
from pypy.module.cpyext.typeobjectdefs import (
- unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc,
+ unaryfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc,
getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry,
ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc,
cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc,
readbufferproc, getbufferproc, ssizessizeobjargproc)
-from pypy.module.cpyext.pyobject import from_ref, make_ref, Py_DecRef
+from pypy.module.cpyext.pyobject import make_ref, Py_DecRef
from pypy.module.cpyext.pyerrors import PyErr_Occurred
from pypy.module.cpyext.memoryobject import fill_Py_buffer
from pypy.module.cpyext.state import State
@@ -21,8 +21,10 @@
from pypy.interpreter.argument import Arguments
from rpython.rlib.buffer import Buffer
from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.objectmodel import specialize
+from rpython.rlib.objectmodel import specialize, not_rpython
from rpython.tool.sourcetools import func_renamer
+from rpython.flowspace.model import Constant
+from rpython.flowspace.specialcase import register_flow_sc
from rpython.rtyper.annlowlevel import llhelper
from pypy.module.sys.version import CPYTHON_VERSION
@@ -59,6 +61,17 @@
"expected %d-%d arguments, got %d",
low, high, space.len_w(w_ob))
+@not_rpython
+def llslot(space, func):
+ return llhelper(func.api_func.functype, func.api_func.get_wrapper(space))
+
+@register_flow_sc(llslot)
+def sc_llslot(ctx, v_space, v_func):
+ assert isinstance(v_func, Constant)
+ get_llhelper = v_func.value.api_func.get_llhelper
+ return ctx.appcall(get_llhelper, v_space)
+
+
def wrap_init(space, w_self, w_args, func, w_kwargs):
func_init = rffi.cast(initproc, func)
res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs)
@@ -106,7 +119,7 @@
args_w = space.fixedview(w_args)
arg3 = space.w_None
if len(args_w) > 1:
- arg3 = args_w[1]
+ arg3 = args_w[1]
return generic_cpy_call(space, func_ternary, w_self, args_w[0], arg3)
def wrap_ternaryfunc_r(space, w_self, w_args, func):
@@ -121,7 +134,7 @@
Py_DecRef(space, ref)
arg3 = space.w_None
if len(args_w) > 1:
- arg3 = args_w[1]
+ arg3 = args_w[1]
return generic_cpy_call(space, func_ternary, args_w[0], w_self, arg3)
@@ -322,7 +335,7 @@
self.strides = [1]
else:
self.strides = strides
- self.ndim = ndim
+ self.ndim = ndim
self.itemsize = itemsize
self.readonly = readonly
@@ -437,9 +450,10 @@
try:
return SLOTS[key]
except KeyError:
- ret = build_slot_tp_function(space, typedef, name)
- SLOTS[key] = ret
- return ret
+ slot_func = build_slot_tp_function(space, typedef, name)
+ api_func = slot_func.api_func if slot_func else None
+ SLOTS[key] = api_func
+ return api_func
def build_slot_tp_function(space, typedef, name):
w_type = space.gettypeobject(typedef)
@@ -472,7 +486,6 @@
@func_renamer("cpyext_%s_%s" % (name.replace('.', '_'),
typedef.name))
def slot_func(space, w_self):
return space.call_function(slot_fn, w_self)
- api_func = slot_func.api_func
handled = True
# binary functions
@@ -499,7 +512,6 @@
@func_renamer("cpyext_%s_%s" % (name.replace('.', '_'),
typedef.name))
def slot_func(space, w_self, w_arg):
return space.call_function(slot_fn, w_self, w_arg)
- api_func = slot_func.api_func
handled = True
# binary-with-Py_ssize_t-type
@@ -517,7 +529,6 @@
@func_renamer("cpyext_%s_%s" % (name.replace('.', '_'),
typedef.name))
def slot_func(space, w_self, arg):
return space.call_function(slot_fn, w_self, space.newint(arg))
- api_func = slot_func.api_func
handled = True
# ternary functions
@@ -532,7 +543,6 @@
@func_renamer("cpyext_%s_%s" % (name.replace('.', '_'),
typedef.name))
def slot_func(space, w_self, w_arg1, w_arg2):
return space.call_function(slot_fn, w_self, w_arg1, w_arg2)
- api_func = slot_func.api_func
handled = True
if handled:
@@ -552,7 +562,7 @@
else:
space.call_function(delattr_fn, w_self, w_name)
return 0
- api_func = slot_tp_setattro.api_func
+ slot_func = slot_tp_setattro
elif name == 'tp_getattro':
getattr_fn = w_type.getdictvalue(space, '__getattribute__')
if getattr_fn is None:
@@ -562,7 +572,7 @@
@func_renamer("cpyext_tp_getattro_%s" % (typedef.name,))
def slot_tp_getattro(space, w_self, w_name):
return space.call_function(getattr_fn, w_self, w_name)
- api_func = slot_tp_getattro.api_func
+ slot_func = slot_tp_getattro
elif name == 'tp_call':
call_fn = w_type.getdictvalue(space, '__call__')
if call_fn is None:
@@ -574,7 +584,7 @@
args = Arguments(space, [w_self],
w_stararg=w_args, w_starstararg=w_kwds)
return space.call_args(call_fn, args)
- api_func = slot_tp_call.api_func
+ slot_func = slot_tp_call
elif name == 'tp_iternext':
iternext_fn = w_type.getdictvalue(space, 'next')
@@ -590,7 +600,7 @@
if not e.match(space, space.w_StopIteration):
raise
return None
- api_func = slot_tp_iternext.api_func
+ slot_func = slot_tp_iternext
elif name == 'tp_init':
init_fn = w_type.getdictvalue(space, '__init__')
@@ -605,7 +615,7 @@
w_stararg=w_args, w_starstararg=w_kwds)
space.call_args(init_fn, args)
return 0
- api_func = slot_tp_init.api_func
+ slot_func = slot_tp_init
elif name == 'tp_new':
new_fn = w_type.getdictvalue(space, '__new__')
if new_fn is None:
@@ -617,12 +627,12 @@
args = Arguments(space, [w_self],
w_stararg=w_args, w_starstararg=w_kwds)
return space.call_args(space.get(new_fn, w_self), args)
- api_func = slot_tp_new.api_func
+ slot_func = slot_tp_new
elif name == 'tp_as_buffer.c_bf_getbuffer':
buff_fn = w_type.getdictvalue(space, '__buffer__')
if buff_fn is None:
return
- @cpython_api([PyObject, Py_bufferP, rffi.INT_real],
+ @cpython_api([PyObject, Py_bufferP, rffi.INT_real],
rffi.INT_real, header=None, error=-1)
@func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name))
def buff_w(space, w_self, view, flags):
@@ -647,14 +657,14 @@
return 0
# XXX remove this when it no longer crashes a translated PyPy
return
- api_func = buff_w.api_func
+ slot_func = buff_w
else:
# missing: tp_as_number.nb_nonzero, tp_as_number.nb_coerce
# tp_as_sequence.c_sq_contains, tp_as_sequence.c_sq_length
# richcmpfunc(s)
return
- return lambda: llhelper(api_func.functype, api_func.get_wrapper(space))
+ return slot_func
PyWrapperFlag_KEYWORDS = 1
@@ -985,8 +995,8 @@
slotdefs = sorted(slotdefs, key=slotdef_sort_key)
slotdefs_for_tp_slots = unrolling_iterable(
- [(x.method_name, x.slot_name, x.slot_names, x.slot_func)
- for x in slotdefs])
+ [(x.method_name, x.slot_name, x.slot_names,
+ x.slot_func.api_func if x.slot_func else None) for x in slotdefs])
slotdefs_for_wrappers = unrolling_iterable(
[(x.method_name, x.slot_names, x.wrapper_func, x.wrapper_func_kwds, x.doc)
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -142,7 +142,7 @@
assert fuu2(u"abc").baz().escape()
raises(TypeError, module.fooType.object_member.__get__, 1)
- def test_multiple_inheritance(self):
+ def test_multiple_inheritance1(self):
module = self.import_module(name='foo')
obj = module.UnicodeSubtype(u'xyz')
obj2 = module.UnicodeSubtype2()
@@ -422,7 +422,7 @@
assert space.int_w(space.getattr(w_class, w_name)) == 1
space.delitem(w_dict, w_name)
- def test_multiple_inheritance(self, space, api):
+ def test_multiple_inheritance2(self, space, api):
w_class = space.appexec([], """():
class A(object):
pass
@@ -1167,3 +1167,38 @@
__metaclass__ = FooType
print repr(X)
X()
+
+ def test_multiple_inheritance3(self):
+ module = self.import_extension('foo', [
+ ("new_obj", "METH_NOARGS",
+ '''
+ PyObject *obj;
+ PyTypeObject *Base1, *Base2, *Base12;
+ Base1 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
+ Base2 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
+ Base12 = (PyTypeObject*)PyType_Type.tp_alloc(&PyType_Type, 0);
+ Base1->tp_name = "Base1";
+ Base2->tp_name = "Base2";
+ Base12->tp_name = "Base12";
+ Base1->tp_basicsize = sizeof(PyHeapTypeObject);
+ Base2->tp_basicsize = sizeof(PyHeapTypeObject);
+ Base12->tp_basicsize = sizeof(PyHeapTypeObject);
+ Base1->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+ Base2->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+ Base12->tp_flags = Py_TPFLAGS_DEFAULT;
+ Base12->tp_base = Base1;
+ Base12->tp_bases = PyTuple_Pack(2, Base1, Base2);
+ Base12->tp_doc = "The Base12 type or object";
+ if (PyType_Ready(Base1) < 0) return NULL;
+ if (PyType_Ready(Base2) < 0) return NULL;
+ if (PyType_Ready(Base12) < 0) return NULL;
+ obj = PyObject_New(PyObject, Base12);
+ return obj;
+ '''
+ )])
+ obj = module.new_obj()
+ assert 'Base12' in str(obj)
+ assert type(obj).__doc__ == "The Base12 type or object"
+ assert obj.__doc__ == "The Base12 type or object"
+
+
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -3,7 +3,6 @@
from rpython.rlib import jit
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import rsplit
-from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.interpreter.baseobjspace import W_Root, DescrMismatch
@@ -28,7 +27,8 @@
PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr,
track_reference, Py_DecRef, as_pyobj)
from pypy.module.cpyext.slotdefs import (
- slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function)
+ slotdefs_for_tp_slots, slotdefs_for_wrappers, get_slot_tp_function,
+ llslot)
from pypy.module.cpyext.state import State
from pypy.module.cpyext.structmember import PyMember_GetOne, PyMember_SetOne
from pypy.module.cpyext.typeobjectdefs import (
@@ -249,27 +249,21 @@
# coming from a parent C type.
typedef = w_type.layout.typedef
- for method_name, slot_name, slot_names, slot_func in slotdefs_for_tp_slots:
+ for method_name, slot_name, slot_names, slot_apifunc in slotdefs_for_tp_slots:
w_descr = w_type.lookup(method_name)
if w_descr is None:
# XXX special case iternext
continue
- slot_func_helper = None
-
- if slot_func is None and typedef is not None:
- get_slot = get_slot_tp_function(space, typedef, slot_name)
- if get_slot:
- slot_func_helper = get_slot()
- elif slot_func:
- slot_func_helper = llhelper(slot_func.api_func.functype,
- slot_func.api_func.get_wrapper(space))
-
- if slot_func_helper is None:
+ if slot_apifunc is None and typedef is not None:
+ slot_apifunc = get_slot_tp_function(space, typedef, slot_name)
+ if not slot_apifunc:
if WARN_ABOUT_MISSING_SLOT_FUNCTIONS:
- os.write(2, "%s defined by %s but no slot function defined!\n"
% (
+ os.write(2,
+ "%s defined by %s but no slot function defined!\n" % (
method_name, w_type.getname(space)))
continue
+ slot_func_helper = slot_apifunc.get_llhelper(space)
# XXX special case wrapper-functions and use a "specific" slot func
@@ -329,6 +323,8 @@
w_obj = W_PyCWrapperObject(space, pto, method_name, wrapper_func,
wrapper_func_kwds, doc, func_voidp, offset=offset)
dict_w[method_name] = w_obj
+ if pto.c_tp_doc:
+ dict_w['__doc__'] = space.newbytes(rffi.charp2str(pto.c_tp_doc))
if pto.c_tp_new:
add_tp_new_wrapper(space, dict_w, pto)
@@ -373,9 +369,8 @@
def setup_new_method_def(space):
ptr = get_new_method_def(space)
- ptr.c_ml_meth = rffi.cast(PyCFunction_typedef,
- llhelper(tp_new_wrapper.api_func.functype,
- tp_new_wrapper.api_func.get_wrapper(space)))
+ ptr.c_ml_meth = rffi.cast(
+ PyCFunction_typedef, llslot(space, tp_new_wrapper))
def add_tp_new_wrapper(space, dict_w, pto):
if "__new__" in dict_w:
@@ -465,13 +460,17 @@
convert_member_defs(space, dict_w, pto.c_tp_members, self)
name = rffi.charp2str(pto.c_tp_name)
- new_layout = (pto.c_tp_basicsize > rffi.sizeof(PyObject.TO) or
- pto.c_tp_itemsize > 0)
+ flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
+ if flag_heaptype:
+ minsize = rffi.sizeof(PyHeapTypeObject.TO)
+ else:
+ minsize = rffi.sizeof(PyObject.TO)
+ new_layout = (pto.c_tp_basicsize > minsize or pto.c_tp_itemsize > 0)
W_TypeObject.__init__(self, space, name,
- bases_w or [space.w_object], dict_w, force_new_layout=new_layout)
+ bases_w or [space.w_object], dict_w, force_new_layout=new_layout,
+ is_heaptype=flag_heaptype)
self.flag_cpytype = True
- self.flag_heaptype = pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE
# if a sequence or a mapping, then set the flag to force it
if pto.c_tp_as_sequence and pto.c_tp_as_sequence.c_sq_item:
self.flag_map_or_seq = 'S'
@@ -494,8 +493,7 @@
def subtype_dealloc(space, obj):
pto = obj.c_ob_type
base = pto
- this_func_ptr = llhelper(subtype_dealloc.api_func.functype,
- subtype_dealloc.api_func.get_wrapper(space))
+ this_func_ptr = llslot(space, subtype_dealloc)
while base.c_tp_dealloc == this_func_ptr:
base = base.c_tp_base
assert base
@@ -597,46 +595,31 @@
return
c_buf = lltype.malloc(PyBufferProcs, flavor='raw', zero=True)
lltype.render_immortal(c_buf)
- c_buf.c_bf_getsegcount = llhelper(bf_segcount.api_func.functype,
- bf_segcount.api_func.get_wrapper(space))
+ c_buf.c_bf_getsegcount = llslot(space, bf_segcount)
if space.is_w(w_type, space.w_bytes):
# Special case: str doesn't support get_raw_address(), so we have a
# custom get*buffer that instead gives the address of the char* in the
# PyBytesObject*!
- c_buf.c_bf_getreadbuffer = llhelper(
- str_getreadbuffer.api_func.functype,
- str_getreadbuffer.api_func.get_wrapper(space))
- c_buf.c_bf_getcharbuffer = llhelper(
- str_getcharbuffer.api_func.functype,
- str_getcharbuffer.api_func.get_wrapper(space))
+ c_buf.c_bf_getreadbuffer = llslot(space, str_getreadbuffer)
+ c_buf.c_bf_getcharbuffer = llslot(space, str_getcharbuffer)
elif space.is_w(w_type, space.w_unicode):
# Special case: unicode doesn't support get_raw_address(), so we have a
# custom get*buffer that instead gives the address of the char* in the
# PyUnicodeObject*!
- c_buf.c_bf_getreadbuffer = llhelper(
- unicode_getreadbuffer.api_func.functype,
- unicode_getreadbuffer.api_func.get_wrapper(space))
+ c_buf.c_bf_getreadbuffer = llslot(space, unicode_getreadbuffer)
elif space.is_w(w_type, space.w_buffer):
# Special case: we store a permanent address on the cpyext wrapper,
# so we'll reuse that.
# Note: we could instead store a permanent address on the buffer object,
# and use get_raw_address()
- c_buf.c_bf_getreadbuffer = llhelper(
- buf_getreadbuffer.api_func.functype,
- buf_getreadbuffer.api_func.get_wrapper(space))
- c_buf.c_bf_getcharbuffer = llhelper(
- buf_getcharbuffer.api_func.functype,
- buf_getcharbuffer.api_func.get_wrapper(space))
+ c_buf.c_bf_getreadbuffer = llslot(space, buf_getreadbuffer)
+ c_buf.c_bf_getcharbuffer = llslot(space, buf_getcharbuffer)
else:
# use get_raw_address()
- c_buf.c_bf_getreadbuffer = llhelper(bf_getreadbuffer.api_func.functype,
- bf_getreadbuffer.api_func.get_wrapper(space))
- c_buf.c_bf_getcharbuffer = llhelper(bf_getcharbuffer.api_func.functype,
- bf_getcharbuffer.api_func.get_wrapper(space))
+ c_buf.c_bf_getreadbuffer = llslot(space, bf_getreadbuffer)
+ c_buf.c_bf_getcharbuffer = llslot(space, bf_getcharbuffer)
if bufspec == 'read-write':
- c_buf.c_bf_getwritebuffer = llhelper(
- bf_getwritebuffer.api_func.functype,
- bf_getwritebuffer.api_func.get_wrapper(space))
+ c_buf.c_bf_getwritebuffer = llslot(space, bf_getwritebuffer)
pto.c_tp_as_buffer = c_buf
pto.c_tp_flags |= Py_TPFLAGS_HAVE_GETCHARBUFFER
pto.c_tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER
@@ -697,12 +680,10 @@
# dealloc
if space.gettypeobject(w_type.layout.typedef) is w_type:
# only for the exact type, like 'space.w_tuple' or 'space.w_list'
- pto.c_tp_dealloc = typedescr.get_dealloc(space)
+ pto.c_tp_dealloc = typedescr.get_dealloc().get_llhelper(space)
else:
# for all subtypes, use subtype_dealloc()
- pto.c_tp_dealloc = llhelper(
- subtype_dealloc.api_func.functype,
- subtype_dealloc.api_func.get_wrapper(space))
+ pto.c_tp_dealloc = llslot(space, subtype_dealloc)
if space.is_w(w_type, space.w_bytes):
pto.c_tp_itemsize = 1
elif space.is_w(w_type, space.w_tuple):
@@ -710,10 +691,8 @@
# buffer protocol
setup_buffer_procs(space, w_type, pto)
- pto.c_tp_free = llhelper(PyObject_Free.api_func.functype,
- PyObject_Free.api_func.get_wrapper(space))
- pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype,
- PyType_GenericAlloc.api_func.get_wrapper(space))
+ pto.c_tp_free = llslot(space, PyObject_Free)
+ pto.c_tp_alloc = llslot(space, PyType_GenericAlloc)
builder = space.fromcache(StaticObjectBuilder)
if ((pto.c_tp_flags & Py_TPFLAGS_HEAPTYPE) != 0
and builder.cpyext_type_init is None):
@@ -904,15 +883,11 @@
if not pto.c_tp_setattro:
from pypy.module.cpyext.object import PyObject_GenericSetAttr
- pto.c_tp_setattro = llhelper(
- PyObject_GenericSetAttr.api_func.functype,
- PyObject_GenericSetAttr.api_func.get_wrapper(space))
+ pto.c_tp_setattro = llslot(space, PyObject_GenericSetAttr)
if not pto.c_tp_getattro:
from pypy.module.cpyext.object import PyObject_GenericGetAttr
- pto.c_tp_getattro = llhelper(
- PyObject_GenericGetAttr.api_func.functype,
- PyObject_GenericGetAttr.api_func.get_wrapper(space))
+ pto.c_tp_getattro = llslot(space, PyObject_GenericGetAttr)
if w_obj.is_cpytype():
Py_DecRef(space, pto.c_tp_dict)
diff --git a/pypy/module/gc/interp_gc.py b/pypy/module/gc/interp_gc.py
--- a/pypy/module/gc/interp_gc.py
+++ b/pypy/module/gc/interp_gc.py
@@ -14,7 +14,28 @@
cache.clear()
cache = space.fromcache(MapAttrCache)
cache.clear()
+
rgc.collect()
+
+ # if we are running in gc.disable() mode but gc.collect() is called,
+ # we should still call the finalizers now. We do this as an attempt
+ # to get closer to CPython's behavior: in Py3.5 some tests
+ # specifically rely on that. This is similar to how, in CPython, an
+ # explicit gc.collect() will invoke finalizers from cycles and fully
+ # ignore the gc.disable() mode.
+ temp_reenable = not space.user_del_action.enabled_at_app_level
+ if temp_reenable:
+ enable_finalizers(space)
+ try:
+ # fetch the pending finalizers from the queue, where they are
+ # likely to have been added by rgc.collect() above, and actually
+ # run them now. This forces them to run before this function
+ # returns, and also always in the enable_finalizers() mode.
+ space.user_del_action._run_finalizers()
+ finally:
+ if temp_reenable:
+ disable_finalizers(space)
+
return space.newint(0)
def enable(space):
diff --git a/pypy/module/gc/test/test_gc.py b/pypy/module/gc/test/test_gc.py
--- a/pypy/module/gc/test/test_gc.py
+++ b/pypy/module/gc/test/test_gc.py
@@ -70,6 +70,19 @@
gc.enable()
assert gc.isenabled()
+ def test_gc_collect_overrides_gc_disable(self):
+ import gc
+ deleted = []
+ class X(object):
+ def __del__(self):
+ deleted.append(1)
+ assert gc.isenabled()
+ gc.disable()
+ X()
+ gc.collect()
+ assert deleted == [1]
+ gc.enable()
+
class AppTestGcDumpHeap(object):
pytestmark = py.test.mark.xfail(run=False)
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -374,17 +374,7 @@
def test_sum(self):
result = self.run("sum")
assert result == sum(range(30))
- self.check_vectorized(1, 1)
-
- def define_sum():
- return """
- a = |30|
- sum(a)
- """
- def test_sum(self):
- result = self.run("sum")
- assert result == sum(range(30))
- self.check_vectorized(1, 1)
+ self.check_vectorized(1, 0)
def define_sum_int():
return """
@@ -408,7 +398,7 @@
def test_sum_multi(self):
result = self.run("sum_multi")
assert result == sum(range(30)) + sum(range(60))
- self.check_vectorized(1, 1)
+ self.check_vectorized(1, 0)
def define_sum_float_to_int16():
return """
@@ -490,7 +480,7 @@
assert retval == sum(range(1,11))
# check that we got only one loop
assert len(get_stats().loops) == 1
- self.check_vectorized(2, 1)
+ self.check_vectorized(2, 0)
def test_reduce_axis_compile_only_once(self):
self.compile_graph()
@@ -501,7 +491,7 @@
retval = self.interp.eval_graph(self.graph, [i])
# check that we got only one loop
assert len(get_stats().loops) == 1
- self.check_vectorized(3, 1)
+ self.check_vectorized(3, 0)
def define_prod():
return """
@@ -518,12 +508,10 @@
def test_prod(self):
result = self.run("prod")
assert int(result) == 576
- self.check_vectorized(1, 1)
def test_prod_zero(self):
result = self.run("prod_zero")
assert int(result) == 0
- self.check_vectorized(1, 1)
def define_max():
diff --git a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
--- a/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_micronumpy.py
@@ -75,8 +75,6 @@
arith_comb = [
('sum','int', 1742, 1742, 1),
- ('sum','float', 2581, 2581, 1),
- ('prod','float', 1, 3178, 1),
('prod','int', 1, 3178, 1),
('any','int', 1, 2239, 1),
('any','int', 0, 4912, 0),
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
@@ -494,3 +494,15 @@
def test_negative_array_size(self):
ffi = FFI()
py.test.raises(ValueError, ffi.cast, "int[-5]", 0)
+
+ def test_cannot_instantiate_manually(self):
+ ffi = FFI()
+ ct = type(ffi.typeof("void *"))
+ py.test.raises(TypeError, ct)
+ py.test.raises(TypeError, ct, ffi.NULL)
+ for cd in [type(ffi.cast("void *", 0)),
+ type(ffi.new("char[]", 3)),
+ type(ffi.gc(ffi.NULL, lambda x: None))]:
+ py.test.raises(TypeError, cd)
+ py.test.raises(TypeError, cd, ffi.NULL)
+ py.test.raises(TypeError, cd, ffi.typeof("void *"))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
@@ -361,7 +361,8 @@
retries += 1
assert retries <= 5
import gc; gc.collect()
- assert seen == [40, 40, raw1, raw2]
+ assert (seen == [40, 40, raw1, raw2] or
+ seen == [40, 40, raw2, raw1])
assert repr(seen[2]) == "<cdata 'char[]' owning 41 bytes>"
assert repr(seen[3]) == "<cdata 'char[]' owning 41 bytes>"
diff --git a/pypy/module/test_lib_pypy/test_os_wait.py b/pypy/module/test_lib_pypy/test_os_wait.py
--- a/pypy/module/test_lib_pypy/test_os_wait.py
+++ b/pypy/module/test_lib_pypy/test_os_wait.py
@@ -34,3 +34,7 @@
assert os.WEXITSTATUS(status) == exit_status
assert isinstance(rusage.ru_utime, float)
assert isinstance(rusage.ru_maxrss, int)
+
+def test_errors():
+ py.test.raises(OSError, _pypy_wait.wait3, -999)
+ py.test.raises(OSError, _pypy_wait.wait4, -999, -999)
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -162,7 +162,8 @@
@dont_look_inside
def __init__(self, space, name, bases_w, dict_w,
- overridetypedef=None, force_new_layout=False):
+ overridetypedef=None, force_new_layout=False,
+ is_heaptype=True):
self.space = space
self.name = name
self.bases_w = bases_w
@@ -172,7 +173,7 @@
self.weakrefable = False
self.w_doc = space.w_None
self.weak_subclasses = []
- self.flag_heaptype = False
+ self.flag_heaptype = is_heaptype
self.flag_cpytype = False
self.flag_abstract = False
self.flag_sequence_bug_compat = False
@@ -740,7 +741,7 @@
dict_w[key] = space.getitem(w_dict, w_key)
w_type = space.allocate_instance(W_TypeObject, w_typetype)
W_TypeObject.__init__(w_type, space, name, bases_w or [space.w_object],
- dict_w)
+ dict_w, is_heaptype=True)
w_type.ready()
return w_type
@@ -1136,7 +1137,6 @@
if len(w_self.bases_w) == 0:
w_self.bases_w = [w_self.space.w_object]
w_bestbase = check_and_find_best_base(w_self.space, w_self.bases_w)
- w_self.flag_heaptype = True
for w_base in w_self.bases_w:
if not isinstance(w_base, W_TypeObject):
continue
@@ -1161,7 +1161,6 @@
w_doc = w_self.space.newtext_or_none(instancetypedef.doc)
w_self.w_doc = w_doc
ensure_common_attributes(w_self)
- w_self.flag_heaptype = instancetypedef.heaptype
#
# usually 'instancetypedef' is new, i.e. not seen in any base,
# but not always (see Exception class)
@@ -1335,7 +1334,8 @@
else:
overridetypedef = typedef
w_type = W_TypeObject(space, typedef.name, bases_w, dict_w,
- overridetypedef=overridetypedef)
+ overridetypedef=overridetypedef,
+ is_heaptype=overridetypedef.heaptype)
if typedef is not overridetypedef:
w_type.w_doc = space.newtext_or_none(typedef.doc)
if hasattr(typedef, 'flag_sequence_bug_compat'):
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
--- a/rpython/jit/backend/llgraph/runner.py
+++ b/rpython/jit/backend/llgraph/runner.py
@@ -1128,7 +1128,7 @@
value = sum(value)
elif info.accum_operation == '*':
def prod(acc, x): return acc * x
- value = reduce(prod, value, 1)
+ value = reduce(prod, value, 1.0)
else:
raise NotImplementedError("accum operator in fail guard")
values[i] = value
diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py
--- a/rpython/jit/backend/ppc/regalloc.py
+++ b/rpython/jit/backend/ppc/regalloc.py
@@ -1066,7 +1066,6 @@
prepare_cond_call_value_r = prepare_cond_call_value_i
-
def notimplemented(self, op):
msg = '[PPC/regalloc] %s not implemented\n' % op.getopname()
if we_are_translated():
diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py
--- a/rpython/jit/backend/zarch/assembler.py
+++ b/rpython/jit/backend/zarch/assembler.py
@@ -396,6 +396,7 @@
# * gcmap is pushed
# * the old value of these regs must already be stored in the jitframe
# * on exit, all registers are restored from the jitframe
+ # * the result of the call is moved to register r1
mc = InstrBuilder()
self.mc = mc
@@ -427,6 +428,9 @@
self._reload_frame_if_necessary(mc)
self.pop_gcmap(mc) # cancel the push_gcmap(store=True) in the caller
+
+ mc.LGR(r.SCRATCH2, r.RES)
+
self._pop_core_regs_from_jitframe(mc, saved_regs)
if supports_floats:
self._pop_fp_regs_from_jitframe(mc)
diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py
--- a/rpython/jit/backend/zarch/opassembler.py
+++ b/rpython/jit/backend/zarch/opassembler.py
@@ -374,10 +374,12 @@
_COND_CALL_SAVE_REGS = [r.r11, r.r2, r.r3, r.r4, r.r5]
def emit_cond_call(self, op, arglocs, regalloc):
+ resloc = arglocs[0]
+ arglocs = arglocs[1:]
+
fcond = self.guard_success_cc
self.guard_success_cc = c.cond_none
assert fcond.value != c.cond_none.value
- fcond = c.negate(fcond)
jmp_adr = self.mc.get_relative_pos()
self.mc.reserve_cond_jump() # patched later to a relative branch
@@ -411,6 +413,8 @@
self.mc.BASR(r.r14, r.r14)
# restoring the registers saved above, and doing pop_gcmap(), is left
# to the cond_call_slowpath helper. We never have any result value.
+ if resloc is not None:
+ self.mc.LGR(resloc, r.SCRATCH2)
relative_target = self.mc.currpos() - jmp_adr
pmc = OverwritingBuilder(self.mc, jmp_adr, 1)
pmc.BRCL(fcond, l.imm(relative_target))
@@ -419,6 +423,9 @@
# guard_no_exception too
self.previous_cond_call_jcond = jmp_adr, fcond
+ emit_cond_call_value_i = emit_cond_call
+ emit_cond_call_value_r = emit_cond_call
+
class AllocOpAssembler(object):
_mixin_ = True
diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
--- a/rpython/jit/backend/zarch/regalloc.py
+++ b/rpython/jit/backend/zarch/regalloc.py
@@ -1107,14 +1107,32 @@
def prepare_cond_call(self, op):
self.load_condition_into_cc(op.getarg(0))
- locs = []
+ locs = [None]
+ self.assembler.guard_success_cc = c.negate(
+ self.assembler.guard_success_cc)
# support between 0 and 4 integer arguments
assert 2 <= op.numargs() <= 2 + 4
for i in range(1, op.numargs()):
loc = self.loc(op.getarg(i))
assert loc.type != FLOAT
locs.append(loc)
- return locs
+ return locs # [None, function, arg0, ..., argn]
+
+ def prepare_cond_call_value_i(self, op):
+ x = self.ensure_reg(op.getarg(0))
+ self.load_condition_into_cc(op.getarg(0))
+ self.rm.force_allocate_reg(op, selected_reg=x) # spilled if survives
+ # ^^^ if arg0!=0, we jump over the next block of code (the call)
+ locs = [x]
+ # support between 0 and 4 integer arguments
+ assert 2 <= op.numargs() <= 2 + 4
+ for i in range(1, op.numargs()):
+ loc = self.loc(op.getarg(i))
+ assert loc.type != FLOAT
+ locs.append(loc)
+ return locs # [res, function, args...]
+
+ prepare_cond_call_value_r = prepare_cond_call_value_i
def prepare_cond_call_gc_wb(self, op):
arglocs = [self.ensure_reg(op.getarg(0))]
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -593,6 +593,8 @@
log.WARNING('ignoring hint %r at %r' % (hints, self.graph))
def _rewrite_raw_malloc(self, op, name, args):
+ # NB. the operation 'raw_malloc' is not supported; this is for
+ # the operation 'malloc'/'malloc_varsize' with {flavor: 'raw'}
d = op.args[1].value.copy()
d.pop('flavor')
add_memory_pressure = d.pop('add_memory_pressure', False)
diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py
--- a/rpython/jit/codewriter/support.py
+++ b/rpython/jit/codewriter/support.py
@@ -142,10 +142,14 @@
assert len(lst) == len(args_v), (
"not supported so far: 'greens' variables contain Void")
# a crash here means that you have to reorder the variable named in
- # the JitDriver. Indeed, greens and reds must both be sorted: first
- # all INTs, followed by all REFs, followed by all FLOATs.
+ # the JitDriver.
lst2 = sort_vars(lst)
- assert lst == lst2
+ assert lst == lst2, ("You have to reorder the variables named in "
+ "the JitDriver (both the 'greens' and 'reds' independently). "
+ "They must be sorted like this: first all the integer-like, "
+ "then all the pointer-like, and finally the floats.\n"
+ "Got: %r\n"
+ "Expected: %r" % (lst, lst2))
return lst
#
return (_sort(greens_v, True), _sort(reds_v, False))
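
For illustration of the ordering rule spelled out in the new assert message (a sketch only; the variable names below are invented, not taken from the diff):

    from rpython.rlib.jit import JitDriver

    # within 'greens' and within 'reds' independently: integer-like variables
    # first, then pointer-like ones, then floats
    driver = JitDriver(greens=['pc', 'bytecode'],        # int-like, then ref-like
                       reds=['counter', 'frame', 'x'])   # int, ref, float
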
diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py
--- a/rpython/jit/metainterp/optimizeopt/schedule.py
+++ b/rpython/jit/metainterp/optimizeopt/schedule.py
@@ -978,10 +978,7 @@
self.right is other.right
class AccumPack(Pack):
- SUPPORTED = { rop.FLOAT_ADD: '+',
- rop.INT_ADD: '+',
- rop.FLOAT_MUL: '*',
- }
+ SUPPORTED = { rop.INT_ADD: '+', }
def __init__(self, nodes, operator, position):
Pack.__init__(self, nodes)
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_costmodel.py
@@ -197,7 +197,7 @@
f13 = float_add(f12, f11)
""")
savings = self.savings(loop1)
- assert savings == 2
+ assert savings == -2
@py.test.mark.parametrize("bytes,s", [(4,0),(8,0)])
def test_sum_float_to_int(self, bytes, s):
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
--- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
+++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
@@ -1162,32 +1162,32 @@
vopt = self.vectorize(loop,1)
self.assert_equal(loop, self.parse_loop(opt))
- def test_accumulate_basic(self):
- trace = """
- [p0, i0, f0]
- f1 = raw_load_f(p0, i0, descr=floatarraydescr)
- f2 = float_add(f0, f1)
- i1 = int_add(i0, 8)
- i2 = int_lt(i1, 100)
- guard_true(i2) [p0, i0, f2]
- jump(p0, i1, f2)
- """
- trace_opt = """
- [p0, i0, f0]
- v6[0xf64] = vec_f()
- v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64])
- v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1)
- label(p0, i0, v2[2xf64])
- i1 = int_add(i0, 16)
- i2 = int_lt(i1, 100)
- guard_true(i2) [p0, i0, v2[2xf64]]
- v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr)
- v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64])
- jump(p0, i1, v3[2xf64])
- """
- loop = self.parse_loop(trace)
- opt = self.vectorize(loop)
- self.assert_equal(loop, self.parse_loop(trace_opt))
+ #def test_accumulate_basic(self):
+ # trace = """
+ # [p0, i0, f0]
+ # f1 = raw_load_f(p0, i0, descr=floatarraydescr)
+ # f2 = float_add(f0, f1)
+ # i1 = int_add(i0, 8)
+ # i2 = int_lt(i1, 100)
+ # guard_true(i2) [p0, i0, f2]
+ # jump(p0, i1, f2)
+ # """
+ # trace_opt = """
+ # [p0, i0, f0]
+ # v6[0xf64] = vec_f()
+ # v7[2xf64] = vec_float_xor(v6[0xf64], v6[0xf64])
+ # v2[2xf64] = vec_pack_f(v7[2xf64], f0, 0, 1)
+ # label(p0, i0, v2[2xf64])
+ # i1 = int_add(i0, 16)
+ # i2 = int_lt(i1, 100)
+ # guard_true(i2) [p0, i0, v2[2xf64]]
+ # v1[2xf64] = vec_load_f(p0, i0, 1, 0, descr=floatarraydescr)
+ # v3[2xf64] = vec_float_add(v2[2xf64], v1[2xf64])
+ # jump(p0, i1, v3[2xf64])
+ # """
+ # loop = self.parse_loop(trace)
+ # opt = self.vectorize(loop)
+ # self.assert_equal(loop, self.parse_loop(trace_opt))
def test_element_f45_in_guard_failargs(self):
trace = self.parse_loop("""
diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
--- a/rpython/jit/metainterp/optimizeopt/vector.py
+++ b/rpython/jit/metainterp/optimizeopt/vector.py
@@ -842,11 +842,16 @@
oplist.append(vecop)
opnum = rop.VEC_INT_XOR
if datatype == FLOAT:
- opnum = rop.VEC_FLOAT_XOR
+ # see PRECISION loss below
+ raise NotImplementedError
vecop = VecOperation(opnum, [vecop, vecop],
vecop, count)
oplist.append(vecop)
elif pack.reduce_init() == 1:
+ # PRECISION loss: the numbers are accumulated (associative and commutative properties must hold),
+ # so you can end up multiplying a small number by a huge number, giving an
+ # imprecise result; this is therefore disabled for now
+ raise NotImplementedError
# multiply is only supported by floats
vecop = OpHelpers.create_vec_expand(ConstFloat(1.0), bytesize,
signed, count)
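
A generic sketch of the precision concern mentioned in these comments (plain Python, not part of the diff): floating-point accumulation is not associative, so the reordering performed by a vectorized reduction can change the result.

    a, b, c = 0.1, 0.2, 0.3
    print((a + b) + c)   # 0.6000000000000001
    print(a + (b + c))   # 0.6
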
diff --git a/rpython/jit/metainterp/test/test_vector.py b/rpython/jit/metainterp/test/test_vector.py
--- a/rpython/jit/metainterp/test/test_vector.py
+++ b/rpython/jit/metainterp/test/test_vector.py
@@ -414,7 +414,9 @@
lambda a,b:
lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed)
small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False, allow_infinity=False)
test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE)
- test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE)
+ # PRECISION loss: the numbers are accumulated (associative and commutative properties must hold),
+ # so you can end up multiplying a small number by a huge number, losing precision
+ # test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE)
def test_constant_expand(self):
diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py
--- a/rpython/memory/gctransform/transform.py
+++ b/rpython/memory/gctransform/transform.py
@@ -427,6 +427,13 @@
return result
mh._ll_malloc_fixedsize = _ll_malloc_fixedsize
+ def _ll_malloc_fixedsize_zero(size):
+ result = mh.allocate(size, zero=True)
+ if not result:
+ raise MemoryError()
+ return result
+ mh._ll_malloc_fixedsize_zero = _ll_malloc_fixedsize_zero
+
def _ll_compute_size(length, size, itemsize):
try:
varsize = ovfcheck(itemsize * length)
@@ -453,10 +460,9 @@
def _ll_malloc_varsize_no_length_zero(length, size, itemsize):
tot_size = _ll_compute_size(length, size, itemsize)
- result = mh.allocate(tot_size)
+ result = mh.allocate(tot_size, zero=True)
if not result:
raise MemoryError()
- llmemory.raw_memclear(result, tot_size)
return result
mh.ll_malloc_varsize_no_length_zero = _ll_malloc_varsize_no_length_zero
@@ -470,17 +476,16 @@
mh = mallocHelpers()
mh.allocate = llmemory.raw_malloc
ll_raw_malloc_fixedsize = mh._ll_malloc_fixedsize
+ ll_raw_malloc_fixedsize_zero = mh._ll_malloc_fixedsize_zero
ll_raw_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
ll_raw_malloc_varsize = mh.ll_malloc_varsize
ll_raw_malloc_varsize_no_length_zero = mh.ll_malloc_varsize_no_length_zero
- stack_mh = mallocHelpers()
- stack_mh.allocate = lambda size: llop.stack_malloc(llmemory.Address, size)
- ll_stack_malloc_fixedsize = stack_mh._ll_malloc_fixedsize
-
if self.translator:
self.raw_malloc_fixedsize_ptr = self.inittime_helper(
ll_raw_malloc_fixedsize, [lltype.Signed], llmemory.Address)
+ self.raw_malloc_fixedsize_zero_ptr = self.inittime_helper(
+ ll_raw_malloc_fixedsize_zero, [lltype.Signed], llmemory.Address)
self.raw_malloc_varsize_no_length_ptr = self.inittime_helper(
ll_raw_malloc_varsize_no_length, [lltype.Signed]*3,
llmemory.Address, inline=False)
self.raw_malloc_varsize_ptr = self.inittime_helper(
@@ -488,9 +493,6 @@
self.raw_malloc_varsize_no_length_zero_ptr = self.inittime_helper(
ll_raw_malloc_varsize_no_length_zero, [lltype.Signed]*3,
llmemory.Address, inline=False)
- self.stack_malloc_fixedsize_ptr = self.inittime_helper(
- ll_stack_malloc_fixedsize, [lltype.Signed], llmemory.Address)
-
def gct_malloc(self, hop, add_flags=None):
TYPE = hop.spaceop.result.concretetype.TO
assert not TYPE._is_varsize()
@@ -503,21 +505,16 @@
hop.cast_result(v_raw)
def gct_fv_raw_malloc(self, hop, flags, TYPE, c_size):
- v_raw = hop.genop("direct_call", [self.raw_malloc_fixedsize_ptr,
c_size],
+ if flags.get('zero'):
+ ll_func = self.raw_malloc_fixedsize_zero_ptr
+ else:
+ ll_func = self.raw_malloc_fixedsize_ptr
+ v_raw = hop.genop("direct_call", [ll_func, c_size],
resulttype=llmemory.Address)
- if flags.get('zero'):
- hop.genop("raw_memclear", [v_raw, c_size])
if flags.get('track_allocation', True):
hop.genop("track_alloc_start", [v_raw])
return v_raw
- def gct_fv_stack_malloc(self, hop, flags, TYPE, c_size):
- v_raw = hop.genop("direct_call", [self.stack_malloc_fixedsize_ptr,
c_size],
- resulttype=llmemory.Address)
- if flags.get('zero'):
- hop.genop("raw_memclear", [v_raw, c_size])
- return v_raw
-
def gct_malloc_varsize(self, hop, add_flags=None):
flags = hop.spaceop.args[1].value
if add_flags:
diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
--- a/rpython/rlib/rposix.py
+++ b/rpython/rlib/rposix.py
@@ -1778,25 +1778,23 @@
finally:
lltype.free(l_utsbuf, flavor='raw')
-# These are actually macros on some/most systems
-c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT)
-c_major = external('major', [rffi.INT], rffi.INT)
-c_minor = external('minor', [rffi.INT], rffi.INT)
+if sys.platform != 'win32':
+ # These are actually macros on some/most systems
+ c_makedev = external('makedev', [rffi.INT, rffi.INT], rffi.INT, macro=True)
+ c_major = external('major', [rffi.INT], rffi.INT, macro=True)
+ c_minor = external('minor', [rffi.INT], rffi.INT, macro=True)
-@replace_os_function('makedev')
-@jit.dont_look_inside
-def makedev(maj, min):
- return c_makedev(maj, min)
+ @replace_os_function('makedev')
+ def makedev(maj, min):
+ return c_makedev(maj, min)
-@replace_os_function('major')
-@jit.dont_look_inside
-def major(dev):
- return c_major(dev)
+ @replace_os_function('major')
+ def major(dev):
+ return c_major(dev)
-@replace_os_function('minor')
-@jit.dont_look_inside
-def minor(dev):
- return c_minor(dev)
+ @replace_os_function('minor')
+ def minor(dev):
+ return c_minor(dev)
#___________________________________________________________________
diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -398,7 +398,7 @@
baseofs = offsetof(_c.sockaddr_un, 'c_sun_path')
self.setdata(sun, baseofs + len(path))
rffi.setintfield(sun, 'c_sun_family', AF_UNIX)
- if _c.linux and path.startswith('\x00'):
+ if _c.linux and path[0] == '\x00':
# Linux abstract namespace extension
if len(path) > sizeof(_c.sockaddr_un.c_sun_path):
raise RSocketError("AF_UNIX path too long")
diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py
--- a/rpython/rlib/runicode.py
+++ b/rpython/rlib/runicode.py
@@ -327,6 +327,16 @@
def unicode_encode_utf_8(s, size, errors, errorhandler=None,
allow_surrogates=allow_surrogate_by_default):
+ # In this function, allow_surrogates can be:
+ #
+ # * True: surrogates are always allowed. A valid surrogate pair
+ # is replaced with the non-BMP unicode char it stands for,
+ # which is then encoded as 4 bytes.
+ #
+ # * False: surrogates are always forbidden.
+ #
+ # See also unicode_encode_utf8sp().
+ #
if errorhandler is None:
errorhandler = default_unicode_error_encode
return unicode_encode_utf_8_impl(s, size, errors, errorhandler,
@@ -391,6 +401,33 @@
_encodeUCS4(result, ch)
return result.build()
+def unicode_encode_utf8sp(s, size):
+ # Surrogate-preserving utf-8 encoding. Any surrogate character
+ # turns into its 3-bytes encoding, whether it is paired or not.
+ # This should always be reversible, and the reverse is the regular
+ # str_decode_utf_8() with allow_surrogates=True.
+ assert(size >= 0)
+ result = StringBuilder(size)
+ pos = 0
+ while pos < size:
+ ch = ord(s[pos])
+ pos += 1
+ if ch < 0x80:
+ # Encode ASCII
+ result.append(chr(ch))
+ elif ch < 0x0800:
+ # Encode Latin-1
+ result.append(chr((0xc0 | (ch >> 6))))
+ result.append(chr((0x80 | (ch & 0x3f))))
+ elif ch < 0x10000:
+ # Encode UCS2 Unicode ordinals, and surrogates
+ result.append((chr((0xe0 | (ch >> 12)))))
+ result.append((chr((0x80 | ((ch >> 6) & 0x3f)))))
+ result.append((chr((0x80 | (ch & 0x3f)))))
+ else:
+ _encodeUCS4(result, ch)
+ return result.build()
+
# ____________________________________________________________
# utf-16
diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py
--- a/rpython/rlib/test/test_rposix.py
+++ b/rpython/rlib/test/test_rposix.py
@@ -281,6 +281,12 @@
def test_isatty(self):
assert rposix.isatty(-1) is False
+ @py.test.mark.skipif("not hasattr(rposix, 'makedev')")
+ def test_makedev(self):
+ dev = rposix.makedev(24, 7)
+ assert rposix.major(dev) == 24
+ assert rposix.minor(dev) == 7
+
@py.test.mark.skipif("not hasattr(os, 'ttyname')")
class TestOsExpect(ExpectTest):
diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py
--- a/rpython/rlib/test/test_runicode.py
+++ b/rpython/rlib/test/test_runicode.py
@@ -812,6 +812,21 @@
py.test.raises(UnicodeEncodeError, encoder, u' 12, \u1234 ', 7, None)
assert encoder(u'u\u1234', 2, 'replace') == 'u?'
+ def test_encode_utf8sp(self):
+ # for the following test, go to lengths to avoid CPython's optimizer
+ # and .pyc file storage, which collapse the two surrogates into one
+ c = u"\udc00"
+ for input, expected in [
+ (u"", ""),
+ (u"abc", "abc"),
+ (u"\u1234", "\xe1\x88\xb4"),
+ (u"\ud800", "\xed\xa0\x80"),
+ (u"\udc00", "\xed\xb0\x80"),
+ (u"\ud800" + c, "\xed\xa0\x80\xed\xb0\x80"),
+ ]:
+ got = runicode.unicode_encode_utf8sp(input, len(input))
+ assert got == expected
+
class TestTranslation(object):
def setup_class(cls):
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -997,11 +997,14 @@
# __________________________________________________________
# operations on addresses
- def op_raw_malloc(self, size):
+ def op_raw_malloc(self, size, zero):
+ assert lltype.typeOf(size) == lltype.Signed
+ return llmemory.raw_malloc(size, zero=zero)
+
+ def op_boehm_malloc(self, size):
assert lltype.typeOf(size) == lltype.Signed
return llmemory.raw_malloc(size)
-
- op_boehm_malloc = op_boehm_malloc_atomic = op_raw_malloc
+ op_boehm_malloc_atomic = op_boehm_malloc
def op_boehm_register_finalizer(self, p, finalizer):
pass
@@ -1069,9 +1072,6 @@
assert offset.TYPE == ARGTYPE
getattr(addr, str(ARGTYPE).lower())[offset.repeat] = value
- def op_stack_malloc(self, size): # mmh
- raise NotImplementedError("backend only")
-
def op_track_alloc_start(self, addr):
# we don't do tracking at this level
checkadr(addr)
diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py
--- a/rpython/rtyper/lltypesystem/llarena.py
+++ b/rpython/rtyper/lltypesystem/llarena.py
@@ -506,13 +506,17 @@
llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address,
sandboxsafe=True, _nowrapper=True)
+llimpl_calloc = rffi.llexternal('calloc', [lltype.Signed, lltype.Signed],
+ llmemory.Address,
+ sandboxsafe=True, _nowrapper=True)
llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void,
sandboxsafe=True, _nowrapper=True)
def llimpl_arena_malloc(nbytes, zero):
- addr = llimpl_malloc(nbytes)
- if bool(addr):
- llimpl_arena_reset(addr, nbytes, zero)
+ if zero:
+ addr = llimpl_calloc(nbytes, 1)
+ else:
+ addr = llimpl_malloc(nbytes)
return addr
llimpl_arena_malloc._always_inline_ = True
register_external(arena_malloc, [int, int], llmemory.Address,
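The llarena change above folds "malloc, then clear" into a single calloc() call when a
zeroed arena is requested. A rough plain-Python analogue via ctypes, only to illustrate
the shape of the choice (the helper name arena_malloc_demo is invented):

    import ctypes, ctypes.util
    libc = ctypes.CDLL(ctypes.util.find_library("c"))
    libc.malloc.restype = ctypes.c_void_p
    libc.calloc.restype = ctypes.c_void_p
    libc.free.argtypes = [ctypes.c_void_p]

    def arena_malloc_demo(nbytes, zero):
        # calloc hands back already-zeroed memory in one call
        return libc.calloc(nbytes, 1) if zero else libc.malloc(nbytes)

    p = arena_malloc_demo(16, True)
    assert ctypes.string_at(p, 16) == b"\x00" * 16
    libc.free(p)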
diff --git a/rpython/rtyper/lltypesystem/llmemory.py b/rpython/rtyper/lltypesystem/llmemory.py
--- a/rpython/rtyper/lltypesystem/llmemory.py
+++ b/rpython/rtyper/lltypesystem/llmemory.py
@@ -7,6 +7,7 @@
import weakref
from rpython.annotator.bookkeeper import analyzer_for
from rpython.annotator.model import SomeInteger, SomeObject, SomeString, s_Bool
+from rpython.annotator.model import SomeBool
from rpython.rlib.objectmodel import Symbolic, specialize
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lltype import SomePtr
@@ -936,14 +937,15 @@
# ____________________________________________________________
-def raw_malloc(size):
+def raw_malloc(size, zero=False):
if not isinstance(size, AddressOffset):
raise NotImplementedError(size)
- return size._raw_malloc([], zero=False)
+ return size._raw_malloc([], zero=zero)
@analyzer_for(raw_malloc)
-def ann_raw_malloc(s_size):
+def ann_raw_malloc(s_size, s_zero=None):
assert isinstance(s_size, SomeInteger) # XXX add noneg...?
+ assert s_zero is None or isinstance(s_zero, SomeBool)
return SomeAddress()
diff --git a/rpython/rtyper/lltypesystem/lloperation.py b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -396,7 +396,6 @@
'raw_store': LLOp(canrun=True),
'bare_raw_store': LLOp(),
'gc_load_indexed': LLOp(sideeffects=False, canrun=True),
- 'stack_malloc': LLOp(), # mmh
'track_alloc_start': LLOp(),
'track_alloc_stop': LLOp(),
'adr_add': LLOp(canfold=True),
diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py
--- a/rpython/rtyper/rbuiltin.py
+++ b/rpython/rtyper/rbuiltin.py
@@ -574,10 +574,14 @@
# memory addresses
@typer_for(llmemory.raw_malloc)
-def rtype_raw_malloc(hop):
- v_size, = hop.inputargs(lltype.Signed)
+def rtype_raw_malloc(hop, i_zero=None):
+ v_size = hop.inputarg(lltype.Signed, arg=0)
+ v_zero, = parse_kwds(hop, (i_zero, None))
+ if v_zero is None:
+ v_zero = hop.inputconst(lltype.Bool, False)
hop.exception_cannot_occur()
- return hop.genop('raw_malloc', [v_size], resulttype=llmemory.Address)
+ return hop.genop('raw_malloc', [v_size, v_zero],
+ resulttype=llmemory.Address)
@typer_for(llmemory.raw_malloc_usage)
def rtype_raw_malloc_usage(hop):
diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py
--- a/rpython/rtyper/test/test_llinterp.py
+++ b/rpython/rtyper/test/test_llinterp.py
@@ -372,19 +372,6 @@
result = interpret(getids, [i, j])
assert result
-def test_stack_malloc():
- py.test.skip("stack-flavored mallocs no longer supported")
- class A(object):
- pass
- def f():
- a = A()
- a.i = 1
- return a.i
- interp, graph = get_interpreter(f, [])
- graph.startblock.operations[0].args[1] = inputconst(Void, {'flavor': "stack"})
- result = interp.eval_graph(graph, [])
- assert result == 1
-
def test_invalid_stack_access():
py.test.skip("stack-flavored mallocs no longer supported")
class A(object):
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -608,16 +608,6 @@
return 'GC_REGISTER_FINALIZER(%s, (GC_finalization_proc)%s, NULL, NULL, NULL);' \
% (self.expr(op.args[0]), self.expr(op.args[1]))
- def OP_RAW_MALLOC(self, op):
- eresult = self.expr(op.result)
- esize = self.expr(op.args[0])
- return "OP_RAW_MALLOC(%s, %s, void *);" % (esize, eresult)
-
- def OP_STACK_MALLOC(self, op):
- eresult = self.expr(op.result)
- esize = self.expr(op.args[0])
- return "OP_STACK_MALLOC(%s, %s, void *);" % (esize, eresult)
-
def OP_DIRECT_FIELDPTR(self, op):
return self.OP_GETFIELD(op, ampersand='&')
diff --git a/rpython/translator/c/src/mem.h b/rpython/translator/c/src/mem.h
--- a/rpython/translator/c/src/mem.h
+++ b/rpython/translator/c/src/mem.h
@@ -8,11 +8,14 @@
#define OP_STACK_CURRENT(r) r = (Signed)&r
-#define OP_RAW_MALLOC(size, r, restype) { \
- r = (restype) malloc(size); \
- if (r != NULL) { \
- COUNT_MALLOC; \
- } \
+#define OP_RAW_MALLOC(size, zero, result) { \
+ if (zero) \
+ result = calloc(size, 1); \
+ else \
+ result = malloc(size); \
+ if (result != NULL) { \
+ COUNT_MALLOC; \
+ } \
}
#define OP_RAW_FREE(p, r) free(p); COUNT_FREE;
@@ -26,10 +29,6 @@
#define alloca _alloca
#endif
-#define OP_STACK_MALLOC(size,r,restype) \
- r = (restype) alloca(size); \
- if (r != NULL) memset((void*) r, 0, size);
-
#define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size);
#define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size);
diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py
--- a/rpython/translator/c/test/test_lladdresses.py
+++ b/rpython/translator/c/test/test_lladdresses.py
@@ -32,7 +32,29 @@
assert res == 42
res = fc(1)
assert res == 1
-
+
+def test_memory_access_zero():
+ def f():
+ blocks = []
+ for i in range(1000):
+ addr = raw_malloc(16, zero=False)
+ addr.signed[1] = 10000 + i
+ blocks.append(addr)
+ for addr in blocks:
+ raw_free(addr)
+ result = 0
+ blocks = []
+ for i in range(1000):
+ addr = raw_malloc(16, zero=True)
+ result |= addr.signed[1]
+ blocks.append(addr)
+ for addr in blocks:
+ raw_free(addr)
+ return result
+ fc = compile(f, [])
+ res = fc()
+ assert res == 0
+
def test_memory_float():
S = lltype.GcStruct("S", ("x", lltype.Float), ("y", lltype.Float))
offset = FieldOffset(S, 'x')
@@ -155,18 +177,6 @@
fn = compile(f, [int])
assert fn(1) == 2
-def test_flavored_malloc_stack():
- class A(object):
- _alloc_flavor_ = "stack"
- def __init__(self, val):
- self.val = val
- def f(x):
- a = A(x + 1)
- result = a.val
- return result
- fn = compile(f, [int])
- assert fn(1) == 2
-
def test_gcref():
if sys.platform == 'darwin':
py.test.skip("'boehm' may crash")