Author: Armin Rigo <ar...@tunes.org>
Branch: ppc-updated-backend
Changeset: r80259:c63862115db0
Date: 2015-10-16 11:19 +0200
http://bitbucket.org/pypy/pypy/changeset/c63862115db0/
Log: hg merge default diff too long, truncating to 2000 out of 31445 lines diff --git a/.gitignore b/.gitignore --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,14 @@ .hg .svn +# VIM +*.swp +*.swo + *.pyc *.pyo *~ +__pycache__/ bin/pypy-c include/*.h @@ -22,4 +27,6 @@ pypy/translator/goal/pypy-c pypy/translator/goal/target*-c release/ +!pypy/tool/release/ rpython/_cache/ +__pycache__/ diff --git a/dotviewer/graphclient.py b/dotviewer/graphclient.py --- a/dotviewer/graphclient.py +++ b/dotviewer/graphclient.py @@ -127,16 +127,8 @@ return spawn_graphserver_handler((host, port)) def spawn_local_handler(): - if hasattr(sys, 'pypy_objspaceclass'): - # if 'python' is actually PyPy, e.g. in a virtualenv, then - # try hard to find a real CPython - try: - python = subprocess.check_output( - 'env -i $SHELL -l -c "which python"', shell=True).strip() - except subprocess.CalledProcessError: - # did not work, fall back to 'python' - python = 'python' - else: + python = os.getenv('PYPY_PYGAME_PYTHON') + if not python: python = sys.executable args = [python, '-u', GRAPHSERVER, '--stdio'] p = subprocess.Popen(args, diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -158,7 +158,7 @@ RegrTest('test_codecs.py', core=True, usemodules='_multibytecodec'), RegrTest('test_codeop.py', core=True), RegrTest('test_coding.py', core=True), - RegrTest('test_coercion.py', core=True), + RegrTest('test_coercion.py', core=True, usemodules='struct'), RegrTest('test_collections.py', usemodules='binascii struct'), RegrTest('test_colorsys.py'), RegrTest('test_commands.py'), diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -310,6 +310,22 @@ """ return self._backend.from_buffer(self.BCharA, python_buffer) + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. @@ -609,7 +625,7 @@ def make_accessor_locked(name): key = 'function ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) try: value = backendlib.load_function(BType, name) @@ -620,7 +636,7 @@ # key = 'variable ' + name if key in ffi._parser._declarations: - tp = ffi._parser._declarations[key] + tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable @@ -631,12 +647,23 @@ # if not copied_enums: from . 
import model - for key, tp in ffi._parser._declarations.items(): + error = None + for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): continue + try: + tp.check_not_partial() + except Exception as e: + error = e + continue for enumname, enumval in zip(tp.enumerators, tp.enumvalues): if enumname not in library.__dict__: library.__dict__[enumname] = enumval + if error is not None: + if name in library.__dict__: + return # ignore error, about a different enum + raise error + for key, val in ffi._parser._int_constants.items(): if key not in library.__dict__: library.__dict__[key] = val diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -26,6 +26,9 @@ _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) +_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") +_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") +_r_cdecl = re.compile(r"\b__cdecl\b") def _get_parser(): global _parser_cache @@ -44,6 +47,14 @@ macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) + # BIG HACK: replace WINAPI or __stdcall with "volatile const". + # It doesn't make sense for the return type of a function to be + # "volatile volatile const", so we abuse it to detect __stdcall... + # Hack number 2 is that "int(volatile *fptr)();" is not valid C + # syntax, so we place the "volatile" before the opening parenthesis. + csource = _r_stdcall2.sub(' volatile volatile const(', csource) + csource = _r_stdcall1.sub(' volatile volatile const ', csource) + csource = _r_cdecl.sub(' ', csource) # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # Replace "...}" with "__dotdotdotNUM__}". 
This construction should @@ -192,6 +203,7 @@ if not decl.name: raise api.CDefError("typedef does not declare any name", decl) + quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1] == '__dotdotdot__'): realtype = self._get_unknown_type(decl) @@ -202,8 +214,9 @@ decl.type.type.type.names == ['__dotdotdot__']): realtype = model.unknown_ptr_type(decl.name) else: - realtype = self._get_type(decl.type, name=decl.name) - self._declare('typedef ' + decl.name, realtype) + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name) + self._declare('typedef ' + decl.name, realtype, quals=quals) else: raise api.CDefError("unrecognized construct", decl) except api.FFIError as e: @@ -255,9 +268,9 @@ def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): - tp = self._get_type(node, name=decl.name) + tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) else: if isinstance(node, pycparser.c_ast.Struct): @@ -271,9 +284,10 @@ decl) # if decl.name: - tp = self._get_type(node, partial_length_ok=True) + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) if tp.is_raw_function: - tp = self._get_type_pointer(tp) + tp = self._get_type_pointer(tp, quals) self._declare('function ' + decl.name, tp) elif (tp.is_integer_type() and hasattr(decl, 'init') and @@ -287,10 +301,10 @@ _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) - elif self._is_constant_globalvar(node): - self._declare('constant ' + decl.name, tp) + elif (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) else: - self._declare('variable ' + decl.name, tp) + self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] @@ -298,40 +312,51 @@ exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise api.CDefError("unknown identifier '%s'" % (exprnode.name,)) - return self._get_type(exprnode.type) + tp, quals = self._get_type_and_quals(exprnode.type) + return tp - def _declare(self, name, obj, included=False): + def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: - if self._declarations[name] is obj: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: return if not self._override: raise api.FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() - self._declarations[name] = obj + self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) - def _get_type_pointer(self, type, const=False, declname=None): + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): - 
return model.NamedPointerType(type, declname) - if const: - return model.ConstPointerType(type) - return model.PointerType(type) + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) - def _get_type(self, typenode, name=None, partial_length_ok=False): + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): - type = self._declarations['typedef ' + typenode.type.names[0]] - return type + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type @@ -340,18 +365,19 @@ else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) - tp = self._get_type(typenode.type, + tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok) - return model.ArrayType(tp, length) + return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type - const = (isinstance(typenode.type, pycparser.c_ast.TypeDecl) - and 'const' in typenode.type.quals) - return self._get_type_pointer(self._get_type(typenode.type), const, - declname=name) + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. 
get it from .names, but reduce @@ -379,35 +405,38 @@ names = newnames + names ident = ' '.join(names) if ident == 'void': - return model.void_type + return model.void_type, quals if ident == '__dotdotdot__': raise api.FFIError(':%d: bad usage of "..."' % typenode.coord.line) - return resolve_common_type(ident) + return resolve_common_type(ident), quals # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' - return self._get_struct_union_enum_type('struct', type, name) + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' - return self._get_struct_union_enum_type('union', type, name) + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' - return self._get_struct_union_enum_type('enum', type, name) + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type - return self._parse_function_type(typenode, name) + return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, - nested=True) + nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, - nested=True) + nested=True), 0 # raise api.FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) @@ -426,28 +455,28 @@ raise api.CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) - args = [self._as_func_arg(self._get_type(argdeclnode.type)) + args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] - result = self._get_type(typenode.type) - return model.RawFunctionType(tuple(args), result, ellipsis) + result, quals = self._get_type_and_quals(typenode.type) + # the 'quals' on the result type are ignored. HACK: we absure them + # to detect __stdcall functions: we textually replace "__stdcall" + # with "volatile volatile const" above. + abi = None + if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway + if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: + abi = '__stdcall' + return model.RawFunctionType(tuple(args), result, ellipsis, abi) - def _as_func_arg(self, type): + def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): - return model.PointerType(type.item) + return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type - def _is_constant_globalvar(self, typenode): - if isinstance(typenode, pycparser.c_ast.PtrDecl): - return 'const' in typenode.quals - if isinstance(typenode, pycparser.c_ast.TypeDecl): - return 'const' in typenode.quals - return False - def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. 
# This is obscure, but needed because pycparser "unrolls" declarations @@ -486,7 +515,7 @@ else: explicit_name = name key = '%s %s' % (kind, name) - tp = self._declarations.get(key, None) + tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': @@ -528,6 +557,7 @@ fldnames = [] fldtypes = [] fldbitsize = [] + fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): @@ -541,7 +571,8 @@ else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False - type = self._get_type(decl.type, partial_length_ok=True) + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: @@ -549,9 +580,11 @@ fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) + fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" @@ -632,14 +665,12 @@ return tp def include(self, other): - for name, tp in other._declarations.items(): + for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] - if kind in ('struct', 'union', 'enum', 'anonymous'): - self._declare(name, tp, included=True) - elif kind == 'typedef': - self._declare(name, tp, included=True) + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) diff --git a/lib_pypy/cffi/ffiplatform.py b/lib_pypy/cffi/ffiplatform.py --- a/lib_pypy/cffi/ffiplatform.py +++ b/lib_pypy/cffi/ffiplatform.py @@ -14,17 +14,7 @@ LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] -def _hack_at_distutils(): - # Windows-only workaround for some configurations: see - # https://bugs.python.org/issue23246 (Python 2.7.9) - if sys.platform == "win32": - try: - import setuptools # for side-effects, patches distutils - except ImportError: - pass - def get_extension(srcfilename, modname, sources=(), **kwds): - _hack_at_distutils() # *before* the following import from distutils.core import Extension allsources = [srcfilename] allsources.extend(sources) @@ -47,7 +37,6 @@ def _build(tmpdir, ext): # XXX compact but horrible :-( - _hack_at_distutils() from distutils.core import Distribution import distutils.errors # diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,14 +1,29 @@ -import types +import types, sys import weakref from .lock import allocate_lock +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. 
+ # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False - def get_c_name(self, replace_with='', context='a C file'): + def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( @@ -18,6 +33,7 @@ replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: from .ffiplatform import VerificationError @@ -177,18 +193,21 @@ class BaseFunctionType(BaseType): - _attrs_ = ('args', 'result', 'ellipsis') + _attrs_ = ('args', 'result', 'ellipsis', 'abi') - def __init__(self, args, result, ellipsis): + def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis + self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) @@ -206,7 +225,7 @@ "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): - return FunctionPtrType(self.args, self.result, self.ellipsis) + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): @@ -217,24 +236,29 @@ args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass return global_cache(self, ffi, 'new_function_type', - tuple(args), result, self.ellipsis) + tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): - return RawFunctionType(self.args, self.result, self.ellipsis) + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): - _attrs_ = ('totype',) - _base_pattern = " *&" - _base_pattern_array = "(*&)" + _attrs_ = ('totype', 'quals') - def __init__(self, totype): + def __init__(self, totype, quals=0): self.totype = totype + self.quals = quals + extra = qualify(quals, " *&") if totype.is_array_type: - extra = self._base_pattern_array - else: - extra = self._base_pattern + extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): @@ -243,10 +267,8 @@ voidp_type = PointerType(void_type) - -class ConstPointerType(PointerType): - _base_pattern = " const *&" - _base_pattern_array = "(const *&)" +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) @@ -254,8 +276,8 @@ class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') - def __init__(self, totype, name): - PointerType.__init__(self, totype) + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' @@ -315,11 +337,12 @@ partial = False packed = False - def 
__init__(self, name, fldnames, fldtypes, fldbitsize): + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize + self.fldquals = fldquals self.build_c_name_with_marker() def has_anonymous_struct_fields(self): @@ -331,14 +354,17 @@ return False def enumfields(self): - for name, type, bitsize in zip(self.fldnames, self.fldtypes, - self.fldbitsize): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): if name == '' and isinstance(type, StructOrUnion): # nested anonymous struct/union for result in type.enumfields(): yield result else: - yield (name, type, bitsize) + yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists @@ -347,13 +373,16 @@ names = [] types = [] bitsizes = [] - for name, type, bitsize in self.enumfields(): + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) + fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, diff --git a/lib_pypy/cffi/parse_c_type.h b/lib_pypy/cffi/parse_c_type.h --- a/lib_pypy/cffi/parse_c_type.h +++ b/lib_pypy/cffi/parse_c_type.h @@ -5,7 +5,7 @@ #define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) #define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) -#define _CFFI_GETARG(cffi_opcode) (((uintptr_t)cffi_opcode) >> 8) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) #define _CFFI_OP_PRIMITIVE 1 #define _CFFI_OP_POINTER 3 diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py --- a/lib_pypy/cffi/recompiler.py +++ b/lib_pypy/cffi/recompiler.py @@ -195,17 +195,15 @@ elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): - for name1, tp1, _ in tp.enumfields(): + for name1, tp1, _, _ in tp.enumfields(): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) - def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) - def _generate(self, step_name): - for name, tp in self._get_declarations(): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, @@ -214,6 +212,7 @@ raise ffiplatform.VerificationError( "not implemented in recompile(): %r" % name) try: + self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) @@ -608,7 +607,11 @@ call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' - name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) @@ -711,7 +714,8 @@ if difference: repr_arguments = ', '.join(arguments) repr_arguments = 
repr_arguments or 'void' - name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments) + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: @@ -774,7 +778,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double @@ -789,7 +793,8 @@ ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -823,7 +828,7 @@ c_fields = [] if reason_for_not_expanding is None: enumfields = list(tp.enumfields()) - for fldname, fldtype, fbitsize in enumfields: + for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) # cname is None for _add_missing_struct_unions() only op = OP_NOOP @@ -879,7 +884,9 @@ # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. - for tp in list(self._struct_unions): + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " @@ -1004,6 +1011,8 @@ def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, @@ -1081,7 +1090,8 @@ # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) 
- prnt('static ' + tp.get_c_name('*_cffi_var_%s(void)' % (name,))) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') @@ -1130,7 +1140,13 @@ else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis)) + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -197,7 +197,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -468,7 +471,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -477,7 +480,8 @@ # only accept exactly the type declared. try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -488,7 +492,7 @@ prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -552,7 +556,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/vengine_gen.py b/lib_pypy/cffi/vengine_gen.py --- a/lib_pypy/cffi/vengine_gen.py +++ b/lib_pypy/cffi/vengine_gen.py @@ -87,7 +87,10 @@ return library def _get_declarations(self): - return sorted(self.ffi._parser._declarations.items()) + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst def _generate(self, step_name): for name, tp in self._get_declarations(): @@ -156,7 +159,11 @@ arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) - funcdecl = ' %s(%s)' % (wrappername, arglist) + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') @@ -260,7 +267,7 @@ prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') - for fname, ftype, fbitsize in 
tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double @@ -269,7 +276,8 @@ # only accept exactly the type declared. try: prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname), fname)) + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) except ffiplatform.VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') @@ -280,7 +288,7 @@ prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) @@ -342,7 +350,7 @@ check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 - for fname, ftype, fbitsize in tp.enumfields(): + for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), diff --git a/lib_pypy/cffi/verifier.py b/lib_pypy/cffi/verifier.py --- a/lib_pypy/cffi/verifier.py +++ b/lib_pypy/cffi/verifier.py @@ -22,6 +22,16 @@ s = s.encode('ascii') super(NativeIO, self).write(s) +def _hack_at_distutils(): + # Windows-only workaround for some configurations: see + # https://bugs.python.org/issue23246 (Python 2.7 with + # a specific MS compiler suite download) + if sys.platform == "win32": + try: + import setuptools # for side-effects, patches distutils + except ImportError: + pass + class Verifier(object): @@ -112,6 +122,7 @@ return basename def get_extension(self): + _hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -39,8 +39,9 @@ "_csv", "cppyy", "_pypyjson" ]) -if (sys.platform.startswith('linux') and os.uname()[4] == 'x86_64' - and sys.maxint > 2**32): # it's not enough that we get x86_64 +if ((sys.platform.startswith('linux') or sys.platform == 'darwin') + and os.uname()[4] == 'x86_64' and sys.maxint > 2**32): + # it's not enough that we get x86_64 working_modules.add('_vmprof') translation_modules = default_modules.copy() diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst --- a/pypy/doc/extending.rst +++ b/pypy/doc/extending.rst @@ -5,8 +5,8 @@ with any external library. Right now, there are the following possibilities of providing -third-party modules for the PyPy python interpreter (in order of -usefulness): +third-party modules for the PyPy python interpreter (in order, from most +directly useful to most messy to use with PyPy): * Write them in pure Python and use CFFI_. @@ -83,7 +83,7 @@ RPython Mixed Modules -===================== +--------------------- This is the internal way to write built-in extension modules in PyPy. It cannot be used by any 3rd-party module: the extension modules are diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -67,7 +67,7 @@ The other commands of ``setup.py`` are available too, like ``build``. .. _PyPI: https://pypi.python.org/pypi -.. _`use virtualenv (as documented here)`: getting-started.html#installing-using-virtualenv +.. 
_`use virtualenv (as documented here)`: install.html#installing-using-virtualenv Module xyz does not work in the sandboxed PyPy? diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-15.11.0.rst release-2.6.1.rst release-2.6.0.rst release-2.5.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-15.11.0.rst whatsnew-2.6.1.rst whatsnew-2.6.0.rst whatsnew-2.5.1.rst diff --git a/pypy/doc/release-15.11.0.rst b/pypy/doc/release-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-15.11.0.rst @@ -0,0 +1,191 @@ +============ +PyPy 15.11.0 +============ + +We're pleased and proud to unleash PyPy 15.11, a major update of the PyPy +python2.7.10 compatible interpreter with a Just In Time compiler. +We have improved `warmup time and memory overhead used for tracing`_, added +`vectorization`_ for numpy and general loops where possible on x86 hardware, +refactored rough edges in rpython, and increased functionality of numpy. + +You can download the PyPy 15.11 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors (7 new ones since PyPy 2.6.0) and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + + +Vectorization +============= + +Richard Plangger began work in March and continued over a Google Summer of Code +to add a vectorization step to the trace optimizer. The step recognizes common +constructs and emits SIMD code where possible, much as any modern compiler does. +This vectorization happens while tracing running code, so it is actually easier +at run-time to determine the +availability of possible vectorization than it is for ahead-of-time compilers. + +Availability of SIMD hardware is detected at run time, without needing to +precompile various code paths into the executable. + +Internal Refactoring and Warmup Time Improvement +================================================ + +Maciej Fijalkowski and Armin Rigo refactored internals of rpython that now allow +PyPy to more efficiently use `guards`_ in jitted code. They also rewrote unrolling, +leading to a warmup time improvement of 20% or so at the cost of a minor +regression in jitted code speed. + +Numpy +===== + +Our implementation of numpy continues to improve. ndarray and the numeric dtypes +are very close to feature-complete; record, string and unicode dtypes are mostly +supported. We have reimplemented numpy linalg, random and fft as cffi-1.0 +modules that call out to the same underlying libraries that upstream numpy uses. +Please try it out, especially using the new vectorization (via --jit vec=1 on the +command line) and let us know what is missing for your code. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. Armin Rigo continued improving it, +and PyPy reaps the benefits of cffi-1.3: improved manangement of object +lifetimes, __stdcall on Win32, ffi.memmove(), ... + +.. 
_`warmup time and memory overhead used for tracing`: http://morepypy.blogspot.com/2015/10 +.. _`vectorization`: http://pypyvecopt.blogspot.co.at/ +.. _`guards`: http://rpython.readthedocs.org/en/latest/glossary.html + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD_, freebsd_), +as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _OpenBSD: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/lang/pypy +.. _freebsd: https://svnweb.freebsd.org/ports/head/lang/pypy/ +.. _`dynamic languages`: http://pypyjs.org + +Highlights (since 2.6.1 release two months ago) +=============================================== + +* Bug Fixes + + * Applied OPENBSD downstream fixes + + * Fix a crash on non-linux when running more than 20 threads + + * In cffi, ffi.new_handle() is more cpython compliant + + * Accept unicode in functions inside the _curses cffi backend exactly like cpython + + * Fix a segfault in itertools.islice() + + * Use gcrootfinder=shadowstack by default, asmgcc on linux only + + * Fix ndarray.copy() for upstream compatability when copying non-contiguous arrays + + * Fix assumption that lltype.UniChar is unsigned + + * Fix a subtle bug with stacklets on shadowstack + + * Improve support for the cpython capi in cpyext (our capi compatibility + layer). Fixing these issues inspired some thought about cpyext in general, + stay tuned for more improvements + + * When loading dynamic libraries, in case of a certain loading error, retry + loading the library assuming it is actually a linker script, like on Arch + and Gentoo + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * Add an optimization pass to vectorize loops using x86 SIMD intrinsics. + + * Support __stdcall on Windows in CFFI + + * Improve debug logging when using PYPYLOG=??? 
+ + * Deal with platforms with no RAND_egd() in OpenSSL + + * Enable building _vmprof in translation on OS/X by default + +* Numpy: + + * Add support for ndarray.ctypes + + * Fast path for mixing numpy scalars and floats + + * Add support for creating Fortran-ordered ndarrays + + * Fix casting failures in linalg (by extending ufunc casting) + + * Recognize and disallow (for now) pickling of ndarrays with objects + embedded in them + +* Performance improvements and refactorings: + + * Reuse hashed keys across dictionaries and sets + + * Refactor JIT interals to improve warmup time by 20% or so at the cost of a + minor regression in JIT speed + + * Recognize patterns of common sequences in the JIT backends and optimize them + + * Make the garbage collecter more intcremental over external_malloc() calls + + * Share guard resume data where possible which reduces memory usage + + * Fast path for zip(list, list) + + * Reduce the number of checks in the JIT for lst[a:] + + * Move the non-optimizable part of callbacks outside the JIT + + * Factor in field immutability when invalidating heap information + + * Unroll itertools.izip_longest() with two sequences + + * Minor optimizations after analyzing output from `vmprof`_ and trace logs + + * Remove many class attributes in rpython classes + + * Handle getfield_gc_pure* and getfield_gc_* uniformly in heap.py + +.. _`vmprof`: https://vmprof.readthedocs.org +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-15.11.0.html + +Please try it out and let us know what you think. We welcome feedback, +we know you are using PyPy, please tell us about it! + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-15.11.0.rst b/pypy/doc/whatsnew-15.11.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-15.11.0.rst @@ -0,0 +1,87 @@ +======================== +What's new in PyPy 15.11 +======================== + +.. this is a revision shortly after release-2.6.1 +.. startrev: 07769be4057b + +.. branch: keys_with_hash +Improve the performance of dict.update() and a bunch of methods from +sets, by reusing the hash value stored in one dict when inspecting +or changing another dict with that key. + +.. branch: optresult-unroll +A major refactoring of the ResOperations that kills Box. Also rewrote +unrolling to enable future enhancements. Should improve warmup time +by 20% or so. + +.. branch: optimize-cond-call +Optimize common sequences of operations like +``int_lt/cond_call`` in the JIT backends + +.. branch: missing_openssl_include +Fix for missing headers in OpenBSD, already applied in downstream ports + +.. branch: gc-more-incremental +Remove a source of non-incremental-ness in the GC: now +external_malloc() no longer runs gc_step_until() any more. If there +is a currently-running major collection, we do only so many steps +before returning. This number of steps depends on the size of the +allocated object. It is controlled by tracking the general progress +of these major collection steps and the size of old objects that +keep adding up between them. + +.. branch: remember-tracing-counts +Reenable jithooks + +.. branch: detect_egd2 + +.. branch: shadowstack-no-move-2 +Issue #2141: fix a crash on Windows and OS/X and ARM when running +at least 20 threads. + +.. branch: numpy-ctypes + +Add support for ndarray.ctypes property. + +.. branch: share-guard-info + +Share guard resume data between consecutive guards that have only +pure operations and guards in between. + +.. 
branch: issue-2148 + +Fix performance regression on operations mixing numpy scalars and Python +floats, cf. issue #2148. + +.. branch: cffi-stdcall +Win32: support '__stdcall' in CFFI. + +.. branch: callfamily + +Refactorings of annotation and rtyping of function calls. + +.. branch: fortran-order + +Allow creation of fortran-ordered ndarrays + +.. branch: type_system-cleanup + +Remove some remnants of the old ootypesystem vs lltypesystem dichotomy. + +.. branch: cffi-handle-lifetime + +ffi.new_handle() returns handles that work more like CPython's: they +remain valid as long as the target exists (unlike the previous +version, where handles become invalid *before* the __del__ is called). + +.. branch: ufunc-casting + +allow automatic casting in ufuncs (and frompypyfunc) to cast the +arguments to the allowed function type declarations, fixes various +failures in linalg cffi functions + +.. branch: vecopt +.. branch: vecopt-merge + +A new optimization pass to use emit vectorized loops diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,35 +1,8 @@ -======================= -What's new in PyPy 2.6+ -======================= +========================= +What's new in PyPy 15.11+ +========================= -.. this is a revision shortly after release-2.6.1 -.. startrev: 07769be4057b +.. this is a revision shortly after release-15.11.0 +.. startrev: d924723d483b -.. branch: keys_with_hash -Improve the performance of dict.update() and a bunch of methods from -sets, by reusing the hash value stored in one dict when inspecting -or changing another dict with that key. -.. branch: optresult-unroll -A major refactoring of the ResOperations that kills Box. Also rewrote -unrolling to enable future enhancements. Should improve warmup time -by 20% or so. - -.. branch: optimize-cond-call -Optimize common sequences of operations like -``int_lt/cond_call`` in the JIT backends - -.. branch: missing_openssl_include -Fix for missing headers in OpenBSD, already applied in downstream ports - -.. branch: gc-more-incremental -Remove a source of non-incremental-ness in the GC: now -external_malloc() no longer runs gc_step_until() any more. If there -is a currently-running major collection, we do only so many steps -before returning. This number of steps depends on the size of the -allocated object. It is controlled by tracking the general progress -of these major collection steps and the size of old objects that -keep adding up between them. - -.. 
branch: remember-tracing-counts -Reenable jithooks diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1058,6 +1058,14 @@ args = Arguments.frompacked(self, w_args, w_kwds) return self.call_args(w_callable, args) + def _try_fetch_pycode(self, w_func): + from pypy.interpreter.function import Function, Method + if isinstance(w_func, Method): + w_func = w_func.w_function + if isinstance(w_func, Function): + return w_func.code + return None + def call_function(self, w_func, *args_w): nargs = len(args_w) # used for pruning funccall versions if not self.config.objspace.disable_call_speedhacks and nargs < 5: diff --git a/pypy/module/__builtin__/app_functional.py b/pypy/module/__builtin__/app_functional.py --- a/pypy/module/__builtin__/app_functional.py +++ b/pypy/module/__builtin__/app_functional.py @@ -5,6 +5,7 @@ from __future__ import with_statement import operator from __pypy__ import resizelist_hint, newlist_hint +from __pypy__ import specialized_zip_2_lists # ____________________________________________________________ @@ -217,11 +218,16 @@ in length to the length of the shortest argument sequence.""" l = len(sequences) if l == 2: + # A very fast path if the two sequences are lists + seq0 = sequences[0] + seq1 = sequences[1] + try: + return specialized_zip_2_lists(seq0, seq1) + except TypeError: + pass # This is functionally the same as the code below, but more # efficient because it unrolls the loops over 'sequences'. # Only for two arguments, which is the most common case. - seq0 = sequences[0] - seq1 = sequences[1] iter0 = iter(seq0) iter1 = iter(seq1) hint = min(100000000, # max 100M diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -253,26 +253,27 @@ def binaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_meth = self.getattr(space, specialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, specialname, False) if w_meth is None: return space.w_NotImplemented return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_a, w_b) binaryop.func_name = name def rbinaryop(self, space, w_other): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None or w_a is self: - w_meth = self.getattr(space, rspecialname, False) + if isinstance(w_a, W_InstanceObject): + w_meth = w_a.getattr(space, rspecialname, False) if w_meth is None: return space.w_NotImplemented - return space.call_function(w_meth, w_other) + return space.call_function(w_meth, w_b) else: + # fall back to space.xxx() if coerce returns a non-W_Instance + # object as first argument return getattr(space, objspacename)(w_b, w_a) rbinaryop.func_name = "r" + name return binaryop, rbinaryop @@ -283,7 +284,7 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - return [None, None] + return [w_self, w_other] return space.fixedview(w_tup, 2) def descr_instance_new(space, w_type, w_class, w_dict=None): @@ -523,13 +524,9 @@ def descr_cmp(self, space, w_other): # do all the work here like CPython w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - else: - if (not 
isinstance(w_a, W_InstanceObject) and - not isinstance(w_b, W_InstanceObject)): - return space.cmp(w_a, w_b) + if (not isinstance(w_a, W_InstanceObject) and + not isinstance(w_b, W_InstanceObject)): + return space.cmp(w_a, w_b) if isinstance(w_a, W_InstanceObject): w_func = w_a.getattr(space, '__cmp__', False) if w_func is not None: @@ -636,42 +633,36 @@ def descr_pow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__pow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_a, w_b, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__pow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_rpow(self, space, w_other, w_modulo=None): if space.is_none(w_modulo): w_a, w_b = _coerce_helper(space, self, w_other) - if w_a is None: - w_a = self - w_b = w_other - if w_a is self: - w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other) - return space.w_NotImplemented + if isinstance(w_a, W_InstanceObject): + w_func = w_a.getattr(space, '__rpow__', False) + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other) else: return space.pow(w_b, w_a, space.w_None) else: # CPython also doesn't try coercion in this case w_func = self.getattr(space, '__rpow__', False) - if w_func is not None: - return space.call_function(w_func, w_other, w_modulo) - return space.w_NotImplemented + if w_func is None: + return space.w_NotImplemented + return space.call_function(w_func, w_other, w_modulo) def descr_next(self, space): w_func = self.getattr(space, 'next', False) diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -417,6 +417,22 @@ pass raises(TypeError, coerce, B(), []) + def test_coerce_inf(self): + class B: + def __coerce__(self, other): + return B(), B() + def __add__(self, other): + return 42 + assert B() + B() == 42 + + def test_coerce_reverse(self): + class CoerceNumber: + def __coerce__(self, other): + assert isinstance(other, int) + return (6, other) + assert 5 + CoerceNumber() == 11 + assert 2 ** CoerceNumber() == 64 + def test_binaryop(self): class A: def __add__(self, other): diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -83,6 +83,7 @@ 'newdict' : 'interp_dict.newdict', 'reversed_dict' : 'interp_dict.reversed_dict', 'strategy' : 'interp_magic.strategy', # dict,set,list + 'specialized_zip_2_lists' : 'interp_magic.specialized_zip_2_lists', 'set_debug' : 'interp_magic.set_debug', 'locals_to_fast' : 'interp_magic.locals_to_fast', 'save_module_content_for_future_reload': diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- 
a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -147,3 +147,7 @@ @unwrap_spec(w_module=MixedModule) def save_module_content_for_future_reload(space, w_module): w_module.save_module_content_for_future_reload() + +def specialized_zip_2_lists(space, w_list1, w_list2): + from pypy.objspace.std.specialisedtupleobject import specialized_zip_2_lists + return specialized_zip_2_lists(space, w_list1, w_list2) diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py --- a/pypy/module/_cffi_backend/__init__.py +++ b/pypy/module/_cffi_backend/__init__.py @@ -1,9 +1,16 @@ import sys from pypy.interpreter.mixedmodule import MixedModule -from rpython.rlib import rdynload +from rpython.rlib import rdynload, clibffi VERSION = "1.3.0" +FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI +try: + FFI_STDCALL = clibffi.FFI_STDCALL + has_stdcall = True +except AttributeError: + has_stdcall = False + class Module(MixedModule): @@ -40,12 +47,13 @@ 'string': 'func.string', 'buffer': 'cbuffer.buffer', + 'memmove': 'func.memmove', 'get_errno': 'cerrno.get_errno', 'set_errno': 'cerrno.set_errno', - 'FFI_DEFAULT_ABI': 'ctypefunc._get_abi(space, "FFI_DEFAULT_ABI")', - 'FFI_CDECL': 'ctypefunc._get_abi(space,"FFI_DEFAULT_ABI")',#win32 name + 'FFI_DEFAULT_ABI': 'space.wrap(%d)' % FFI_DEFAULT_ABI, + 'FFI_CDECL': 'space.wrap(%d)' % FFI_DEFAULT_ABI, # win32 name # CFFI 1.0 'FFI': 'ffi_obj.W_FFIObject', @@ -53,6 +61,9 @@ if sys.platform == 'win32': interpleveldefs['getwinerror'] = 'cerrno.getwinerror' + if has_stdcall: + interpleveldefs['FFI_STDCALL'] = 'space.wrap(%d)' % FFI_STDCALL + def get_dict_rtld_constants(): found = {} diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -1,11 +1,11 @@ """ Callbacks. """ -import sys, os +import sys, os, py -from rpython.rlib import clibffi, rweakref, jit, jit_libffi -from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import clibffi, jit, jit_libffi, rgc, objectmodel +from rpython.rlib.objectmodel import keepalive_until_here +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt from pypy.module._cffi_backend import cerrno, misc @@ -19,6 +19,23 @@ # ____________________________________________________________ +@jit.dont_look_inside +def make_callback(space, ctype, w_callable, w_error, w_onerror): + # Allocate a callback as a nonmovable W_CDataCallback instance, which + # we can cast to a plain VOIDP. As long as the object is not freed, + # we can cast the VOIDP back to a W_CDataCallback in reveal_callback(). + cdata = objectmodel.instantiate(W_CDataCallback, nonmovable=True) + gcref = rgc.cast_instance_to_gcref(cdata) + raw_cdata = rgc.hide_nonmovable_gcref(gcref) + cdata.__init__(space, ctype, w_callable, w_error, w_onerror, raw_cdata) + return cdata + +def reveal_callback(raw_ptr): + addr = rffi.cast(llmemory.Address, raw_ptr) + gcref = rgc.reveal_gcref(addr) + return rgc.try_cast_gcref_to_instance(W_CDataCallback, gcref) + + class Closure(object): """This small class is here to have a __del__ outside any cycle.""" @@ -34,10 +51,11 @@ class W_CDataCallback(W_CData): - #_immutable_fields_ = ... 
+ _immutable_fields_ = ['key_pycode'] w_onerror = None - def __init__(self, space, ctype, w_callable, w_error, w_onerror): + def __init__(self, space, ctype, w_callable, w_error, w_onerror, + raw_cdata): raw_closure = rffi.cast(rffi.CCHARP, clibffi.closureHeap.alloc()) self._closure = Closure(raw_closure) W_CData.__init__(self, space, raw_closure, ctype) @@ -46,6 +64,7 @@ raise oefmt(space.w_TypeError, "expected a callable object, not %T", w_callable) self.w_callable = w_callable + self.key_pycode = space._try_fetch_pycode(w_callable) if not space.is_none(w_onerror): if not space.is_true(space.callable(w_onerror)): raise oefmt(space.w_TypeError, @@ -64,8 +83,12 @@ convert_from_object_fficallback(fresult, self._closure.ll_error, w_error) # - self.unique_id = compute_unique_id(self) - global_callback_mapping.set(self.unique_id, self) + # We must setup the GIL here, in case the callback is invoked in + # some other non-Pythonic thread. This is the same as cffi on + # CPython. + if space.config.translation.thread: + from pypy.module.thread.os_thread import setup_threads + setup_threads(space) # cif_descr = self.getfunctype().cif_descr if not cif_descr: @@ -74,20 +97,13 @@ "return type or with '...'", self.getfunctype().name) with self as ptr: closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr) - unique_id = rffi.cast(rffi.VOIDP, self.unique_id) + unique_id = rffi.cast(rffi.VOIDP, raw_cdata) res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif, invoke_callback, unique_id) if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK: raise OperationError(space.w_SystemError, space.wrap("libffi failed to build this callback")) - # - # We must setup the GIL here, in case the callback is invoked in - # some other non-Pythonic thread. This is the same as cffi on - # CPython. 
- if space.config.translation.thread: - from pypy.module.thread.os_thread import setup_threads - setup_threads(space) def _repr_extra(self): space = self.space @@ -105,6 +121,7 @@ def invoke(self, ll_args): space = self.space ctype = self.getfunctype() + ctype = jit.promote(ctype) args_w = [] for i, farg in enumerate(ctype.fargs): ll_arg = rffi.cast(rffi.CCHARP, ll_args[i]) @@ -127,9 +144,6 @@ keepalive_until_here(self) # to keep self._closure.ll_error alive -global_callback_mapping = rweakref.RWeakValueDictionary(int, W_CDataCallback) - - def convert_from_object_fficallback(fresult, ll_res, w_res): space = fresult.space small_result = fresult.size < SIZE_OF_FFI_ARG @@ -178,7 +192,8 @@ @jit.dont_look_inside -def _handle_applevel_exception(space, callback, e, ll_res, extra_line): +def _handle_applevel_exception(callback, e, ll_res, extra_line): + space = callback.space callback.write_error_return_value(ll_res) if callback.w_onerror is None: callback.print_error(e, extra_line) @@ -199,19 +214,36 @@ extra_line="\nDuring the call to 'onerror', " "another exception occurred:\n\n") +def get_printable_location(key_pycode): + if key_pycode is None: + return 'cffi_callback <?>' + return 'cffi_callback ' + key_pycode.get_repr() -@jit.jit_callback("CFFI") +jitdriver = jit.JitDriver(name='cffi_callback', + greens=['callback.key_pycode'], + reds=['ll_res', 'll_args', 'callback'], + get_printable_location=get_printable_location) + +def py_invoke_callback(callback, ll_res, ll_args): + jitdriver.jit_merge_point(callback=callback, ll_res=ll_res, ll_args=ll_args) + extra_line = '' + try: + w_res = callback.invoke(ll_args) + extra_line = "Trying to convert the result back to C:\n" + callback.convert_result(ll_res, w_res) + except OperationError, e: + _handle_applevel_exception(callback, e, ll_res, extra_line) + def _invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata): """ Callback specification. ffi_cif - something ffi specific, don't care ll_args - rffi.VOIDPP - pointer to array of pointers to args - ll_restype - rffi.VOIDP - pointer to result + ll_res - rffi.VOIDP - pointer to result ll_userdata - a special structure which holds necessary information (what the real callback is for example), casted to VOIDP """ ll_res = rffi.cast(rffi.CCHARP, ll_res) - unique_id = rffi.cast(lltype.Signed, ll_userdata) - callback = global_callback_mapping.get(unique_id) + callback = reveal_callback(ll_userdata) if callback is None: # oups! try: @@ -224,17 +256,11 @@ misc._raw_memclear(ll_res, SIZE_OF_FFI_ARG) return # + space = callback.space must_leave = False - space = callback.space try: must_leave = space.threadlocals.try_enter_thread(space) - extra_line = '' - try: - w_res = callback.invoke(ll_args) - extra_line = "Trying to convert the result back to C:\n" - callback.convert_result(ll_res, w_res) - except OperationError, e: - _handle_applevel_exception(space, callback, e, ll_res, extra_line) + py_invoke_callback(callback, ll_res, ll_args) # except Exception, e: # oups! last-level attempt to recover. 
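Aside on the ccallback.py hunk above: the old rweakref-based global_callback_mapping is gone; instead each W_CDataCallback is allocated as a nonmovable instance and its raw address is smuggled through libffi's userdata pointer, then recovered in reveal_callback(). The snippet below is a minimal sketch of that round-trip, reduced to a toy class. It is RPython, so it only has meaning inside the RPython translation toolchain; MyCallback, hide and reveal are invented names for illustration, not part of the changeset.

    from rpython.rlib import rgc, objectmodel
    from rpython.rtyper.lltypesystem import rffi, llmemory

    class MyCallback(object):
        def __init__(self, payload):
            self.payload = payload

    def hide(payload):
        # allocate the instance as nonmovable, so the GC never moves it
        # and its raw address stays valid for as long as it is kept alive
        cb = objectmodel.instantiate(MyCallback, nonmovable=True)
        cb.__init__(payload)
        gcref = rgc.cast_instance_to_gcref(cb)
        raw = rgc.hide_nonmovable_gcref(gcref)
        # 'raw' can be handed to C as a plain void*; the caller must keep
        # 'cb' alive for as long as C may still use that pointer
        return cb, rffi.cast(rffi.VOIDP, raw)

    def reveal(raw_ptr):
        # called back from C: recover the original instance, or None if
        # the pointer does not actually point to a MyCallback
        addr = rffi.cast(llmemory.Address, raw_ptr)
        gcref = rgc.reveal_gcref(addr)
        return rgc.try_cast_gcref_to_instance(MyCallback, gcref)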
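The new jitdriver with greens=['callback.key_pycode'] makes the callback's code object the trace key, so each distinct Python callback gets its own JIT trace instead of being hidden behind jit.jit_callback. For readers unfamiliar with the pattern, here is a self-contained toy JitDriver in the same style; the bytecode loop and all names are invented for illustration only.

    from rpython.rlib import jit

    def get_location(pc, program):
        # the printable location is built from the green variables only
        return "toy-loop @%d in %r" % (pc, program)

    driver = jit.JitDriver(name='toy',
                           greens=['pc', 'program'],
                           reds=['acc'],
                           get_printable_location=get_location)

    def run(program):
        pc = 0
        acc = 0
        while pc < len(program):
            # greens identify the position in the user program; reds are
            # the mutable state that the generated machine code carries
            driver.jit_merge_point(pc=pc, program=program, acc=acc)
            op = program[pc]
            if op == '+':
                acc += 1
            elif op == '-':
                acc -= 1
            pc += 1
        return acc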
diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -18,6 +18,7 @@ _attrs_ = ['ctptr'] _immutable_fields_ = ['ctptr'] kind = "array" + is_nonfunc_pointer_or_array = True def __init__(self, space, ctptr, length, arraysize, extra): W_CTypePtrOrArray.__init__(self, space, arraysize, extra, 0, diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -12,6 +12,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from pypy.interpreter.error import OperationError, oefmt +from pypy.module import _cffi_backend from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -23,20 +24,22 @@ class W_CTypeFunc(W_CTypePtrBase): - _attrs_ = ['fargs', 'ellipsis', 'cif_descr'] - _immutable_fields_ = ['fargs[*]', 'ellipsis', 'cif_descr'] + _attrs_ = ['fargs', 'ellipsis', 'abi', 'cif_descr'] + _immutable_fields_ = ['fargs[*]', 'ellipsis', 'abi', 'cif_descr'] kind = "function" cif_descr = lltype.nullptr(CIF_DESCRIPTION) - def __init__(self, space, fargs, fresult, ellipsis): + def __init__(self, space, fargs, fresult, ellipsis, + abi=_cffi_backend.FFI_DEFAULT_ABI): assert isinstance(ellipsis, bool) - extra = self._compute_extra_text(fargs, fresult, ellipsis) + extra, xpos = self._compute_extra_text(fargs, fresult, ellipsis, abi) size = rffi.sizeof(rffi.VOIDP) - W_CTypePtrBase.__init__(self, space, size, extra, 2, fresult, + W_CTypePtrBase.__init__(self, space, size, extra, xpos, fresult, could_cast_anything=False) self.fargs = fargs self.ellipsis = ellipsis + self.abi = abi # fresult is stored in self.ctitem if not ellipsis: @@ -44,7 +47,7 @@ # at all. The cif is computed on every call from the actual # types passed in. For all other functions, the cif_descr # is computed here. 
- builder = CifDescrBuilder(fargs, fresult) + builder = CifDescrBuilder(fargs, fresult, abi) try: builder.rawallocate(self) except OperationError, e: @@ -76,7 +79,7 @@ ctypefunc.fargs = fvarargs ctypefunc.ctitem = self.ctitem #ctypefunc.cif_descr = NULL --- already provided as the default - CifDescrBuilder(fvarargs, self.ctitem).rawallocate(ctypefunc) + CifDescrBuilder(fvarargs, self.ctitem, self.abi).rawallocate(ctypefunc) return ctypefunc @rgc.must_be_light_finalizer @@ -84,8 +87,13 @@ if self.cif_descr: lltype.free(self.cif_descr, flavor='raw') - def _compute_extra_text(self, fargs, fresult, ellipsis): + def _compute_extra_text(self, fargs, fresult, ellipsis, abi): + from pypy.module._cffi_backend import newtype argnames = ['(*)('] + xpos = 2 + if _cffi_backend.has_stdcall and abi == _cffi_backend.FFI_STDCALL: + argnames[0] = '(__stdcall *)(' + xpos += len('__stdcall ') for i, farg in enumerate(fargs): if i > 0: argnames.append(', ') @@ -95,7 +103,7 @@ argnames.append(', ') argnames.append('...') argnames.append(')') - return ''.join(argnames) + return ''.join(argnames), xpos def _fget(self, attrchar): if attrchar == 'a': # args @@ -106,7 +114,7 @@ if attrchar == 'E': # ellipsis return self.space.wrap(self.ellipsis) if attrchar == 'A': # abi - return self.space.wrap(clibffi.FFI_DEFAULT_ABI) # XXX + return self.space.wrap(self.abi) return W_CTypePtrBase._fget(self, attrchar) def call(self, funcaddr, args_w): @@ -181,11 +189,6 @@ def set_mustfree_flag(data, flag): rffi.ptradd(data, -1)[0] = chr(flag) -def _get_abi(space, name): - abi = getattr(clibffi, name) - assert isinstance(abi, int) - return space.wrap(abi) - # ____________________________________________________________ @@ -260,9 +263,10 @@ class CifDescrBuilder(object): rawmem = lltype.nullptr(rffi.CCHARP.TO) - def __init__(self, fargs, fresult): + def __init__(self, fargs, fresult, fabi): self.fargs = fargs self.fresult = fresult + self.fabi = fabi def fb_alloc(self, size): size = llmemory.raw_malloc_usage(size) @@ -421,7 +425,7 @@ cif_descr.exchange_size = exchange_offset def fb_extra_fields(self, cif_descr): - cif_descr.abi = clibffi.FFI_DEFAULT_ABI # XXX + cif_descr.abi = self.fabi cif_descr.nargs = len(self.fargs) cif_descr.rtype = self.rtype cif_descr.atypes = self.atypes diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -11,7 +11,8 @@ class W_CType(W_Root): - _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_'] + _attrs_ = ['space', 'size', 'name', 'name_position', '_lifeline_', + '_pointer_type'] _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. @@ -20,6 +21,7 @@ cast_anything = False is_primitive_integer = False + is_nonfunc_pointer_or_array = False kind = "?" 
def __init__(self, space, size, name, name_position): @@ -142,7 +144,7 @@ # obscure hack when untranslated, maybe, approximate, don't use if isinstance(align, llmemory.FieldOffset): align = rffi.sizeof(align.TYPE.y) - if (1 << (8*align-2)) > sys.maxint: + if sys.platform != 'win32' and (1 << (8*align-2)) > sys.maxint: align /= 2 else: # a different hack when translated, to avoid seeing constants diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -7,6 +7,7 @@ from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.tool import rfficache from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc @@ -125,12 +126,25 @@ cdata[0] = value +# XXX explicitly use an integer type instead of lltype.UniChar here, +# because for now the latter is defined as unsigned by RPython (even +# though it may be signed when 'wchar_t' is written to C).
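For context on the abi-related changes in _cffi_backend/__init__.py and ctypefunc.py above: at the application level this is the plumbing that lets cffi 1.3 accept a calling-convention specifier in cdef(). A hedged usage sketch follows; the DLL and function names are made up, and the "ignored on non-Windows" behaviour is stated from the cffi documentation rather than from this diff.

    import _cffi_backend
    import cffi

    ffi = cffi.FFI()
    # '__stdcall' (or WINAPI) may now appear in the cdef; on platforms
    # where it does not apply it is expected to be ignored
    ffi.cdef("""
        int __stdcall StdcallAdd(int a, int b);
    """)

    # FFI_STDCALL is only exported when the underlying libffi defines it
    print(getattr(_cffi_backend, 'FFI_STDCALL', 'no stdcall on this platform'))
    print(_cffi_backend.FFI_DEFAULT_ABI)

    # lib = ffi.dlopen("example_stdcall.dll")   # hypothetical DLL
    # lib.StdcallAdd(40, 2)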
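The small interp_magic.py hunk at the top of this section adds a fast path for zipping exactly two lists. Assuming it is exposed through the __pypy__ module in the usual way (the registration is not visible in the truncated diff), application code on PyPy could use it roughly as below; treat the import location as an assumption.

    # PyPy-only sketch; falls back to plain zip() elsewhere
    try:
        from __pypy__ import specialized_zip_2_lists   # assumed export
    except ImportError:
        specialized_zip_2_lists = None

    def zip2(l1, l2):
        if specialized_zip_2_lists is not None:
            # returns a list of 2-tuples built as "specialised" tuple
            # objects, saving memory and boxing work on PyPy
            return specialized_zip_2_lists(l1, l2)
        return zip(l1, l2)

    print(zip2([1, 2, 3], [4.5, 5.5, 6.5]))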