Author: Matti Picus <[email protected]>
Branch: buffer-interface2
Changeset: r87402:46f3011f6b83
Date: 2016-09-26 22:07 +0300
http://bitbucket.org/pypy/pypy/changeset/46f3011f6b83/
Log: merge default into branch
diff too long, truncating to 2000 out of 2477 lines
diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py
b/lib-python/2.7/distutils/sysconfig_pypy.py
--- a/lib-python/2.7/distutils/sysconfig_pypy.py
+++ b/lib-python/2.7/distutils/sysconfig_pypy.py
@@ -13,6 +13,7 @@
import sys
import os
import shlex
+import imp
from distutils.errors import DistutilsPlatformError
@@ -62,8 +63,7 @@
"""Initialize the module as appropriate for POSIX systems."""
g = {}
g['EXE'] = ""
- g['SO'] = ".so"
- g['SOABI'] = g['SO'].rsplit('.')[0]
+ g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0]
g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
g['CC'] = "gcc -pthread" # -pthread might not be valid on OS/X, check
@@ -75,8 +75,7 @@
"""Initialize the module as appropriate for NT"""
g = {}
g['EXE'] = ".exe"
- g['SO'] = ".pyd"
- g['SOABI'] = g['SO'].rsplit('.')[0]
+ g['SO'] = [s[0] for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION][0]
global _config_vars
_config_vars = g
diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py
--- a/lib-python/2.7/sysconfig.py
+++ b/lib-python/2.7/sysconfig.py
@@ -529,7 +529,7 @@
for suffix, mode, type_ in imp.get_suffixes():
if type_ == imp.C_EXTENSION:
_CONFIG_VARS['SOABI'] = suffix.split('.')[1]
- break
+ break
if args:
vals = []
diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py
--- a/lib_pypy/_subprocess.py
+++ b/lib_pypy/_subprocess.py
@@ -22,7 +22,10 @@
code, message = _ffi.getwinerror()
raise WindowsError(code, message)
-_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
+def _int2handle(val):
+ return _ffi.cast("HANDLE", val)
+
+_INVALID_HANDLE_VALUE = _int2handle(-1)
class _handle(object):
def __init__(self, c_handle):
@@ -70,9 +73,9 @@
target = _ffi.new("HANDLE[1]")
res = _kernel32.DuplicateHandle(
- _ffi.cast("HANDLE", source_process),
- _ffi.cast("HANDLE", source),
- _ffi.cast("HANDLE", target_process),
+ _int2handle(source_process),
+ _int2handle(source),
+ _int2handle(target_process),
target, access, inherit, options)
if not res:
@@ -119,12 +122,14 @@
if not res:
raise _WinError()
- return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId,
pi.dwThreadId
+ return (_handle(pi.hProcess),
+ _handle(pi.hThread),
+ pi.dwProcessId,
+ pi.dwThreadId)
def WaitForSingleObject(handle, milliseconds):
# CPython: the first argument is expected to be an integer.
- res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
- milliseconds)
+ res = _kernel32.WaitForSingleObject(_int2handle(handle), milliseconds)
if res < 0:
raise _WinError()
@@ -134,7 +139,7 @@
# CPython: the first argument is expected to be an integer.
code = _ffi.new("DWORD[1]")
- res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
+ res = _kernel32.GetExitCodeProcess(_int2handle(handle), code)
if not res:
raise _WinError()
@@ -144,7 +149,7 @@
def TerminateProcess(handle, exitcode):
# CPython: the first argument is expected to be an integer.
# The second argument is silently wrapped in a UINT.
- res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+ res = _kernel32.TerminateProcess(_int2handle(handle),
_ffi.cast("UINT", exitcode))
if not res:
diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
--- a/lib_pypy/cffi.egg-info/PKG-INFO
+++ b/lib_pypy/cffi.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: cffi
-Version: 1.8.3
+Version: 1.8.4
Summary: Foreign Function Interface for Python calling C code.
Home-page: http://cffi.readthedocs.org
Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
-__version__ = "1.8.3"
-__version_info__ = (1, 8, 3)
+__version__ = "1.8.4"
+__version_info__ = (1, 8, 4)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
--- a/lib_pypy/cffi/_embedding.h
+++ b/lib_pypy/cffi/_embedding.h
@@ -233,7 +233,7 @@
f = PySys_GetObject((char *)"stderr");
if (f != NULL && f != Py_None) {
PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
- "\ncompiled with cffi version: 1.8.3"
+ "\ncompiled with cffi version: 1.8.4"
"\n_cffi_backend module: ", f);
modules = PyImport_GetModuleDict();
mod = PyDict_GetItemString(modules, "_cffi_backend");
diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py
--- a/lib_pypy/cffi/cparser.py
+++ b/lib_pypy/cffi/cparser.py
@@ -332,7 +332,7 @@
realtype = model.unknown_ptr_type(decl.name)
else:
realtype, quals = self._get_type_and_quals(
- decl.type, name=decl.name)
+ decl.type, name=decl.name, partial_length_ok=True)
self._declare('typedef ' + decl.name, realtype,
quals=quals)
else:
raise api.CDefError("unrecognized construct", decl)
@@ -781,11 +781,14 @@
exprnode.name in self._int_constants):
return self._int_constants[exprnode.name]
#
- if partial_length_ok:
- if (isinstance(exprnode, pycparser.c_ast.ID) and
+ if (isinstance(exprnode, pycparser.c_ast.ID) and
exprnode.name == '__dotdotdotarray__'):
+ if partial_length_ok:
self._partial_length = True
return '...'
+ raise api.FFIError(":%d: unsupported '[...]' here, cannot derive "
+ "the actual array length in this context"
+ % exprnode.coord.line)
#
raise api.FFIError(":%d: unsupported expression: expected a "
"simple numeric constant" % exprnode.coord.line)
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -587,8 +587,11 @@
# ----------
# typedefs
+ def _typedef_type(self, tp, name):
+ return self._global_type(tp, "(*(%s *)0)" % (name,))
+
def _generate_cpy_typedef_collecttype(self, tp, name):
- self._do_collect_type(tp)
+ self._do_collect_type(self._typedef_type(tp, name))
def _generate_cpy_typedef_decl(self, tp, name):
pass
@@ -598,6 +601,7 @@
self._lsts["typename"].append(TypenameExpr(name, type_index))
def _generate_cpy_typedef_ctx(self, tp, name):
+ tp = self._typedef_type(tp, name)
self._typedef_ctx(tp, name)
if getattr(tp, "origin", None) == "unknown_type":
self._struct_ctx(tp, tp.name, approxname=None)
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -34,9 +34,11 @@
This function searches the PyPy standard library starting from the given
"PyPy home directory". The arguments are:
- * ``home``: NULL terminated path to an executable inside the pypy directory
+ * ``home``: path to an executable inside the pypy directory
(can be a .so name, can be made up). Used to look up the standard
- library, and is also set as ``sys.executable``.
+ library, and is also set as ``sys.executable``. From PyPy 5.5, you can
+ just say NULL here, as long as the ``libpypy-c.so/dylib/dll`` is itself
+ inside this directory.
* ``verbose``: if non-zero, it will print error messages to stderr
@@ -82,18 +84,14 @@
Note that this API is a lot more minimal than say CPython C API, so at first
it's obvious to think that you can't do much. However, the trick is to do
-all the logic in Python and expose it via `cffi`_ callbacks. Let's assume
-we're on linux and pypy is installed in ``/opt/pypy`` (with
-subdirectories like ``lib-python`` and ``lib_pypy``), and with the
-library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be
-installed; you can also replace these paths with a local extract of the
-installation tarballs, or with your local checkout of pypy.) We write a
-little C program:
+all the logic in Python and expose it via `cffi`_ callbacks.
+We write a little C program:
.. code-block:: c
#include "PyPy.h"
#include <stdio.h>
+ #include <stdlib.h>
static char source[] = "print 'hello from pypy'";
@@ -102,9 +100,9 @@
int res;
rpython_startup_code();
- /* note: in the path /opt/pypy/x, the final x is ignored and
- replaced with lib-python and lib_pypy. */
- res = pypy_setup_home("/opt/pypy/x", 1);
+ /* Before PyPy 5.5, you may need to say e.g. "/opt/pypy/bin" instead
+ * of NULL. */
+ res = pypy_setup_home(NULL, 1);
if (res) {
printf("Error setting pypy home!\n");
return 1;
@@ -123,11 +121,6 @@
$ LD_LIBRARY_PATH=/opt/pypy/bin ./x
hello from pypy
-.. note:: If the compilation fails because of missing PyPy.h header file,
- you are running PyPy <= 2.2.1. Get it here__.
-
-.. __:
https://bitbucket.org/pypy/pypy/raw/c4cd6eca9358066571500ac82aaacfdaa3889e8c/include/PyPy.h
-
On OSX it is necessary to set the rpath of the binary if one wants to link to
it,
with a command like::
@@ -181,6 +174,7 @@
/* C example */
#include "PyPy.h"
#include <stdio.h>
+ #include <stdlib.h>
struct API {
double (*add_numbers)(double x, double y);
@@ -196,7 +190,7 @@
int res;
rpython_startup_code();
- res = pypy_setup_home("/opt/pypy/x", 1);
+ res = pypy_setup_home(NULL, 1);
if (res) {
fprintf(stderr, "Error setting pypy home!\n");
return -1;
@@ -237,6 +231,8 @@
Finding pypy_home
-----------------
+**You can usually skip this section if you are running PyPy >= 5.5.**
+
The function pypy_setup_home() takes as first parameter the path to a
file from which it can deduce the location of the standard library.
More precisely, it tries to remove final components until it finds
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -21,3 +21,14 @@
JIT residual calls: if the called function starts with a fast-path
like "if x.foo != 0: return x.foo", then inline the check before
doing the CALL. For now, string hashing is about the only case.
+
+.. branch: search-path-from-libpypy
+
+The compiled pypy now looks for its lib-python/lib_pypy path starting
+from the location of the *libpypy-c* instead of the executable. This is
+arguably more consistent, and also it is what occurs anyway if you're
+embedding pypy. Linux distribution packagers, take note! At a minimum,
+the ``libpypy-c.so`` must really be inside the path containing
+``lib-python`` and ``lib_pypy``. Of course, you can put a symlink to it
+from somewhere else. You no longer have to do the same with the
+``pypy`` executable, as long as it finds its ``libpypy-c.so`` library.
diff --git a/pypy/goal/targetpypystandalone.py
b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -89,17 +89,20 @@
def pypy_setup_home(ll_home, verbose):
from pypy.module.sys.initpath import pypy_find_stdlib
verbose = rffi.cast(lltype.Signed, verbose)
- if ll_home:
+ if ll_home and ord(ll_home[0]):
home1 = rffi.charp2str(ll_home)
home = os.path.join(home1, 'x') # <- so that 'll_home' can be
# directly the root directory
+ dynamic = False
else:
- home = home1 = pypydir
- w_path = pypy_find_stdlib(space, home)
+ home1 = "pypy's shared library location"
+ home = pypydir
+ dynamic = True
+ w_path = pypy_find_stdlib(space, home, dynamic)
if space.is_none(w_path):
if verbose:
debug("pypy_setup_home: directories 'lib-python' and
'lib_pypy'"
- " not found in '%s' or in any parent directory" % home1)
+ " not found in %s or in any parent directory" % home1)
return rffi.cast(rffi.INT, 1)
space.startup()
space.appexec([w_path], """(path):
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -374,11 +374,8 @@
self._value = value
self.setup(w_type)
- def get_w_value(self, space):
- w_value = self._w_value
- if w_value is None:
- self._w_value = w_value = space.wrap(self._value)
- return w_value
+ def _compute_value(self, space):
+ return self._value
@specialize.memo()
def get_operr_class(valuefmt):
diff --git a/pypy/interpreter/test/test_app_main.py
b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -1019,23 +1019,32 @@
old_sys_path = sys.path[:]
old_cwd = os.getcwd()
- sys.path.append(self.goal_dir)
# make sure cwd does not contain a stdlib
if self.tmp_dir.startswith(self.trunkdir):
skip('TMPDIR is inside the PyPy source')
- os.chdir(self.tmp_dir)
+ sys.path.append(self.goal_dir)
tmp_pypy_c = os.path.join(self.tmp_dir, 'pypy-c')
try:
+ os.chdir(self.tmp_dir)
+
+ # If we are running PyPy with a libpypy-c, the following
+ # lines find the stdlib anyway. Otherwise, it is not found.
+ expected_found = (
+ '__pypy__' in sys.builtin_module_names and
+ sys.pypy_translation_info['translation.shared'])
+
import app_main
- app_main.setup_bootstrap_path(tmp_pypy_c) # stdlib not found
+ app_main.setup_bootstrap_path(tmp_pypy_c)
assert sys.executable == ''
- assert sys.path == old_sys_path + [self.goal_dir]
+ if not expected_found:
+ assert sys.path == old_sys_path + [self.goal_dir]
app_main.setup_bootstrap_path(self.fake_exe)
if not sys.platform == 'win32':
# an existing file is always 'executable' on windows
assert sys.executable == '' # not executable!
- assert sys.path == old_sys_path + [self.goal_dir]
+ if not expected_found:
+ assert sys.path == old_sys_path + [self.goal_dir]
os.chmod(self.fake_exe, 0755)
app_main.setup_bootstrap_path(self.fake_exe)
@@ -1046,7 +1055,8 @@
if newpath[0].endswith('__extensions__'):
newpath = newpath[1:]
# we get at least 'expected_path', and maybe more (e.g.plat-linux2)
- assert newpath[:len(self.expected_path)] == self.expected_path
+ if not expected_found:
+ assert newpath[:len(self.expected_path)] == self.expected_path
finally:
sys.path[:] = old_sys_path
os.chdir(old_cwd)
diff --git a/pypy/module/_cffi_backend/__init__.py
b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -3,7 +3,7 @@
from rpython.rlib import rdynload, clibffi, entrypoint
from rpython.rtyper.lltypesystem import rffi
-VERSION = "1.8.3"
+VERSION = "1.8.4"
FFI_DEFAULT_ABI = clibffi.FFI_DEFAULT_ABI
try:
diff --git a/pypy/module/_cffi_backend/embedding.py
b/pypy/module/_cffi_backend/embedding.py
--- a/pypy/module/_cffi_backend/embedding.py
+++ b/pypy/module/_cffi_backend/embedding.py
@@ -112,29 +112,7 @@
#define _WIN32_WINNT 0x0501
#include <windows.h>
-#define CFFI_INIT_HOME_PATH_MAX _MAX_PATH
static void _cffi_init(void);
-static void _cffi_init_error(const char *msg, const char *extra);
-
-static int _cffi_init_home(char *output_home_path)
-{
- HMODULE hModule = 0;
- DWORD res;
-
- GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
- GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
- (LPCTSTR)&_cffi_init, &hModule);
-
- if (hModule == 0 ) {
- _cffi_init_error("GetModuleHandleEx() failed", "");
- return -1;
- }
- res = GetModuleFileName(hModule, output_home_path,
CFFI_INIT_HOME_PATH_MAX);
- if (res >= CFFI_INIT_HOME_PATH_MAX) {
- return -1;
- }
- return 0;
-}
static void _cffi_init_once(void)
{
@@ -155,28 +133,9 @@
else:
do_includes = r"""
-#include <dlfcn.h>
#include <pthread.h>
-#define CFFI_INIT_HOME_PATH_MAX PATH_MAX
static void _cffi_init(void);
-static void _cffi_init_error(const char *msg, const char *extra);
-
-static int _cffi_init_home(char *output_home_path)
-{
- Dl_info info;
- dlerror(); /* reset */
- if (dladdr(&_cffi_init, &info) == 0) {
- _cffi_init_error("dladdr() failed: ", dlerror());
- return -1;
- }
- if (realpath(info.dli_fname, output_home_path) == NULL) {
- perror("realpath() failed");
- _cffi_init_error("realpath() failed", "");
- return -1;
- }
- return 0;
-}
static void _cffi_init_once(void)
{
@@ -201,14 +160,10 @@
static void _cffi_init(void)
{
- char home[CFFI_INIT_HOME_PATH_MAX + 1];
-
rpython_startup_code();
RPyGilAllocate();
- if (_cffi_init_home(home) != 0)
- return;
- if (pypy_setup_home(home, 1) != 0) {
+ if (pypy_setup_home(NULL, 1) != 0) {
_cffi_init_error("pypy_setup_home() failed", "");
return;
}
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py
b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -1,7 +1,7 @@
# ____________________________________________________________
import sys
-assert __version__ == "1.8.3", ("This test_c.py file is for testing a version"
+assert __version__ == "1.8.4", ("This test_c.py file is for testing a version"
" of cffi that differs from the one that we"
" get from 'import _cffi_backend'")
if sys.version_info < (3,):
diff --git a/pypy/module/_cffi_backend/test/test_ffi_obj.py
b/pypy/module/_cffi_backend/test/test_ffi_obj.py
--- a/pypy/module/_cffi_backend/test/test_ffi_obj.py
+++ b/pypy/module/_cffi_backend/test/test_ffi_obj.py
@@ -507,6 +507,11 @@
def test_bug_1(self):
import _cffi_backend as _cffi1_backend
ffi = _cffi1_backend.FFI()
- q = ffi.new("char[]", "abcd")
+ q = ffi.new("char[]", b"abcd")
p = ffi.cast("char(*)(void)", q)
raises(TypeError, ffi.string, p)
+
+ def test_negative_array_size(self):
+ import _cffi_backend as _cffi1_backend
+ ffi = _cffi1_backend.FFI()
+ raises(ffi.error, ffi.cast, "int[-5]", 0)
diff --git a/pypy/module/_cffi_backend/test/test_recompiler.py
b/pypy/module/_cffi_backend/test/test_recompiler.py
--- a/pypy/module/_cffi_backend/test/test_recompiler.py
+++ b/pypy/module/_cffi_backend/test/test_recompiler.py
@@ -8,7 +8,7 @@
@unwrap_spec(cdef=str, module_name=str, source=str)
def prepare(space, cdef, module_name, source, w_includes=None,
- w_extra_source=None):
+ w_extra_source=None, w_min_version=None):
try:
import cffi
from cffi import FFI # <== the system one, which
@@ -16,8 +16,13 @@
from cffi import ffiplatform
except ImportError:
py.test.skip("system cffi module not found or older than 1.0.0")
- if cffi.__version_info__ < (1, 4, 0):
- py.test.skip("system cffi module needs to be at least 1.4.0")
+ if w_min_version is None:
+ min_version = (1, 4, 0)
+ else:
+ min_version = tuple(space.unwrap(w_min_version))
+ if cffi.__version_info__ < min_version:
+ py.test.skip("system cffi module needs to be at least %s, got %s" % (
+ min_version, cffi.__version_info__))
space.appexec([], """():
import _cffi_backend # force it to be initialized
""")
@@ -1790,3 +1795,28 @@
"void f(void) { }")
assert lib.f.__get__(42) is lib.f
assert lib.f.__get__(42, int) is lib.f
+
+ def test_typedef_array_dotdotdot(self):
+ ffi, lib = self.prepare("""
+ typedef int foo_t[...], bar_t[...];
+ int gv[...];
+ typedef int mat_t[...][...];
+ typedef int vmat_t[][...];
+ """,
+ "test_typedef_array_dotdotdot", """
+ typedef int foo_t[50], bar_t[50];
+ int gv[23];
+ typedef int mat_t[6][7];
+ typedef int vmat_t[][8];
+ """, min_version=(1, 8, 4))
+ assert ffi.sizeof("foo_t") == 50 * ffi.sizeof("int")
+ assert ffi.sizeof("bar_t") == 50 * ffi.sizeof("int")
+ assert len(ffi.new("foo_t")) == 50
+ assert len(ffi.new("bar_t")) == 50
+ assert ffi.sizeof(lib.gv) == 23 * ffi.sizeof("int")
+ assert ffi.sizeof("mat_t") == 6 * 7 * ffi.sizeof("int")
+ assert len(ffi.new("mat_t")) == 6
+ assert len(ffi.new("mat_t")[3]) == 7
+ raises(ffi.error, ffi.sizeof, "vmat_t")
+ p = ffi.new("vmat_t", 4)
+ assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int")
diff --git a/pypy/module/array/interp_array.py
b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -30,28 +30,25 @@
raise oefmt(space.w_TypeError,
"array.array() does not take keyword arguments")
- w_initializer_type = None
- w_initializer = None
- if len(__args__.arguments_w) > 0:
- w_initializer = __args__.arguments_w[0]
- w_initializer_type = space.type(w_initializer)
for tc in unroll_typecodes:
if typecode == tc:
a = space.allocate_instance(types[tc].w_class, w_cls)
a.__init__(space)
- if w_initializer is not None:
- if w_initializer_type is space.w_str:
- a.descr_fromstring(space, w_initializer)
- elif w_initializer_type is space.w_list:
- a.descr_fromlist(space, w_initializer)
- else:
- a.extend(w_initializer, True)
break
else:
raise oefmt(space.w_ValueError,
"bad typecode (must be c, b, B, u, h, H, i, I, l, L, f or "
"d)")
+ if len(__args__.arguments_w) > 0:
+ w_initializer = __args__.arguments_w[0]
+ w_initializer_type = space.type(w_initializer)
+ if w_initializer_type is space.w_str:
+ a.descr_fromstring(space, w_initializer)
+ elif w_initializer_type is space.w_list:
+ a.descr_fromlist(space, w_initializer)
+ else:
+ a.extend(w_initializer, True)
return a
diff --git a/pypy/module/cpyext/longobject.py b/pypy/module/cpyext/longobject.py
--- a/pypy/module/cpyext/longobject.py
+++ b/pypy/module/cpyext/longobject.py
@@ -6,7 +6,6 @@
from pypy.interpreter.error import OperationError
from pypy.module.cpyext.intobject import PyInt_AsUnsignedLongMask
from rpython.rlib.rbigint import rbigint
-from rpython.rlib.rarithmetic import widen
PyLong_Check, PyLong_CheckExact = build_type_checkers("Long")
@@ -28,25 +27,25 @@
"""Return a new PyLongObject object from a C size_t, or NULL on
failure.
"""
- return space.wrap(val)
+ return space.newlong_from_rarith_int(val)
@cpython_api([rffi.LONGLONG], PyObject)
def PyLong_FromLongLong(space, val):
"""Return a new PyLongObject object from a C long long, or NULL
on failure."""
- return space.newlong(val)
+ return space.newlong_from_rarith_int(val)
@cpython_api([rffi.ULONG], PyObject)
def PyLong_FromUnsignedLong(space, val):
"""Return a new PyLongObject object from a C unsigned long, or
NULL on failure."""
- return space.wrap(val)
+ return space.newlong_from_rarith_int(val)
@cpython_api([rffi.ULONGLONG], PyObject)
def PyLong_FromUnsignedLongLong(space, val):
"""Return a new PyLongObject object from a C unsigned long long,
or NULL on failure."""
- return space.wrap(val)
+ return space.newlong_from_rarith_int(val)
@cpython_api([PyObject], rffi.ULONG, error=-1)
def PyLong_AsUnsignedLong(space, w_long):
@@ -203,7 +202,10 @@
can be retrieved from the resulting value using PyLong_AsVoidPtr().
If the integer is larger than LONG_MAX, a positive long integer is
returned."""
- return space.newlong(rffi.cast(ADDR, p))
+ value = rffi.cast(ADDR, p) # signed integer
+ if value < 0:
+ return space.newlong_from_rarith_int(rffi.cast(lltype.Unsigned, p))
+ return space.wrap(value)
@cpython_api([PyObject], rffi.VOIDP, error=lltype.nullptr(rffi.VOIDP.TO))
def PyLong_AsVoidPtr(space, w_long):
diff --git a/pypy/module/cpyext/test/test_longobject.py
b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -1,5 +1,6 @@
import sys, py
from rpython.rtyper.lltypesystem import rffi, lltype
+from rpython.rlib.rarithmetic import maxint
from pypy.objspace.std.intobject import W_IntObject
from pypy.objspace.std.longobject import W_LongObject
from pypy.module.cpyext.test.test_api import BaseApiTest
@@ -108,10 +109,26 @@
lltype.free(overflow, flavor='raw')
def test_as_voidptr(self, space, api):
+ # CPython returns an int (not a long) depending on the value
+ # passed to PyLong_FromVoidPtr(). In all cases, NULL becomes
+ # the int 0.
w_l = api.PyLong_FromVoidPtr(lltype.nullptr(rffi.VOIDP.TO))
- assert isinstance(w_l, W_LongObject)
- assert space.unwrap(w_l) == 0L
+ assert space.is_w(space.type(w_l), space.w_int)
+ assert space.unwrap(w_l) == 0
assert api.PyLong_AsVoidPtr(w_l) == lltype.nullptr(rffi.VOIDP.TO)
+ # Positive values also return an int (assuming, like always in
+ # PyPy, that an int is big enough to store any pointer).
+ p = rffi.cast(rffi.VOIDP, maxint)
+ w_l = api.PyLong_FromVoidPtr(p)
+ assert space.is_w(space.type(w_l), space.w_int)
+ assert space.unwrap(w_l) == maxint
+ assert api.PyLong_AsVoidPtr(w_l) == p
+ # Negative values always return a long.
+ p = rffi.cast(rffi.VOIDP, -maxint-1)
+ w_l = api.PyLong_FromVoidPtr(p)
+ assert space.is_w(space.type(w_l), space.w_long)
+ assert space.unwrap(w_l) == maxint+1
+ assert api.PyLong_AsVoidPtr(w_l) == p
def test_sign_and_bits(self, space, api):
if space.is_true(space.lt(space.sys.get('version_info'),
diff --git a/pypy/module/posix/interp_posix.py
b/pypy/module/posix/interp_posix.py
--- a/pypy/module/posix/interp_posix.py
+++ b/pypy/module/posix/interp_posix.py
@@ -226,13 +226,13 @@
w_keywords = space.newdict()
stat_float_times = space.fromcache(StatState).stat_float_times
for i, (name, TYPE) in FIELDS:
- value = getattr(st, name)
- if name in ('st_atime', 'st_mtime', 'st_ctime'):
- value = int(value) # rounded to an integer for indexed access
- w_value = space.wrap(value)
if i < rposix_stat.N_INDEXABLE_FIELDS:
+ # get the first 10 items by indexing; this gives us
+ # 'st_Xtime' as an integer, too
+ w_value = space.wrap(st[i])
lst[i] = w_value
- else:
+ elif name.startswith('st_'): # exclude 'nsec_Xtime'
+ w_value = space.wrap(getattr(st, name))
space.setitem(w_keywords, space.wrap(name), w_value)
# non-rounded values for name-based access
@@ -243,13 +243,8 @@
space.wrap('st_mtime'), space.wrap(st.st_mtime))
space.setitem(w_keywords,
space.wrap('st_ctime'), space.wrap(st.st_ctime))
- else:
- space.setitem(w_keywords,
- space.wrap('st_atime'), space.wrap(int(st.st_atime)))
- space.setitem(w_keywords,
- space.wrap('st_mtime'), space.wrap(int(st.st_mtime)))
- space.setitem(w_keywords,
- space.wrap('st_ctime'), space.wrap(int(st.st_ctime)))
+ #else:
+ # filled by the __init__ method
w_tuple = space.newtuple(lst)
w_stat_result = space.getattr(space.getbuiltinmodule(os.name),
diff --git a/pypy/module/posix/test/test_posix2.py
b/pypy/module/posix/test/test_posix2.py
--- a/pypy/module/posix/test/test_posix2.py
+++ b/pypy/module/posix/test/test_posix2.py
@@ -129,9 +129,9 @@
assert st[4] == st.st_uid
assert st[5] == st.st_gid
assert st[6] == st.st_size
- assert st[7] == int(st.st_atime)
- assert st[8] == int(st.st_mtime)
- assert st[9] == int(st.st_ctime)
+ assert st[7] == int(st.st_atime) # in complete corner cases, rounding
+ assert st[8] == int(st.st_mtime) # here could maybe get the wrong
+ assert st[9] == int(st.st_ctime) # integer...
assert stat.S_IMODE(st.st_mode) & stat.S_IRUSR
assert stat.S_IMODE(st.st_mode) & stat.S_IWUSR
@@ -141,13 +141,12 @@
assert st.st_size == 14
assert st.st_nlink == 1
- #if sys.platform.startswith('linux'):
- # # expects non-integer timestamps - it's unlikely that they are
- # # all three integers
- # assert ((st.st_atime, st.st_mtime, st.st_ctime) !=
- # (st[7], st[8], st[9]))
- # assert st.st_blksize * st.st_blocks >= st.st_size
+ assert not hasattr(st, 'nsec_atime')
+
if sys.platform.startswith('linux'):
+ assert isinstance(st.st_atime, float)
+ assert isinstance(st.st_mtime, float)
+ assert isinstance(st.st_ctime, float)
assert hasattr(st, 'st_rdev')
def test_stat_float_times(self):
diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py
--- a/pypy/module/sys/initpath.py
+++ b/pypy/module/sys/initpath.py
@@ -9,6 +9,8 @@
from rpython.rlib import rpath
from rpython.rlib.objectmodel import we_are_translated
+from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.translator.tool.cbuild import ExternalCompilationInfo
from pypy.interpreter.gateway import unwrap_spec
from pypy.module.sys.state import get as get_state
@@ -155,8 +157,13 @@
return space.wrap(resolvedirof(filename))
-@unwrap_spec(executable='str0')
-def pypy_find_stdlib(space, executable):
+@unwrap_spec(executable='str0', dynamic=int)
+def pypy_find_stdlib(space, executable, dynamic=1):
+ if dynamic and space.config.translation.shared:
+ dynamic_location = pypy_init_home()
+ if dynamic_location:
+ executable = rffi.charp2str(dynamic_location)
+ pypy_init_free(dynamic_location)
path, prefix = find_stdlib(get_state(space), executable)
if path is None:
return space.w_None
@@ -164,3 +171,75 @@
space.setitem(space.sys.w_dict, space.wrap('prefix'), w_prefix)
space.setitem(space.sys.w_dict, space.wrap('exec_prefix'), w_prefix)
return space.newlist([space.wrap(p) for p in path])
+
+
+# ____________________________________________________________
+
+
+if os.name == 'nt':
+
+ _source_code = r"""
+#define _WIN32_WINNT 0x0501
+#include <windows.h>
+#include <stdio.h>
+
+RPY_EXPORTED
+char *_pypy_init_home(void)
+{
+ HMODULE hModule = 0;
+ DWORD res;
+ char *p;
+
+ GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ (LPCTSTR)&_pypy_init_home, &hModule);
+
+ if (hModule == 0 ) {
+ fprintf(stderr, "PyPy initialization: GetModuleHandleEx() failed\n");
+ return NULL;
+ }
+ p = malloc(_MAX_PATH);
+ if (p == NULL)
+ return NULL;
+ res = GetModuleFileName(hModule, p, _MAX_PATH);
+ if (res >= _MAX_PATH || res <= 0) {
+ free(p);
+ fprintf(stderr, "PyPy initialization: GetModuleFileName() failed\n");
+ return NULL;
+ }
+ return p;
+}
+"""
+
+else:
+
+ _source_code = r"""
+#include <dlfcn.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+RPY_EXPORTED
+char *_pypy_init_home(void)
+{
+ Dl_info info;
+ dlerror(); /* reset */
+ if (dladdr(&_pypy_init_home, &info) == 0) {
+ fprintf(stderr, "PyPy initialization: dladdr() failed: %s\n",
+ dlerror());
+ return NULL;
+ }
+ char *p = realpath(info.dli_fname, NULL);
+ if (p == NULL) {
+ p = strdup(info.dli_fname);
+ }
+ return p;
+}
+"""
+
+_eci = ExternalCompilationInfo(separate_module_sources=[_source_code])
+
+pypy_init_home = rffi.llexternal("_pypy_init_home", [], rffi.CCHARP,
+ _nowrapper=True, compilation_info=_eci)
+pypy_init_free = rffi.llexternal("free", [rffi.CCHARP], lltype.Void,
+ _nowrapper=True, compilation_info=_eci)
diff --git a/pypy/module/sys/test/test_initpath.py
b/pypy/module/sys/test/test_initpath.py
--- a/pypy/module/sys/test/test_initpath.py
+++ b/pypy/module/sys/test/test_initpath.py
@@ -1,8 +1,10 @@
import py
import os.path
-from pypy.module.sys.initpath import (compute_stdlib_path, find_executable,
find_stdlib,
- resolvedirof)
+from pypy.module.sys.initpath import (compute_stdlib_path, find_executable,
+ find_stdlib, resolvedirof,
+ pypy_init_home, pypy_init_free)
from pypy.module.sys.version import PYPY_VERSION, CPYTHON_VERSION
+from rpython.rtyper.lltypesystem import rffi
def build_hierarchy(prefix):
dirname = '%d.%d' % CPYTHON_VERSION[:2]
@@ -10,7 +12,7 @@
b = prefix.join('lib-python', dirname).ensure(dir=1)
return a, b
-def test_find_stdlib(tmpdir, monkeypatch):
+def test_find_stdlib(tmpdir):
bin_dir = tmpdir.join('bin').ensure(dir=True)
pypy = bin_dir.join('pypy').ensure(file=True)
build_hierarchy(tmpdir)
@@ -33,6 +35,14 @@
path, prefix = find_stdlib(None, str(pypy_sym))
assert prefix == pypydir
+def test_pypy_init_home():
+ p = pypy_init_home()
+ assert p
+ s = rffi.charp2str(p)
+ pypy_init_free(p)
+ print s
+ assert os.path.exists(s)
+
def test_compute_stdlib_path(tmpdir):
dirs = build_hierarchy(tmpdir)
path = compute_stdlib_path(None, str(tmpdir))
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_ffi_backend.py
@@ -480,3 +480,7 @@
assert ffi.unpack(p+1, 7) == b"bc\x00def\x00"
p = ffi.new("int[]", [-123456789])
assert ffi.unpack(p, 1) == [-123456789]
+
+ def test_negative_array_size(self):
+ ffi = FFI()
+ py.test.raises(ValueError, ffi.cast, "int[-5]", 0)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_ffi_obj.py
@@ -503,3 +503,7 @@
assert ffi.unpack(p+1, 7) == b"bc\x00def\x00"
p = ffi.new("int[]", [-123456789])
assert ffi.unpack(p, 1) == [-123456789]
+
+def test_negative_array_size():
+ ffi = _cffi1_backend.FFI()
+ py.test.raises(ffi.error, ffi.cast, "int[-5]", 0)
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
--- a/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
+++ b/pypy/module/test_lib_pypy/cffi_tests/cffi1/test_recompiler.py
@@ -1981,3 +1981,29 @@
static struct aaa f1(int x) { struct aaa s = {0}; s.a = x; return s; }
""")
assert lib.f1(52).a == 52
+
+def test_typedef_array_dotdotdot():
+ ffi = FFI()
+ ffi.cdef("""
+ typedef int foo_t[...], bar_t[...];
+ int gv[...];
+ typedef int mat_t[...][...];
+ typedef int vmat_t[][...];
+ """)
+ lib = verify(ffi, "test_typedef_array_dotdotdot", """
+ typedef int foo_t[50], bar_t[50];
+ int gv[23];
+ typedef int mat_t[6][7];
+ typedef int vmat_t[][8];
+ """)
+ assert ffi.sizeof("foo_t") == 50 * ffi.sizeof("int")
+ assert ffi.sizeof("bar_t") == 50 * ffi.sizeof("int")
+ assert len(ffi.new("foo_t")) == 50
+ assert len(ffi.new("bar_t")) == 50
+ assert ffi.sizeof(lib.gv) == 23 * ffi.sizeof("int")
+ assert ffi.sizeof("mat_t") == 6 * 7 * ffi.sizeof("int")
+ assert len(ffi.new("mat_t")) == 6
+ assert len(ffi.new("mat_t")[3]) == 7
+ py.test.raises(ffi.error, ffi.sizeof, "vmat_t")
+ p = ffi.new("vmat_t", 4)
+ assert ffi.sizeof(p[3]) == 8 * ffi.sizeof("int")
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -270,6 +270,10 @@
return W_SmallLongObject.fromint(val)
return W_LongObject.fromint(self, val)
+ @specialize.argtype(1)
+ def newlong_from_rarith_int(self, val): # val is an rarithmetic type
+ return W_LongObject.fromrarith_int(val)
+
def newlong_from_rbigint(self, val):
return newlong(self, val)
diff --git a/pypy/objspace/std/test/test_longobject.py
b/pypy/objspace/std/test/test_longobject.py
--- a/pypy/objspace/std/test/test_longobject.py
+++ b/pypy/objspace/std/test/test_longobject.py
@@ -25,7 +25,6 @@
space.raises_w(space.w_OverflowError, space.float_w, w_big)
def test_rint_variants(self):
- py.test.skip("XXX broken!")
from rpython.rtyper.tool.rfficache import platform
space = self.space
for r in platform.numbertype_to_rclass.values():
@@ -36,8 +35,8 @@
for x in values:
if not r.SIGNED:
x &= r.MASK
- w_obj = space.wrap(r(x))
- assert space.bigint_w(w_obj).eq(rbigint.fromint(x))
+ w_obj = space.newlong_from_rarith_int(r(x))
+ assert space.bigint_w(w_obj).eq(rbigint.fromlong(x))
class AppTestLong:
diff --git a/pypy/objspace/std/test/test_typeobject.py
b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -797,9 +797,7 @@
class AA(object):
__slots__ = ('a',)
aa = AA()
- # the following line works on CPython >= 2.6 but not on PyPy.
- # but see below for more
- raises(TypeError, "aa.__class__ = A")
+ aa.__class__ = A
raises(TypeError, "aa.__class__ = object")
class Z1(A):
pass
@@ -861,9 +859,13 @@
__slots__ = ['a', 'b']
class Order2(object):
__slots__ = ['b', 'a']
- # the following line works on CPython >= 2.6 but not on PyPy.
- # but see below for more
- raises(TypeError, "Order1().__class__ = Order2")
+ Order1().__class__ = Order2
+
+ # like CPython, the order of slot names doesn't matter
+ x = Order1()
+ x.a, x.b = 1, 2
+ x.__class__ = Order2
+ assert (x.a, x.b) == (1, 2)
class U1(object):
__slots__ = ['a', 'b']
@@ -873,10 +875,11 @@
__slots__ = ['a', 'b']
class V2(V1):
__slots__ = ['c', 'd', 'e']
- # the following line does not work on CPython >= 2.6 either.
- # that's just obscure. Really really. So we just ignore
- # the whole issue until someone comes complaining. Then we'll
- # just kill slots altogether apart from maybe doing a few checks.
+ # the following line does not work on CPython either: we can't
+ # change a class if the old and new class have different layouts
+ # that look compatible but aren't, because they don't have the
+ # same base-layout class (even if these base classes are
+ # themselves compatible)... obscure.
raises(TypeError, "U2().__class__ = V2")
def test_name(self):
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -102,9 +102,10 @@
"""
_immutable_ = True
- def __init__(self, typedef, nslots, base_layout=None):
+ def __init__(self, typedef, nslots, newslotnames=[], base_layout=None):
self.typedef = typedef
self.nslots = nslots
+ self.newslotnames = newslotnames[:] # make a fixed-size list
self.base_layout = base_layout
def issublayout(self, parent):
@@ -114,6 +115,12 @@
return False
return True
+ def expand(self, hasdict, weakrefable):
+ """Turn this Layout into a tuple. If two classes get equal
+ tuples, it means their instances have a fully compatible layout."""
+ return (self.typedef, self.newslotnames, self.base_layout,
+ hasdict, weakrefable)
+
# possible values of compares_by_identity_status
UNKNOWN = 0
@@ -289,8 +296,7 @@
# compute a tuple that fully describes the instance layout
def get_full_instance_layout(self):
- layout = self.layout
- return (layout, self.hasdict, self.weakrefable)
+ return self.layout.expand(self.hasdict, self.weakrefable)
def compute_default_mro(self):
return compute_C3_mro(self.space, self)
@@ -1001,11 +1007,15 @@
w_self.weakrefable = w_self.weakrefable or w_base.weakrefable
return hasoldstylebase
+
def create_all_slots(w_self, hasoldstylebase, w_bestbase, force_new_layout):
+ from pypy.objspace.std.listobject import StringSort
+
base_layout = w_bestbase.layout
index_next_extra_slot = base_layout.nslots
space = w_self.space
dict_w = w_self.dict_w
+ newslotnames = []
if '__slots__' not in dict_w:
wantdict = True
wantweakref = True
@@ -1031,8 +1041,22 @@
"__weakref__ slot disallowed: we already got
one")
wantweakref = True
else:
- index_next_extra_slot = create_slot(w_self, slot_name,
- index_next_extra_slot)
+ newslotnames.append(slot_name)
+ # Sort the list of names collected so far
+ sorter = StringSort(newslotnames, len(newslotnames))
+ sorter.sort()
+ # Try to create all slots in order. The creation of some of
+ # them might silently fail; then we delete the name from the
+ # list. At the end, 'index_next_extra_slot' has been advanced
+ # by the final length of 'newslotnames'.
+ i = 0
+ while i < len(newslotnames):
+ if create_slot(w_self, newslotnames[i], index_next_extra_slot):
+ index_next_extra_slot += 1
+ i += 1
+ else:
+ del newslotnames[i]
+ #
wantdict = wantdict or hasoldstylebase
if wantdict:
create_dict_slot(w_self)
@@ -1041,11 +1065,12 @@
if '__del__' in dict_w:
w_self.hasuserdel = True
#
+ assert index_next_extra_slot == base_layout.nslots + len(newslotnames)
if index_next_extra_slot == base_layout.nslots and not force_new_layout:
return base_layout
else:
return Layout(base_layout.typedef, index_next_extra_slot,
- base_layout=base_layout)
+ newslotnames, base_layout=base_layout)
def create_slot(w_self, slot_name, index_next_extra_slot):
space = w_self.space
@@ -1058,9 +1083,10 @@
slot_name = space.str_w(space.new_interned_str(slot_name))
# in cpython it is ignored less, but we probably don't care
member = Member(index_next_extra_slot, slot_name, w_self)
- index_next_extra_slot += 1
w_self.dict_w[slot_name] = space.wrap(member)
- return index_next_extra_slot
+ return True
+ else:
+ return False
def create_dict_slot(w_self):
if not w_self.hasdict:
diff --git a/rpython/doc/jit/backend.rst b/rpython/doc/jit/backend.rst
new file mode 100644
--- /dev/null
+++ b/rpython/doc/jit/backend.rst
@@ -0,0 +1,263 @@
+=========================
+PyPy's assembler backends
+=========================
+
+Draft notes about the organization of assembler backends in the PyPy JIT, in
2016
+=================================================================================
+
+
+input: linear sequence of instructions, called a "trace".
+
+A trace is a sequence of instructions in SSA form. Most instructions
+correspond to one or a few CPU-level instructions. There are a few
+meta-instructions like `label` and debugging stuff. All branching is
+done with guards, which are instructions that check that a condition is
+true and exit the trace if not. A failing guard can have a new trace
+added to it later, called a "bridge". A patched guard becomes a direct
+`Jcond` instruction going to the bridge, with no indirection, no
+register spilling, etc.
+
+A trace ends with either a `return` or a `jump to label`. The target
+label is either inside the same trace, or in some older one. For
+historical reasons we call a "loop" a trace that is not a bridge. The
+machine code that we generate is organized as a forest of trees; the
+trunk of the tree is a "loop", and the branches are all bridges
+(branching off the trunk or off another branch).
+
+* every trunk or branch that ends in a `jump to label` can target a
+ label from a different tree, too.
+
+* the whole process of assembling a loop or a branch is basically
+ single-threaded, so no synchronization issue there (including to patch
+ older generated instructions).
+
+* the generated assembler has got a "frame" in %rbp, which is actually
+ not on the stack at all, but is a GC object (called a "jitframe").
+ Spilling goes there.
+
+* the guards are `Jcond` to a very small piece of generated code, which
+ is basically pushing a couple of constants on the stack and then
+ jumping to the general guard-recovery code. That code will save the
+ registers into the jitframe and then exit the whole generated
+ function. The caller of that generated function checks how it
+ finished: if it finished by hitting a guard, then the caller is
+ responsible for calling the "blackhole interpreter". This is the part
+ of the front-end that recovers from failing guards and finishes
+ running the frame (including, possibly, by jumping again into
+ generated assembler).
+
+
+Details about the JITting process:
+
+* front-end and optimization pass
+
+* rewrite (includes gc related transformation as well as simplifications)
+
+* assembler generation
+
+
+Front-end and optimization pass
+-------------------------------
+
+Not discussed here in detail. This produces loops and bridges using an
+instruction set that is "high-level" in some sense: it contains
+instructions like "new"/"new_array", and
+"setfield"/"setarrayitem"/"setinteriorfield" which describe the action
+of storing a value in a precise field of the structure or array. For
+example, the "setfield" action might require implicitly a GC write
+barrier. This is the high-level trace that we send to the following
+step.
+
+
+Rewrite
+-------
+
+A mostly but not completely CPU-independent phase: lowers some
+instructions. For example, the variants of "new" are lowered to
+"malloc" and a few "gc_store": it bumps the pointer of the GC and then
+sets a few fields explicitly in the newly allocated structure. The
+"setfield" is replaced with a "cond_gc_wb_call" (conditional call to the
+write barrier) if needed, followed by a "gc_store".
+
+The "gc_store" instruction can be encoded in a single MOV assembler
+instruction, but is not as flexible as a MOV. The address is always
+specified as "some GC pointer + an offset". We don't have the notion of
+interior pointer for GC objects.
+
+A different instruction, "gc_store_indexed", offers additional operands,
+which can be mapped to a single MOV instruction using forms like
+`[rax+8*rcx+24]`.
+
+Some other complex instructions pass through to the backend, which must
+deal with them: for example, "card marking" in the GC. (Writing an
+object pointer inside an array would require walking the whole array
+later to find "young" references. Instead of that, we flip a bit for
+every range of 128 entries. This is a common GC optimization.) Setting
+the card bit of a GC object requires a sequence of assembler
+instructions that depends too much on the target CPU to be expressed
+explicitly here (moreover, it contains a few branches, which are hard to
+express at this level).
+
+
+Assembly
+--------
+
+No fancy code generation technique, but greedy forward pass that tries
+to avoid some pitfalls
+
+
+Handling instructions
+~~~~~~~~~~~~~~~~~~~~~
+
+* One by one (forward direction). Each instruction asks the register
+ allocator to ensure that some arguments are in registers (not in the
+ jitframe); asks for a register to put its result into; and asks for
+ additional scratch registers that will be freed at the end of the
+ instruction. There is a special case for boolean variables: they are
+ stored in the condition code flags instead of being materialized as a
+ 0/1 value. (They are materialized later, except in the common case
+ where they are only used by the next `guard_false` or `guard_true` and
+ then forgotten.)
+
+* Instruction arguments are loaded into a register on demand. This
+  makes the backend quite easy to write, but leads to some bad
+ decisions.
+
+
+Linear scan register allocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Although it's always a linear trace that we consider, we don't use
+advanced techniques for register allocation: we do forward, on-demand
+allocation as the backend produces the assembler. When it asks for a
+register to put some value into, we give it any free register, without
+consideration for what will be done with it later. We compute the
+longevity of all variables, but only use it when choosing which register
+to spill (we spill the variable with the longest longevity).
+
+This works to some extent because it is well integrated with the earlier
+optimization pass. Loops are unrolled once by the optimization pass to
+allow more powerful optimizations---the optimization pass itself is the
+place that benefits the most, but it also has benefits here in the
+assembly pass. These are:
+
+* The first peeling initializes the register binding on the first use.
+
+* This leads to an already allocated register of the trace loop.
+
+* As well as allocated registers when exiting bridges
+
+[Try to better allocate registers to match the ABI (minor to no benefit
+in the current state)]
+
+
+More complex mappings
+~~~~~~~~~~~~~~~~~~~~~
+
+Some instructions generate more complex code. These are either or both of:
+
+* complex instructions generating some local control flow, like
+ "cond_gc_wb_call" (for write barriers), "call_assembler" (a call
+ followed by a few checks).
+
+* instructions that invoke custom assembler helpers, like the slow-path
+ of write barriers or the slow-path of allocations. These slow-paths
+ are typically generated too, so that we are not constrained by the
+ usual calling conventions.
+
+
+GC pointers
+~~~~~~~~~~~
+
+Around most CALL instructions, we need to record a description of where
+the GC pointers are (registers and stack frame). This is needed in case
+the CALL invokes a garbage collection. The GC pointers can move; the
+pointers in the registers and stack frame are updated by the GC. That's
+a reason for why we don't have explicit interior pointers.
+
+GC pointers can appear as constants in the trace. We are busy changing
+that to use a constant table and `MOV REG, (%RIP+offset)`. The
+"constant" in the table is actually updated by the GC if the object
+moves.
+
+
+Vectorization
+~~~~~~~~~~~~~
+
+Optimization developed to use SIMD instructions for trace loops. Primary
+idea was to use it as an optimization of micro numpy. It has several
+passes on the already optimized trace.
+
+Shortly explained: It builds dependencies for an unrolled trace loop,
+gathering pairs/packs of operations that could be executed in parallel
+and finally schedules the operations.
+
+What did it add to the code base:
+
+* Dependencies can be constructed
+
+* Code motion of guards to relax dependencies
+
+* Scheduler to reorder trace
+
+* Array bound check removal (especially for unrolled traces)
+
+What can it do:
+
+* Transform vector loops (element wise operations)
+
+* Accumulation (`reduce([...],operator,0)`). Requires Operation to be
+ associative and commutative
+
+* SSE 4.1 as "vector backend"
+
+
+We do not
+~~~~~~~~~
+
+* Keep tracing data around to reoptimize the trace tree. (Once a trace
+ is compiled, minimal data is kept.) This is one reason (there are
+ others in the front-end) for the following result: JIT-compiling a
+ small loop with two common paths ends up as one "loop" and one bridge
+ assembled, and the bridge-following path is slightly less efficient.
+ This is notably because this bridge is assembled with two constraints:
+ the input registers are fixed (from the guard), and the output
+ registers are fixed (from the jump target); usually these two sets of
+ fixed registers are different, and copying around is needed.
+
+* We don't join trace tails: we only assemble *trees*.
+
+* We don't do any reordering (neither of trace instructions nor of
+ individual assembler instructions)
+
+* We don't do any cross-instruction optimization that makes sense only
+ for the backend and can't easily be expressed at a higher level. I'm
+ sure there are tons of examples of that, but e.g. loading a large
+ constant in a register that will survive for several instructions;
+ moving out of loops *parts* of some instruction like the address
+ calculation; etc. etc.
+
+* Other optimization opportunities I can think about: look at the
+ function prologue/epilogue; look at the overhead (small but not zero)
+ at the start of a bridge. Also check if the way guards are
+ implemented makes sense. Also, we generate large-ish sequences of
+ assembler instructions with tons of `Jcond` that are almost never
+ followed; any optimization opportunity there? (They all go forward,
+ if it changes anything.) In theory we could also replace some of
+ these with a signal handler on segfault (e.g. `guard_nonnull_class`).
+
+
+a GCC or LLVM backend?
+~~~~~~~~~~~~~~~~~~~~~~
+
+At least for comparison we'd like a JIT backend that emits its code
+using GCC or LLVM (irrespective of the time it would take). But it's
+hard to map reasonably well the guards to the C language or to LLVM IR.
+The problems are: (1) we have many guards, we would like to avoid having
+many paths that each do a full
+saving-all-local-variables-that-are-still-alive; (2) it's hard to patch
+a guard when a bridge is compiled from it; (3) instructions like a CALL
+need to expose the local variables that are GC pointers; CALL_MAY_FORCE
+need to expose *all* local variables for optional off-line
+reconstruction of the interpreter state.
+
diff --git a/rpython/doc/jit/index.rst b/rpython/doc/jit/index.rst
--- a/rpython/doc/jit/index.rst
+++ b/rpython/doc/jit/index.rst
@@ -26,6 +26,7 @@
optimizer
virtualizable
vectorization
+ backend
- :doc:`Overview <overview>`: motivating our approach
@@ -34,5 +35,8 @@
- :doc:`Optimizer <optimizer>`: the step between tracing and writing
machine code
-- :doc:`Virtulizable <virtualizable>` how virtualizables work and what they are
- (in other words how to make frames more efficient).
+- :doc:`Virtualizable <virtualizable>`: how virtualizables work and what
+ they are (in other words how to make frames more efficient).
+
+- :doc:`Assembler backend <backend>`: draft notes about the organization
+ of the assembler backends
diff --git a/rpython/jit/backend/test/test_ll_random.py
b/rpython/jit/backend/test/test_ll_random.py
--- a/rpython/jit/backend/test/test_ll_random.py
+++ b/rpython/jit/backend/test/test_ll_random.py
@@ -710,6 +710,12 @@
# 6. a conditional call (for now always with no exception raised)
class CondCallOperation(BaseCallOperation):
+
+ def filter(self, builder):
+ if not builder.cpu.supports_cond_call_value and \
+ self.opnum == rop.COND_CALL_VALUE_I:
+ raise test_random.CannotProduceOperation
+
def produce_into(self, builder, r):
fail_subset = builder.subset_of_intvars(r)
if self.opnum == rop.COND_CALL:
diff --git a/rpython/jit/metainterp/test/test_ajit.py
b/rpython/jit/metainterp/test/test_ajit.py
--- a/rpython/jit/metainterp/test/test_ajit.py
+++ b/rpython/jit/metainterp/test/test_ajit.py
@@ -4558,3 +4558,20 @@
self.meta_interp(f, [])
self.check_resops(guard_nonnull=0)
+ def test_loop_before_main_loop(self):
+ fdriver = JitDriver(greens=[], reds='auto')
+ gdriver = JitDriver(greens=[], reds='auto')
+ def f(i, j):
+ while j > 0: # this loop unrolls because it is in the same
+ j -= 1 # function as a jit_merge_point()
+ while i > 0:
+ fdriver.jit_merge_point()
+ i -= 1
+ def g(i, j, k):
+ while k > 0:
+ gdriver.jit_merge_point()
+ f(i, j)
+ k -= 1
+
+ self.meta_interp(g, [5, 5, 5])
+ self.check_resops(guard_true=10) # 5 unrolled, plus 5 unrelated
diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py
--- a/rpython/rlib/clibffi.py
+++ b/rpython/rlib/clibffi.py
@@ -359,12 +359,13 @@
tpe.members[n] = lltype.nullptr(FFI_TYPE_P.TO)
return tpe
[email protected]()
def cast_type_to_ffitype(tp):
""" This function returns ffi representation of rpython type tp
"""
return TYPE_MAP[tp]
-cast_type_to_ffitype._annspecialcase_ = 'specialize:memo'
[email protected](1)
def push_arg_as_ffiptr(ffitp, arg, ll_buf):
# This is for primitive types. Note that the exact type of 'arg' may be
# different from the expected 'c_size'. To cope with that, we fall back
@@ -396,7 +397,6 @@
arg >>= 8
else:
raise AssertionError
-push_arg_as_ffiptr._annspecialcase_ = 'specialize:argtype(1)'
# type defs for callback and closure userdata
@@ -470,12 +470,12 @@
FUNCFLAG_USE_ERRNO = 8
FUNCFLAG_USE_LASTERROR = 16
[email protected](1) # hack :-/
def get_call_conv(flags, from_jit):
if _WIN32 and not _WIN64 and (flags & FUNCFLAG_CDECL == 0):
return FFI_STDCALL
else:
return FFI_DEFAULT_ABI
-get_call_conv._annspecialcase_ = 'specialize:arg(1)' # hack :-/
class AbstractFuncPtr(object):
@@ -599,6 +599,7 @@
else:
self.restype_size = -1
+ @specialize.argtype(1)
def push_arg(self, value):
#if self.pushed_args == self.argnum:
# raise TypeError("Too many arguments, eats %d, pushed %d" %
@@ -618,7 +619,6 @@
push_arg_as_ffiptr(self.argtypes[self.pushed_args], value,
self.ll_args[self.pushed_args])
self.pushed_args += 1
- push_arg._annspecialcase_ = 'specialize:argtype(1)'
def _check_args(self):
if self.pushed_args < self.argnum:
@@ -627,6 +627,7 @@
def _clean_args(self):
self.pushed_args = 0
+ @specialize.arg(1)
def call(self, RES_TP):
self._check_args()
ffires = c_ffi_call(self.ll_cif, self.funcsym,
@@ -645,7 +646,6 @@
self._clean_args()
check_fficall_result(ffires, self.flags)
return res
- call._annspecialcase_ = 'specialize:arg(1)'
def __del__(self):
if self.ll_args:
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -280,6 +280,7 @@
@oopspec("jit.isconstant(value)")
[email protected]_location()
def isconstant(value):
"""
While tracing, returns whether or not the value is currently known to be
@@ -289,9 +290,9 @@
This is for advanced usage only.
"""
return NonConstant(False)
-isconstant._annspecialcase_ = "specialize:call_location"
@oopspec("jit.isvirtual(value)")
[email protected]_location()
def isvirtual(value):
"""
Returns if this value is virtual, while tracing, it's relatively
@@ -300,7 +301,6 @@
This is for advanced usage only.
"""
return NonConstant(False)
-isvirtual._annspecialcase_ = "specialize:call_location"
@specialize.call_location()
def loop_unrolling_heuristic(lst, size, cutoff=2):
@@ -401,28 +401,27 @@
hop.exception_cannot_occur()
return hop.inputconst(lltype.Signed, _we_are_jitted)
-
+@oopspec('jit.current_trace_length()')
def current_trace_length():
"""During JIT tracing, returns the current trace length (as a constant).
If not tracing, returns -1."""
if NonConstant(False):
return 73
return -1
-current_trace_length.oopspec = 'jit.current_trace_length()'
+@oopspec('jit.debug(string, arg1, arg2, arg3, arg4)')
def jit_debug(string, arg1=-sys.maxint-1, arg2=-sys.maxint-1,
arg3=-sys.maxint-1, arg4=-sys.maxint-1):
"""When JITted, cause an extra operation JIT_DEBUG to appear in
the graphs. Should not be left after debugging."""
keepalive_until_here(string) # otherwise the whole function call is removed
-jit_debug.oopspec = 'jit.debug(string, arg1, arg2, arg3, arg4)'
+@oopspec('jit.assert_green(value)')
[email protected](0)
def assert_green(value):
"""Very strong assert: checks that 'value' is a green
(a JIT compile-time constant)."""
keepalive_until_here(value)
-assert_green._annspecialcase_ = 'specialize:argtype(0)'
-assert_green.oopspec = 'jit.assert_green(value)'
class AssertGreenFailed(Exception):
pass
@@ -457,6 +456,7 @@
# ____________________________________________________________
# VRefs
+@oopspec('virtual_ref(x)')
@specialize.argtype(0)
def virtual_ref(x):
"""Creates a 'vref' object that contains a reference to 'x'. Calls
@@ -467,14 +467,13 @@
dereferenced (by the call syntax 'vref()'), it returns 'x', which is
then forced."""
return DirectJitVRef(x)
-virtual_ref.oopspec = 'virtual_ref(x)'
+@oopspec('virtual_ref_finish(x)')
@specialize.argtype(1)
def virtual_ref_finish(vref, x):
"""See docstring in virtual_ref(x)"""
keepalive_until_here(x) # otherwise the whole function call is removed
_virtual_ref_finish(vref, x)
-virtual_ref_finish.oopspec = 'virtual_ref_finish(x)'
def non_virtual_ref(x):
"""Creates a 'vref' that just returns x when called; nothing more special.
@@ -831,6 +830,7 @@
jit_opencoder_model
"""
[email protected](0)
def set_user_param(driver, text):
"""Set the tunable JIT parameters from a user-supplied string
following the format 'param=value,param=value', or 'off' to
@@ -866,7 +866,6 @@
break
else:
raise ValueError
-set_user_param._annspecialcase_ = 'specialize:arg(0)'
# ____________________________________________________________
#
diff --git a/rpython/rlib/listsort.py b/rpython/rlib/listsort.py
--- a/rpython/rlib/listsort.py
+++ b/rpython/rlib/listsort.py
@@ -1,4 +1,5 @@
from rpython.rlib.rarithmetic import ovfcheck
+from rpython.rlib.objectmodel import specialize
## ------------------------------------------------------------------------
@@ -141,6 +142,12 @@
# or, IOW, the first k elements of a should precede key, and the last
# n-k should follow key.
+ # hint for the annotator: the argument 'rightmost' is always passed in
as
+ # a constant (either True or False), so we can specialize the function
for
+ # the two cases. (This is actually needed for technical reasons: the
+ # variable 'lower' must contain a known method, which is the case in
each
+ # specialized version but not in the unspecialized one.)
+ @specialize.arg(4)
def gallop(self, key, a, hint, rightmost):
assert 0 <= hint < a.len
if rightmost:
@@ -212,12 +219,6 @@
assert lastofs == ofs # so a[ofs-1] < key <= a[ofs]
return ofs
- # hint for the annotator: the argument 'rightmost' is always passed in
as
- # a constant (either True or False), so we can specialize the function
for
- # the two cases. (This is actually needed for technical reasons: the
- # variable 'lower' must contain a known method, which is the case in
each
- # specialized version but not in the unspecialized one.)
- gallop._annspecialcase_ = "specialize:arg(4)"
# ____________________________________________________________
diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py
--- a/rpython/rlib/rarithmetic.py
+++ b/rpython/rlib/rarithmetic.py
@@ -37,6 +37,7 @@
from rpython.rlib import objectmodel
from rpython.flowspace.model import Constant, const
from rpython.flowspace.specialcase import register_flow_sc
+from rpython.rlib.objectmodel import specialize
"""
Long-term target:
@@ -135,14 +136,15 @@
# We deal directly with overflow there anyway.
return r_longlonglong(n)
[email protected](0)
def widen(n):
from rpython.rtyper.lltypesystem import lltype
if _should_widen_type(lltype.typeOf(n)):
return intmask(n)
else:
return n
-widen._annspecialcase_ = 'specialize:argtype(0)'
[email protected]()
def _should_widen_type(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Bool:
@@ -153,19 +155,18 @@
assert issubclass(r_class, base_int)
return r_class.BITS < LONG_BIT or (
r_class.BITS == LONG_BIT and r_class.SIGNED)
-_should_widen_type._annspecialcase_ = 'specialize:memo'
# the replacement for sys.maxint
maxint = int(LONG_TEST - 1)
# for now, it should be equal to sys.maxint on all supported platforms
assert maxint == sys.maxint
[email protected](0)
def is_valid_int(r):
if objectmodel.we_are_translated():
return isinstance(r, int)
return isinstance(r, (base_int, int, long, bool)) and (
-maxint - 1 <= r <= maxint)
-is_valid_int._annspecialcase_ = 'specialize:argtype(0)'
def ovfcheck(r):
"NOT_RPYTHON"
@@ -225,12 +226,12 @@
return build_int(None, self_type.SIGNED, max(self_type.BITS,
other_type.BITS))
raise AssertionError("Merging these types (%s, %s) is not supported" %
(self_type, other_type))
[email protected]()
def signedtype(t):
if t in (bool, int, long):
return True
else:
return t.SIGNED
-signedtype._annspecialcase_ = 'specialize:memo'
def normalizedinttype(t):
if t is int:
@@ -241,11 +242,12 @@
assert t.BITS <= r_longlong.BITS
return build_int(None, t.SIGNED, r_longlong.BITS)
[email protected](0)
def most_neg_value_of_same_type(x):
from rpython.rtyper.lltypesystem import lltype
return most_neg_value_of(lltype.typeOf(x))
-most_neg_value_of_same_type._annspecialcase_ = 'specialize:argtype(0)'
[email protected]()
def most_neg_value_of(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
@@ -256,13 +258,13 @@
return r_class(-(r_class.MASK >> 1) - 1)
else:
return r_class(0)
-most_neg_value_of._annspecialcase_ = 'specialize:memo'
[email protected](0)
def most_pos_value_of_same_type(x):
from rpython.rtyper.lltypesystem import lltype
return most_pos_value_of(lltype.typeOf(x))
-most_pos_value_of_same_type._annspecialcase_ = 'specialize:argtype(0)'
[email protected]()
def most_pos_value_of(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
@@ -273,8 +275,8 @@
return r_class(r_class.MASK >> 1)
else:
return r_class(r_class.MASK)
-most_pos_value_of._annspecialcase_ = 'specialize:memo'
[email protected]()
def is_signed_integer_type(tp):
from rpython.rtyper.lltypesystem import lltype, rffi
if tp is lltype.Signed:
@@ -284,7 +286,6 @@
return r_class.SIGNED
except KeyError:
return False # not an integer type
-is_signed_integer_type._annspecialcase_ = 'specialize:memo'
def highest_bit(n):
"""
@@ -676,7 +677,7 @@
from rpython.rtyper.lltypesystem.lloperation import llop
return llop.int_mod(lltype.Signed, x, y)
[email protected]()
[email protected]()
def byteswap(arg):
""" Convert little->big endian and the opposite
"""
diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py
--- a/rpython/rlib/rbigint.py
+++ b/rpython/rlib/rbigint.py
@@ -90,16 +90,16 @@
FIVEARY_CUTOFF = 8
[email protected](0)
def _mask_digit(x):
return UDIGIT_MASK(x & MASK)
-_mask_digit._annspecialcase_ = 'specialize:argtype(0)'
def _widen_digit(x):
return rffi.cast(LONG_TYPE, x)
[email protected](0)
def _store_digit(x):
return rffi.cast(STORE_TYPE, x)
-_store_digit._annspecialcase_ = 'specialize:argtype(0)'
def _load_unsigned_digit(x):
return rffi.cast(UNSIGNED_TYPE, x)
@@ -175,11 +175,11 @@
return _load_unsigned_digit(self._digits[x])
udigit._always_inline_ = True
+ @specialize.argtype(2)
def setdigit(self, x, val):
val = _mask_digit(val)
assert val >= 0
self._digits[x] = _store_digit(val)
- setdigit._annspecialcase_ = 'specialize:argtype(2)'
setdigit._always_inline_ = True
def numdigits(self):
@@ -1312,6 +1312,7 @@
return res
[email protected](0)
def digits_from_nonneg_long(l):
digits = []
while True:
@@ -1319,8 +1320,8 @@
l = l >> SHIFT
if not l:
return digits[:] # to make it non-resizable
-digits_from_nonneg_long._annspecialcase_ = "specialize:argtype(0)"
-
+
[email protected](0)
def digits_for_most_neg_long(l):
# This helper only works if 'l' is the most negative integer of its
# type, which in base 2 looks like: 1000000..0000
@@ -1335,8 +1336,8 @@
assert l & MASK == l
digits.append(_store_digit(l))
return digits[:] # to make it non-resizable
-digits_for_most_neg_long._annspecialcase_ = "specialize:argtype(0)"
-
+
[email protected](0)
def args_from_rarith_int1(x):
if x > 0:
return digits_from_nonneg_long(x), 1
@@ -1348,11 +1349,10 @@
else:
# the most negative integer! hacks needed...
return digits_for_most_neg_long(x), -1
-args_from_rarith_int1._annspecialcase_ = "specialize:argtype(0)"
-
+
[email protected](0)
def args_from_rarith_int(x):
return args_from_rarith_int1(widen(x))
-args_from_rarith_int._annspecialcase_ = "specialize:argtype(0)"
# ^^^ specialized by the precise type of 'x', which is typically a r_xxx
# instance from rlib.rarithmetic
@@ -1909,6 +1909,7 @@
i += 1
return borrow
[email protected](2)
def _muladd1(a, n, extra=0):
"""Multiply by a single digit and add a single digit, ignoring the sign.
"""
@@ -1926,7 +1927,7 @@
z.setdigit(i, carry)
z._normalize()
return z
-_muladd1._annspecialcase_ = "specialize:argtype(2)"
+
def _v_lshift(z, a, m, d):
""" Shift digit vector a[0:m] d bits left, with 0 <= d < SHIFT. Put
* result in z[0:m], and return the d bits shifted out of the top.
@@ -2178,6 +2179,7 @@
ad = -ad
return ad
[email protected](0)
def _loghelper(func, arg):
"""
A decent logarithm is easy to compute even for huge bigints, but libm can't
@@ -2195,7 +2197,6 @@
# CAUTION: e*SHIFT may overflow using int arithmetic,
# so force use of double. */
return func(x) + (e * float(SHIFT) * func(2.0))
-_loghelper._annspecialcase_ = 'specialize:arg(0)'
# ____________________________________________________________
@@ -2519,6 +2520,7 @@
return output.build()
[email protected](1)
def _bitwise(a, op, b): # '&', '|', '^'
""" Bitwise and/or/xor operations """
@@ -2598,8 +2600,8 @@
return z
return z.invert()
-_bitwise._annspecialcase_ = "specialize:arg(1)"
-
+
[email protected](1)
def _int_bitwise(a, op, b): # '&', '|', '^'
""" Bitwise and/or/xor operations """
@@ -2682,7 +2684,6 @@
return z
return z.invert()
-_int_bitwise._annspecialcase_ = "specialize:arg(1)"
ULONGLONG_BOUND = r_ulonglong(1L << (r_longlong.BITS-1))
LONGLONG_MIN = r_longlong(-(1L << (r_longlong.BITS-1)))
diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py
--- a/rpython/rlib/rmmap.py
+++ b/rpython/rlib/rmmap.py
@@ -10,7 +10,7 @@
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import rposix
from rpython.translator.tool.cbuild import ExternalCompilationInfo
-from rpython.rlib.objectmodel import we_are_translated
+from rpython.rlib.objectmodel import we_are_translated, specialize
from rpython.rlib.nonconst import NonConstant
from rpython.rlib.rarithmetic import intmask
@@ -239,12 +239,12 @@
_, _VirtualProtect_safe = winexternal('VirtualProtect',
[rffi.VOIDP, rffi.SIZE_T, DWORD, LPDWORD],
BOOL)
+ @specialize.ll()
def VirtualProtect(addr, size, mode, oldmode_ptr):
return _VirtualProtect_safe(addr,
rffi.cast(rffi.SIZE_T, size),
rffi.cast(DWORD, mode),
oldmode_ptr)
- VirtualProtect._annspecialcase_ = 'specialize:ll'
VirtualFree, VirtualFree_safe = winexternal('VirtualFree',
[rffi.VOIDP, rffi.SIZE_T, DWORD], BOOL)
diff --git a/rpython/rlib/rposix_stat.py b/rpython/rlib/rposix_stat.py
--- a/rpython/rlib/rposix_stat.py
+++ b/rpython/rlib/rposix_stat.py
@@ -17,7 +17,7 @@
from rpython.rtyper.error import TyperError
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit