Author: anton_gulenko <[email protected]>
Branch:
Changeset: r76869:9ca81bee353c
Date: 2015-02-22 11:45 +0100
http://bitbucket.org/pypy/pypy/changeset/9ca81bee353c/
Log: Merged pypy/pypy into default
diff too long, truncating to 2000 out of 2663 lines
diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py
--- a/lib_pypy/audioop.py
+++ b/lib_pypy/audioop.py
@@ -480,6 +480,7 @@
state_d, prev_i, cur_i,
weightA, weightB)
result = ffi.buffer(rv)[:trim_index]
+ d = state_d[0]
samps = zip(prev_i, cur_i)
return (result, (d, tuple(samps)))
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -6,8 +6,8 @@
.. startrev: 397b96217b85
-Fix non-blocking file reads sometimes raising EAGAIN even though they
-have buffered data waiting (b1c4fcb04a42)
+Non-blocking file reads sometimes raised EAGAIN even though they
+had buffered data waiting, fixed in b1c4fcb04a42
.. branch: vmprof
@@ -18,3 +18,15 @@
.. branch: stdlib-2.7.9
Update stdlib to version 2.7.9
+
+.. branch: fix-kqueue-error2
+Fix exception being raised by kqueue.control (CPython compatibility)
+
+.. branch: gitignore
+
+.. branch: framestate2
+Refactor rpython.flowspace.framestate.FrameState.
+
+.. branch: alt_errno
+Add an alternative location to save LastError, errno around ctypes,
+cffi external calls so things like pdb will not overwrite it
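The mechanism, sketched with the names that appear in the diffs below (rposix.get_saved_alterrno/set_saved_alterrno and the RFFI_ALT_ERRNO flag): calls made on behalf of ctypes/cffi save errno into a second thread-local slot, and the app-level get_errno()/set_errno() read and write that slot, so interpreter-internal calls (a pdb prompt, for instance), which keep using the regular slot, cannot clobber the value the user asked for. Roughly:

    # sketch only; the real wiring is in the _cffi_backend and _rawffi diffs below
    from rpython.rlib import rposix
    from rpython.rtyper.lltypesystem import rffi

    SAVE_ERR = rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO   # flags used around ctypes/cffi calls

    def get_errno(space):
        # app-level get_errno() reads the alternative slot, which only calls
        # carrying RFFI_ALT_ERRNO update
        return space.wrap(rposix.get_saved_alterrno())

    def set_errno(space, errno):
        rposix.set_saved_alterrno(errno)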
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -25,8 +25,8 @@
self.reason = reason
@specialize.memo()
-def encode_error_handler(space):
- # Fast version of the "strict" errors handler.
+def rpy_encode_error_handler():
+ # An RPython version of the "strict" error handler.
def raise_unicode_exception_encode(errors, encoding, msg, u,
startingpos, endingpos):
raise RUnicodeEncodeError(encoding, u, startingpos, endingpos, msg)
@@ -62,7 +62,10 @@
return result
def encode_utf8(space, uni):
+ # Note that this function never raises UnicodeEncodeError,
+ # since surrogate pairs are allowed.
+ # This is not the case with Python3.
return runicode.unicode_encode_utf_8(
uni, len(uni), "strict",
- errorhandler=encode_error_handler(space),
+ errorhandler=rpy_encode_error_handler(),
allow_surrogates=True)
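Context for the new comment above: with allow_surrogates=True a lone surrogate such as U+D800 is emitted as its three-byte sequence instead of raising. A small pure-Python illustration of that byte pattern (illustration only, not the RPython code path itself):

    # the three-byte form used for BMP code points, surrogates included;
    # this should match what encode_utf8() produces for a lone surrogate
    def encode_bmp_codepoint(cp):
        assert 0x800 <= cp <= 0xFFFF
        return bytes(bytearray([0xE0 | (cp >> 12),
                                0x80 | ((cp >> 6) & 0x3F),
                                0x80 | (cp & 0x3F)]))

    assert encode_bmp_codepoint(0xD800) == b'\xed\xa0\x80'  # no UnicodeEncodeError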
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -210,6 +210,6 @@
space.threadlocals.leave_thread(space)
def invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata):
- cerrno._errno_after(rffi.RFFI_ERR_ALL)
+ cerrno._errno_after(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
_invoke_callback(ffi_cif, ll_res, ll_args, ll_userdata)
- cerrno._errno_before(rffi.RFFI_ERR_ALL)
+ cerrno._errno_before(rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO)
diff --git a/pypy/module/_cffi_backend/cerrno.py b/pypy/module/_cffi_backend/cerrno.py
--- a/pypy/module/_cffi_backend/cerrno.py
+++ b/pypy/module/_cffi_backend/cerrno.py
@@ -13,18 +13,18 @@
_errno_after = rposix._errno_after
def get_errno(space):
- return space.wrap(rposix.get_saved_errno())
+ return space.wrap(rposix.get_saved_alterrno())
@unwrap_spec(errno=int)
def set_errno(space, errno):
- rposix.set_saved_errno(errno)
+ rposix.set_saved_alterrno(errno)
# ____________________________________________________________
@unwrap_spec(code=int)
def getwinerror(space, code=-1):
- from rpython.rlib.rwin32 import GetLastError_saved, FormatError
+ from rpython.rlib.rwin32 import GetLastError_alt_saved, FormatError
if code == -1:
- code = GetLastError_saved()
+ code = GetLastError_alt_saved()
message = FormatError(code)
return space.newtuple([space.wrap(code), space.wrap(message)])
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -2716,6 +2716,14 @@
assert data == b"Xhello\n"
posix.close(fdr)
+def test_errno_saved():
+ set_errno(42)
+ # a random function that will reset errno to 0 (at least on non-windows)
+ import os; os.stat('.')
+ #
+ res = get_errno()
+ assert res == 42
+
def test_GetLastError():
if sys.platform != "win32":
py.test.skip("GetLastError(): only for Windows")
diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
--- a/pypy/module/_rawffi/interp_rawffi.py
+++ b/pypy/module/_rawffi/interp_rawffi.py
@@ -608,19 +608,19 @@
return space.wrap(W_CDLL(space, name, cdll))
def get_errno(space):
- return space.wrap(rposix.get_saved_errno())
+ return space.wrap(rposix.get_saved_alterrno())
def set_errno(space, w_errno):
- rposix.set_saved_errno(space.int_w(w_errno))
+ rposix.set_saved_alterrno(space.int_w(w_errno))
if sys.platform == 'win32':
# see also
# https://bitbucket.org/pypy/pypy/issue/1944/ctypes-on-windows-getlasterror
def get_last_error(space):
- return space.wrap(rwin32.GetLastError_saved())
+ return space.wrap(rwin32.GetLastError_alt_saved())
@unwrap_spec(error=int)
def set_last_error(space, error):
- rwin32.SetLastError_saved(error)
+ rwin32.SetLastError_alt_saved(error)
else:
# always have at least a dummy version of these functions
# (https://bugs.pypy.org/issue1242)
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -1,6 +1,8 @@
from rpython.rlib import rpoll, rsocket, rthread, rweakref
from rpython.rlib.rarithmetic import intmask, widen, r_uint
from rpython.rlib.ropenssl import *
+from pypy.module._socket import interp_socket
+from rpython.rlib._rsocket_rffi import MAX_FD_SIZE
from rpython.rlib.rposix import get_saved_errno
from rpython.rlib.rweakref import RWeakValueDictionary
from rpython.rlib.objectmodel import specialize, compute_unique_id
@@ -12,7 +14,6 @@
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.module._ssl.ssl_data import (
LIBRARY_CODES_TO_NAMES, ERROR_CODES_TO_NAMES)
-from pypy.module._socket import interp_socket
# user defined constants
@@ -79,7 +80,8 @@
constants["OP_CIPHER_SERVER_PREFERENCE"] = SSL_OP_CIPHER_SERVER_PREFERENCE
constants["OP_SINGLE_DH_USE"] = SSL_OP_SINGLE_DH_USE
constants["OP_SINGLE_ECDH_USE"] = SSL_OP_SINGLE_ECDH_USE
-constants["OP_NO_COMPRESSION"] = SSL_OP_NO_COMPRESSION
+if SSL_OP_NO_COMPRESSION is not None:
+ constants["OP_NO_COMPRESSION"] = SSL_OP_NO_COMPRESSION
constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER
ver = OPENSSL_VERSION_NUMBER
@@ -886,9 +888,13 @@
libssl_AUTHORITY_INFO_ACCESS_free(info)
def _get_crl_dp(space, certificate):
- # Calls x509v3_cache_extensions and sets up crldp
- libssl_X509_check_ca(certificate)
- dps = certificate[0].c_crldp
+ if OPENSSL_VERSION_NUMBER >= 0x10001000:
+ # Calls x509v3_cache_extensions and sets up crldp
+ libssl_X509_check_ca(certificate)
+ dps = certificate[0].c_crldp
+ else:
+ dps = rffi.cast(stack_st_DIST_POINT, libssl_X509_get_ext_d2i(
+ certificate, NID_crl_distribution_points, None, None))
if not dps:
return None
diff --git a/pypy/module/_ssl/ssl_data.py b/pypy/module/_ssl/ssl_data.py
--- a/pypy/module/_ssl/ssl_data.py
+++ b/pypy/module/_ssl/ssl_data.py
@@ -369,7 +369,8 @@
LIBRARY_CODES_TO_NAMES[cconfig[code]] = code
ERROR_CODES_TO_NAMES = {}
for lib, code in error_codes:
- ERROR_CODES_TO_NAMES[cconfig[lib], cconfig[code]] = code
+ if cconfig[code] is not None:
+ ERROR_CODES_TO_NAMES[cconfig[lib], cconfig[code]] = code
ALERT_DESCRIPTION_CODES = {}
for name in AD_NAMES:
diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py
--- a/pypy/module/_ssl/test/test_ssl.py
+++ b/pypy/module/_ssl/test/test_ssl.py
@@ -236,6 +236,9 @@
def test_npn_protocol(self):
import socket, _ssl, gc
+ if not _ssl.HAS_NPN:
+ skip("NPN requires OpenSSL 1.0.1 or greater")
+
ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1)
ctx._set_npn_protocols(b'\x08http/1.1\x06spdy/2')
ss = ctx._wrap_socket(self.s._sock, True,
@@ -307,12 +310,13 @@
os.path.dirname(__file__), 'dh512.pem'))
def test_load_cert_chain(self):
- import _ssl
+ import _ssl, errno
ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(self.keycert)
ctx.load_cert_chain(self.cert, self.key)
- raises(IOError, ctx.load_cert_chain, "inexistent.pem")
- raises(_ssl.SSLError, ctx.load_cert_chain, self.badcert)
+ exc = raises(IOError, ctx.load_cert_chain, "inexistent.pem")
+ assert exc.value.errno == errno.ENOENT
+ exc = raises(_ssl.SSLError, ctx.load_cert_chain, self.badcert)
raises(_ssl.SSLError, ctx.load_cert_chain, self.emptycert)
# Password protected key and cert
raises(_ssl.SSLError, ctx.load_cert_chain, self.cert_protected,
@@ -360,12 +364,14 @@
assert ctx.cert_store_stats() == {'x509_ca': 0, 'crl': 0, 'x509': 1}
def test_load_dh_params(self):
- import _ssl
+ import _ssl, errno
ctx = _ssl._SSLContext(_ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(self.dh512)
raises(TypeError, ctx.load_dh_params)
raises(TypeError, ctx.load_dh_params, None)
raises(_ssl.SSLError, ctx.load_dh_params, self.keycert)
+ exc = raises(IOError, ctx.load_dh_params, "inexistent.pem")
+ assert exc.value.errno == errno.ENOENT
def test_set_ecdh_curve(self):
import _ssl
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
#define PY_VERSION "2.7.9"
/* PyPy version as a string */
-#define PYPY_VERSION "2.6.0"
+#define PYPY_VERSION "2.6.0-alpha0"
/* Subversion Revision number of this file (not of the repository).
* Empty since Mercurial migration. */
diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
--- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
@@ -202,7 +202,7 @@
assert loop.match_by_id('cfficall', """
p96 = force_token()
setfield_gc(p0, p96, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token .>)
- f97 = call_release_gil(27, i59, 1.0, 3, descr=<Callf 8 fi EF=6 OS=62>)
+ f97 = call_release_gil(91, i59, 1.0, 3, descr=<Callf 8 fi EF=6 OS=62>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
""", ignore_ops=['guard_not_invalidated'])
diff --git a/pypy/module/pypyjit/test_pypy_c/test_thread.py b/pypy/module/pypyjit/test_pypy_c/test_thread.py
--- a/pypy/module/pypyjit/test_pypy_c/test_thread.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_thread.py
@@ -67,21 +67,10 @@
i58 = call_release_gil(0, _, i37, 1, descr=<Calli 4 ii EF=6>)
guard_not_forced(descr=...)
guard_no_exception(descr=...)
- i59 = int_is_true(i58)
- guard_true(i59, descr=...)
- i60 = int_sub(i44, 1)
- p62 = force_token()
- setfield_gc(p0, p62, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token 8>)
- i63 = call_release_gil(0, _, i37, 0, descr=<Calli 4 ii EF=6>)
- guard_not_forced(descr=...)
- guard_no_exception(descr=...)
- i64 = int_is_true(i63)
- guard_false(i64, descr=...)
- p65 = force_token()
- setfield_gc(p0, p65, descr=<FieldP pypy.interpreter.pyframe.PyFrame.vable_token 8>)
- call_release_gil(0, _, i37, descr=<Callv 0 i EF=6>)
- guard_not_forced(descr=...)
- guard_no_exception(descr=...)
+ i58 = int_sub(i44, 1)
+ i59 = call(ConstClass(RPyThreadReleaseLock), i37, descr=<Calli . i EF=2>)
+ i60 = int_is_true(i59)
+ guard_false(i60, descr=...)
guard_not_invalidated(descr=...)
--TICK--
jump(..., descr=...)
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c
--- a/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c
+++ b/pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test.c
@@ -272,7 +272,11 @@
{
double x, sum=0.0, dx=(b-a)/(double)nstep;
for(x=a+0.5*dx; (b-x)*(x-a)>0.0; x+=dx)
+ {
+ double y = f(x);
+ printf("f(x)=%.1f\n", y);
sum += f(x);
+ }
return sum/(double)nstep;
}
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_callbacks.py
@@ -138,6 +138,7 @@
integrate.restype = c_double
def func(x):
+ print 'calculating x**2 of',x
return x**2
result = integrate(0.0, 1.0, CALLBACK(func), 10)
diff --git a/pypy/module/time/interp_time.py b/pypy/module/time/interp_time.py
--- a/pypy/module/time/interp_time.py
+++ b/pypy/module/time/interp_time.py
@@ -168,7 +168,6 @@
if cConfig.has_gettimeofday:
c_gettimeofday = external('gettimeofday', [rffi.VOIDP, rffi.VOIDP],
rffi.INT)
TM_P = lltype.Ptr(tm)
-c_clock = external('clock', [rffi.TIME_TP], clock_t)
c_time = external('time', [rffi.TIME_TP], rffi.TIME_T)
c_ctime = external('ctime', [rffi.TIME_TP], rffi.CCHARP)
c_gmtime = external('gmtime', [rffi.TIME_TP], TM_P,
diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py
--- a/pypy/objspace/std/intobject.py
+++ b/pypy/objspace/std/intobject.py
@@ -427,9 +427,11 @@
def descr_bit_length(self, space):
val = self.intval
+ bits = 0
if val < 0:
- val = -val
- bits = 0
+ # warning, "-val" overflows here
+ val = -((val + 1) >> 1)
+ bits = 1
while val:
bits += 1
val >>= 1
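The new branch avoids negating the most negative machine integer (where -val overflows); halving via -((val + 1) >> 1) and starting the count at 1 gives the same answer. A quick plain-Python check of that arithmetic (Python ints do not overflow, so (-v).bit_length() serves as the reference):

    # sanity check of the overflow-safe bit_length computation used above
    def bit_length_via_halving(val):
        bits = 0
        if val < 0:
            # -val would overflow for the most negative machine int, so shift the
            # magnitude right by one bit first and account for it with bits = 1
            val = -((val + 1) >> 1)
            bits = 1
        while val:
            bits += 1
            val >>= 1
        return bits

    for v in (-1, -2, -3, -4, -10, -150, -2**31, -(2**63)):
        assert bit_length_via_halving(v) == (-v).bit_length()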
diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py
--- a/pypy/objspace/std/test/test_intobject.py
+++ b/pypy/objspace/std/test/test_intobject.py
@@ -521,11 +521,20 @@
(10, 4),
(150, 8),
(-1, 1),
+ (-2, 2),
+ (-3, 2),
+ (-4, 3),
(-10, 4),
(-150, 8),
]:
assert val.bit_length() == bits
+ def test_bit_length_max(self):
+ import sys
+ val = -sys.maxint-1
+ bits = 32 if val == -2147483648 else 64
+ assert val.bit_length() == bits
+
def test_int_real(self):
class A(int): pass
b = A(5).real
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -439,12 +439,12 @@
try:
if encoding == 'ascii':
u = space.unicode_w(w_object)
- eh = unicodehelper.encode_error_handler(space)
+ eh = unicodehelper.rpy_encode_error_handler()
return space.wrap(unicode_encode_ascii(
u, len(u), None, errorhandler=eh))
if encoding == 'utf-8':
u = space.unicode_w(w_object)
- eh = unicodehelper.encode_error_handler(space)
+ eh = unicodehelper.rpy_encode_error_handler()
return space.wrap(unicode_encode_utf_8(
u, len(u), None, errorhandler=eh,
allow_surrogates=True))
diff --git a/rpython/flowspace/flowcontext.py b/rpython/flowspace/flowcontext.py
--- a/rpython/flowspace/flowcontext.py
+++ b/rpython/flowspace/flowcontext.py
@@ -12,8 +12,7 @@
from rpython.flowspace.argument import CallSpec
from rpython.flowspace.model import (Constant, Variable, Block, Link,
c_last_exception, const, FSException)
-from rpython.flowspace.framestate import (FrameState, recursively_unflatten,
- recursively_flatten)
+from rpython.flowspace.framestate import FrameState
from rpython.flowspace.specialcase import (rpython_print_item,
rpython_print_newline)
from rpython.flowspace.operation import op
@@ -278,6 +277,7 @@
"cmp_exc_match",
]
+
class FlowContext(object):
def __init__(self, graph, code):
self.graph = graph
@@ -307,112 +307,91 @@
The locals are ordered according to self.pycode.signature.
"""
- self.valuestackdepth = code.co_nlocals
- self.locals_stack_w = [None] * (code.co_stacksize + code.co_nlocals)
+ self.nlocals = code.co_nlocals
+ self.locals_w = [None] * code.co_nlocals
+ self.stack = []
+
+ @property
+ def stackdepth(self):
+ return len(self.stack)
def pushvalue(self, w_object):
- depth = self.valuestackdepth
- self.locals_stack_w[depth] = w_object
- self.valuestackdepth = depth + 1
+ self.stack.append(w_object)
def popvalue(self):
- depth = self.valuestackdepth - 1
- assert depth >= self.pycode.co_nlocals, "pop from empty value stack"
- w_object = self.locals_stack_w[depth]
- self.locals_stack_w[depth] = None
- self.valuestackdepth = depth
- return w_object
+ return self.stack.pop()
def peekvalue(self, index_from_top=0):
# NOTE: top of the stack is peekvalue(0).
- index = self.valuestackdepth + ~index_from_top
- assert index >= self.pycode.co_nlocals, (
- "peek past the bottom of the stack")
- return self.locals_stack_w[index]
+ index = ~index_from_top
+ return self.stack[index]
def settopvalue(self, w_object, index_from_top=0):
- index = self.valuestackdepth + ~index_from_top
- assert index >= self.pycode.co_nlocals, (
- "settop past the bottom of the stack")
- self.locals_stack_w[index] = w_object
+ index = ~index_from_top
+ self.stack[index] = w_object
def popvalues(self, n):
- values_w = [self.popvalue() for i in range(n)]
- values_w.reverse()
+ if n == 0:
+ return []
+ values_w = self.stack[-n:]
+ del self.stack[-n:]
return values_w
- def dropvalues(self, n):
- finaldepth = self.valuestackdepth - n
- for n in range(finaldepth, self.valuestackdepth):
- self.locals_stack_w[n] = None
- self.valuestackdepth = finaldepth
-
def dropvaluesuntil(self, finaldepth):
- for n in range(finaldepth, self.valuestackdepth):
- self.locals_stack_w[n] = None
- self.valuestackdepth = finaldepth
-
- def save_locals_stack(self):
- return self.locals_stack_w[:self.valuestackdepth]
-
- def restore_locals_stack(self, items_w):
- self.locals_stack_w[:len(items_w)] = items_w
- self.dropvaluesuntil(len(items_w))
+ del self.stack[finaldepth:]
def getstate(self, next_offset):
- # getfastscope() can return real None, for undefined locals
- data = self.save_locals_stack()
- if self.last_exception is None:
- data.append(Constant(None))
- data.append(Constant(None))
- else:
- data.append(self.last_exception.w_type)
- data.append(self.last_exception.w_value)
- recursively_flatten(data)
- return FrameState(data, self.blockstack[:], next_offset)
+ return FrameState(self.locals_w[:], self.stack[:],
+ self.last_exception, self.blockstack[:], next_offset)
def setstate(self, state):
""" Reset the context to the given frame state. """
- data = state.mergeable[:]
- recursively_unflatten(data)
- self.restore_locals_stack(data[:-2]) # Nones == undefined locals
- if data[-2] == Constant(None):
- assert data[-1] == Constant(None)
- self.last_exception = None
- else:
- self.last_exception = FSException(data[-2], data[-1])
+ self.locals_w = state.locals_w[:]
+ self.stack = state.stack[:]
+ self.last_exception = state.last_exception
self.blockstack = state.blocklist[:]
+ self._normalize_raise_signals()
+
+ def _normalize_raise_signals(self):
+ st = self.stack
+ for i in range(len(st)):
+ if isinstance(st[i], RaiseImplicit):
+ st[i] = Raise(st[i].w_exc)
def guessbool(self, w_condition):
if isinstance(w_condition, Constant):
return w_condition.value
return self.recorder.guessbool(self, w_condition)
- def record(self, spaceop):
+ def maybe_merge(self):
recorder = self.recorder
if getattr(recorder, 'final_state', None) is not None:
self.mergeblock(recorder.crnt_block, recorder.final_state)
raise StopFlowing
+
+ def record(self, spaceop):
spaceop.offset = self.last_offset
- recorder.append(spaceop)
+ self.recorder.append(spaceop)
def do_op(self, op):
+ self.maybe_merge()
self.record(op)
self.guessexception(op.canraise)
return op.result
- def guessexception(self, exceptions, force=False):
+ def guessexception(self, exceptions):
"""
Catch possible exceptions implicitly.
"""
if not exceptions:
return
- if not force and not any(isinstance(block, (ExceptBlock, FinallyBlock))
- for block in self.blockstack):
- # The implicit exception wouldn't be caught and would later get
- # removed, so don't bother creating it.
- return
- self.recorder.guessexception(self, *exceptions)
+ # Implicit exceptions are ignored unless they are caught explicitly
+ if self.has_exc_handler():
+ self.recorder.guessexception(self, *exceptions)
+
+ def has_exc_handler(self):
+ return any(isinstance(block, (ExceptBlock, FinallyBlock))
+ for block in self.blockstack)
def build_flow(self):
graph = self.graph
@@ -430,35 +409,8 @@
while True:
next_offset = self.handle_bytecode(next_offset)
self.recorder.final_state = self.getstate(next_offset)
-
- except RaiseImplicit as e:
- w_exc = e.w_exc
- if isinstance(w_exc.w_type, Constant):
- exc_cls = w_exc.w_type.value
- else:
- exc_cls = Exception
- msg = "implicit %s shouldn't occur" % exc_cls.__name__
- w_type = Constant(AssertionError)
- w_value = Constant(AssertionError(msg))
- link = Link([w_type, w_value], self.graph.exceptblock)
- self.recorder.crnt_block.closeblock(link)
-
- except Raise as e:
- w_exc = e.w_exc
- if w_exc.w_type == const(ImportError):
- msg = 'import statement always raises %s' % e
- raise ImportError(msg)
- link = Link([w_exc.w_type, w_exc.w_value], self.graph.exceptblock)
- self.recorder.crnt_block.closeblock(link)
-
except StopFlowing:
pass
-
- except Return as exc:
- w_result = exc.w_value
- link = Link([w_result], self.graph.returnblock)
- self.recorder.crnt_block.closeblock(link)
-
except FlowingError as exc:
if exc.ctx is None:
exc.ctx = self
@@ -476,14 +428,8 @@
if newstate is not None:
break
else:
- newstate = currentstate.copy()
- newblock = SpamBlock(newstate)
- # unconditionally link the current block to the newblock
- outputargs = currentstate.getoutputargs(newstate)
- link = Link(outputargs, newblock)
- currentblock.closeblock(link)
+ newblock = self.make_next_block(currentblock, currentstate)
candidates.insert(0, newblock)
- self.pendingblocks.append(newblock)
return
if newstate.matches(block.framestate):
@@ -493,7 +439,7 @@
newblock = SpamBlock(newstate)
varnames = self.pycode.co_varnames
- for name, w_value in zip(varnames, newstate.mergeable):
+ for name, w_value in zip(varnames, newstate.locals_w):
if isinstance(w_value, Variable):
w_value.rename(name)
# unconditionally link the current block to the newblock
@@ -513,11 +459,21 @@
candidates.insert(0, newblock)
self.pendingblocks.append(newblock)
+ def make_next_block(self, block, state):
+ newstate = state.copy()
+ newblock = SpamBlock(newstate)
+ # unconditionally link the current block to the newblock
+ outputargs = state.getoutputargs(newstate)
+ link = Link(outputargs, newblock)
+ block.closeblock(link)
+ self.pendingblocks.append(newblock)
+ return newblock
+
# hack for unrolling iterables, don't use this
def replace_in_stack(self, oldvalue, newvalue):
w_new = Constant(newvalue)
- stack_items_w = self.locals_stack_w
- for i in range(self.valuestackdepth - 1, self.pycode.co_nlocals - 1, -1):
+ stack_items_w = self.stack
+ for i in range(self.stackdepth - 1, - 1, -1):
w_v = stack_items_w[i]
if isinstance(w_v, Constant):
if w_v.value is oldvalue:
@@ -541,7 +497,7 @@
if isinstance(signal, block.handles):
return block.handle(self, signal)
block.cleanupstack(self)
- return signal.nomoreblocks()
+ return signal.nomoreblocks(self)
def getlocalvarname(self, index):
return self.pycode.co_varnames[index]
@@ -870,7 +826,7 @@
op.simple_call(w_exitfunc, w_None, w_None, w_None).eval(self)
def LOAD_FAST(self, varindex):
- w_value = self.locals_stack_w[varindex]
+ w_value = self.locals_w[varindex]
if w_value is None:
raise FlowingError("Local variable referenced before assignment")
self.pushvalue(w_value)
@@ -915,7 +871,7 @@
def STORE_FAST(self, varindex):
w_newvalue = self.popvalue()
assert w_newvalue is not None
- self.locals_stack_w[varindex] = w_newvalue
+ self.locals_w[varindex] = w_newvalue
if isinstance(w_newvalue, Variable):
w_newvalue.rename(self.getlocalvarname(varindex))
@@ -1128,11 +1084,11 @@
op.simple_call(w_append_meth, w_value).eval(self)
def DELETE_FAST(self, varindex):
- if self.locals_stack_w[varindex] is None:
+ if self.locals_w[varindex] is None:
varname = self.getlocalvarname(varindex)
message = "local variable '%s' referenced before assignment"
raise UnboundLocalError(message, varname)
- self.locals_stack_w[varindex] = None
+ self.locals_w[varindex] = None
def STORE_MAP(self, oparg):
w_key = self.popvalue()
@@ -1220,25 +1176,32 @@
WHY_CONTINUE, Continue
WHY_YIELD not needed
"""
- def nomoreblocks(self):
+ def nomoreblocks(self, ctx):
raise BytecodeCorruption("misplaced bytecode - should not return")
+ def __eq__(self, other):
+ return type(other) is type(self) and other.args == self.args
+
class Return(FlowSignal):
"""Signals a 'return' statement.
- Argument is the wrapped object to return."""
-
+ Argument is the wrapped object to return.
+ """
def __init__(self, w_value):
self.w_value = w_value
- def nomoreblocks(self):
- raise Return(self.w_value)
+ def nomoreblocks(self, ctx):
+ w_result = self.w_value
+ link = Link([w_result], ctx.graph.returnblock)
+ ctx.recorder.crnt_block.closeblock(link)
+ raise StopFlowing
- def state_unpack_variables(self):
+ @property
+ def args(self):
return [self.w_value]
@staticmethod
- def state_pack_variables(w_value):
+ def rebuild(w_value):
return Return(w_value)
class Raise(FlowSignal):
@@ -1248,28 +1211,48 @@
def __init__(self, w_exc):
self.w_exc = w_exc
- def nomoreblocks(self):
- raise self
+ def nomoreblocks(self, ctx):
+ w_exc = self.w_exc
+ if w_exc.w_type == const(ImportError):
+ msg = 'import statement always raises %s' % self
+ raise ImportError(msg)
+ link = Link([w_exc.w_type, w_exc.w_value], ctx.graph.exceptblock)
+ ctx.recorder.crnt_block.closeblock(link)
+ raise StopFlowing
- def state_unpack_variables(self):
+ @property
+ def args(self):
return [self.w_exc.w_type, self.w_exc.w_value]
- @staticmethod
- def state_pack_variables(w_type, w_value):
- return Raise(FSException(w_type, w_value))
+ @classmethod
+ def rebuild(cls, w_type, w_value):
+ return cls(FSException(w_type, w_value))
class RaiseImplicit(Raise):
"""Signals an exception raised implicitly"""
+ def nomoreblocks(self, ctx):
+ w_exc = self.w_exc
+ if isinstance(w_exc.w_type, Constant):
+ exc_cls = w_exc.w_type.value
+ else:
+ exc_cls = Exception
+ msg = "implicit %s shouldn't occur" % exc_cls.__name__
+ w_type = Constant(AssertionError)
+ w_value = Constant(AssertionError(msg))
+ link = Link([w_type, w_value], ctx.graph.exceptblock)
+ ctx.recorder.crnt_block.closeblock(link)
+ raise StopFlowing
class Break(FlowSignal):
"""Signals a 'break' statement."""
- def state_unpack_variables(self):
+ @property
+ def args(self):
return []
@staticmethod
- def state_pack_variables():
+ def rebuild():
return Break.singleton
Break.singleton = Break()
@@ -1281,11 +1264,12 @@
def __init__(self, jump_to):
self.jump_to = jump_to
- def state_unpack_variables(self):
+ @property
+ def args(self):
return [const(self.jump_to)]
@staticmethod
- def state_pack_variables(w_jump_to):
+ def rebuild(w_jump_to):
return Continue(w_jump_to.value)
@@ -1295,21 +1279,21 @@
def __init__(self, ctx, handlerposition):
self.handlerposition = handlerposition
- self.valuestackdepth = ctx.valuestackdepth
+ self.stackdepth = ctx.stackdepth
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.handlerposition == other.handlerposition and
- self.valuestackdepth == other.valuestackdepth)
+ self.stackdepth == other.stackdepth)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
- return hash((self.handlerposition, self.valuestackdepth))
+ return hash((self.handlerposition, self.stackdepth))
def cleanupstack(self, ctx):
- ctx.dropvaluesuntil(self.valuestackdepth)
+ ctx.dropvaluesuntil(self.stackdepth)
def handle(self, ctx, unroller):
raise NotImplementedError
diff --git a/rpython/flowspace/framestate.py b/rpython/flowspace/framestate.py
--- a/rpython/flowspace/framestate.py
+++ b/rpython/flowspace/framestate.py
@@ -1,21 +1,50 @@
-from rpython.flowspace.model import Variable, Constant
+from rpython.flowspace.model import Variable, Constant, FSException
from rpython.rlib.unroll import SpecTag
+def _copy(v):
+ from rpython.flowspace.flowcontext import FlowSignal
+ if isinstance(v, Variable):
+ return Variable(v)
+ elif isinstance(v, FlowSignal):
+ vars = [_copy(var) for var in v.args]
+ return v.rebuild(*vars)
+ else:
+ return v
+
+def _union(seq1, seq2):
+ return [union(v1, v2) for v1, v2 in zip(seq1, seq2)]
+
class FrameState(object):
- def __init__(self, mergeable, blocklist, next_offset):
- self.mergeable = mergeable
+ def __init__(self, locals_w, stack, last_exception, blocklist, next_offset):
+ self.locals_w = locals_w
+ self.stack = stack
+ self.last_exception = last_exception
self.blocklist = blocklist
self.next_offset = next_offset
+ self._mergeable = None
+
+ @property
+ def mergeable(self):
+ if self._mergeable is not None:
+ return self._mergeable
+ self._mergeable = data = self.locals_w + self.stack
+ if self.last_exception is None:
+ data.append(Constant(None))
+ data.append(Constant(None))
+ else:
+ data.append(self.last_exception.w_type)
+ data.append(self.last_exception.w_value)
+ recursively_flatten(data)
+ return data
def copy(self):
"Make a copy of this state in which all Variables are fresh."
- newstate = []
- for w in self.mergeable:
- if isinstance(w, Variable):
- w = Variable(w)
- newstate.append(w)
- return FrameState(newstate, self.blocklist, self.next_offset)
+ exc = self.last_exception
+ if exc is not None:
+ exc = FSException(_copy(exc.w_type), _copy(exc.w_value))
+ return FrameState(map(_copy, self.locals_w), map(_copy, self.stack),
+ exc, self.blocklist, self.next_offset)
def getvariables(self):
return [w for w in self.mergeable if isinstance(w, Variable)]
@@ -33,18 +62,31 @@
return False
return True
+ def _exc_args(self):
+ if self.last_exception is None:
+ return [Constant(None), Constant(None)]
+ else:
+ return [self.last_exception.w_type,
+ self.last_exception.w_value]
+
def union(self, other):
"""Compute a state that is at least as general as both self and other.
A state 'a' is more general than a state 'b' if all Variables in 'b'
are also Variables in 'a', but 'a' may have more Variables.
"""
- newstate = []
try:
- for w1, w2 in zip(self.mergeable, other.mergeable):
- newstate.append(union(w1, w2))
+ locals = _union(self.locals_w, other.locals_w)
+ stack = _union(self.stack, other.stack)
+ if self.last_exception is None and other.last_exception is None:
+ exc = None
+ else:
+ args1 = self._exc_args()
+ args2 = other._exc_args()
+ exc = FSException(union(args1[0], args2[0]),
+ union(args1[1], args2[1]))
except UnionError:
return None
- return FrameState(newstate, self.blocklist, self.next_offset)
+ return FrameState(locals, stack, exc, self.blocklist, self.next_offset)
def getoutputargs(self, targetstate):
"Return the output arguments needed to link self to targetstate."
@@ -61,6 +103,7 @@
def union(w1, w2):
"Union of two variables or constants."
+ from rpython.flowspace.flowcontext import FlowSignal
if w1 == w2:
return w1
if w1 is None or w2 is None:
@@ -69,38 +112,21 @@
if isinstance(w1, Variable) or isinstance(w2, Variable):
return Variable() # new fresh Variable
if isinstance(w1, Constant) and isinstance(w2, Constant):
- # FlowSignal represent stack unrollers in the stack.
- # They should not be merged because they will be unwrapped.
- # This is needed for try:except: and try:finally:, though
- # it makes the control flow a bit larger by duplicating the
- # handlers.
- dont_merge_w1 = w1 in UNPICKLE_TAGS or isinstance(w1.value, SpecTag)
- dont_merge_w2 = w2 in UNPICKLE_TAGS or isinstance(w2.value, SpecTag)
- if dont_merge_w1 or dont_merge_w2:
+ if isinstance(w1.value, SpecTag) or isinstance(w2.value, SpecTag):
raise UnionError
else:
return Variable() # generalize different constants
+ if isinstance(w1, FlowSignal) and isinstance(w2, FlowSignal):
+ if type(w1) is not type(w2):
+ raise UnionError
+ vars = [union(v1, v2) for v1, v2 in zip(w1.args, w2.args)]
+ return w1.rebuild(*vars)
+ if isinstance(w1, FlowSignal) or isinstance(w2, FlowSignal):
+ raise UnionError
raise TypeError('union of %r and %r' % (w1.__class__.__name__,
w2.__class__.__name__))
-# ____________________________________________________________
-#
-# We have to flatten out the state of the frame into a list of
-# Variables and Constants. This is done above by collecting the
-# locals and the items on the value stack, but the latter may contain
-# FlowSignal. We have to handle these specially, because
-# some of them hide references to more Variables and Constants.
-# The trick is to flatten ("pickle") them into the list so that the
-# extra Variables show up directly in the list too.
-
-class PickleTag:
- pass
-
-PICKLE_TAGS = {}
-UNPICKLE_TAGS = {}
-
-
def recursively_flatten(lst):
from rpython.flowspace.flowcontext import FlowSignal
i = 0
@@ -109,22 +135,4 @@
if not isinstance(unroller, FlowSignal):
i += 1
else:
- vars = unroller.state_unpack_variables()
- key = unroller.__class__, len(vars)
- try:
- tag = PICKLE_TAGS[key]
- except KeyError:
- tag = PICKLE_TAGS[key] = Constant(PickleTag())
- UNPICKLE_TAGS[tag] = key
- lst[i:i + 1] = [tag] + vars
-
-
-def recursively_unflatten(lst):
- for i in xrange(len(lst) - 1, -1, -1):
- item = lst[i]
- if item in UNPICKLE_TAGS:
- unrollerclass, argcount = UNPICKLE_TAGS[item]
- arguments = lst[i + 1:i + 1 + argcount]
- del lst[i + 1:i + 1 + argcount]
- unroller = unrollerclass.state_pack_variables(*arguments)
- lst[i] = unroller
+ lst[i:i + 1] = unroller.args
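A small sketch of the refactored FrameState interface (this assumes an RPython checkout is importable; the constructor takes the explicit fields introduced above: locals_w, stack, last_exception, blocklist, next_offset). Merging two states whose corresponding locals are different Constants generalizes that slot to a fresh Variable, mirroring test_union_different_constants further down:

    # sketch: FrameState.union() generalizes differing constants to a Variable
    from rpython.flowspace.model import Variable, Constant
    from rpython.flowspace.framestate import FrameState

    fs1 = FrameState([Constant(1)], [], None, [], 0)
    fs2 = FrameState([Constant(2)], [], None, [], 0)
    merged = fs1.union(fs2)
    assert isinstance(merged.locals_w[0], Variable)  # generalized
    assert merged.last_exception is None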
diff --git a/rpython/flowspace/operation.py b/rpython/flowspace/operation.py
--- a/rpython/flowspace/operation.py
+++ b/rpython/flowspace/operation.py
@@ -517,7 +517,7 @@
ctx.replace_in_stack(it, next_unroller)
return const(v)
w_item = ctx.do_op(self)
- ctx.guessexception([StopIteration, RuntimeError], force=True)
+ ctx.recorder.guessexception(ctx, StopIteration, RuntimeError)
return w_item
class GetAttr(SingleDispatchMixin, HLOperation):
diff --git a/rpython/flowspace/pygraph.py b/rpython/flowspace/pygraph.py
--- a/rpython/flowspace/pygraph.py
+++ b/rpython/flowspace/pygraph.py
@@ -11,10 +11,10 @@
def __init__(self, func, code):
from rpython.flowspace.flowcontext import SpamBlock
- data = [None] * code.co_nlocals
+ locals = [None] * code.co_nlocals
for i in range(code.formalargcount):
- data[i] = Variable(code.co_varnames[i])
- state = FrameState(data + [Constant(None), Constant(None)], [], 0)
+ locals[i] = Variable(code.co_varnames[i])
+ state = FrameState(locals, [], None, [], 0)
initialblock = SpamBlock(state)
super(PyGraph, self).__init__(self._sanitize_funcname(func),
initialblock)
self.func = func
diff --git a/rpython/flowspace/test/test_flowcontext.py b/rpython/flowspace/test/test_flowcontext.py
new file mode 100644
--- /dev/null
+++ b/rpython/flowspace/test/test_flowcontext.py
@@ -0,0 +1,15 @@
+""" Unit tests for flowcontext.py """
+import pytest
+from rpython.flowspace.model import Variable, FSException
+from rpython.flowspace.flowcontext import (
+ Return, Raise, RaiseImplicit, Continue, Break)
+
[email protected]('signal', [
+ Return(Variable()),
+ Raise(FSException(Variable(), Variable())),
+ RaiseImplicit(FSException(Variable(), Variable())),
+ Break(),
+ Continue(42),
+])
+def test_signals(signal):
+ assert signal.rebuild(*signal.args) == signal
diff --git a/rpython/flowspace/test/test_framestate.py b/rpython/flowspace/test/test_framestate.py
--- a/rpython/flowspace/test/test_framestate.py
+++ b/rpython/flowspace/test/test_framestate.py
@@ -15,7 +15,7 @@
ctx = FlowContext(graph, code)
# hack the frame
ctx.setstate(graph.startblock.framestate)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(None)
+ ctx.locals_w[-1] = Constant(None)
return ctx
def func_simple(x):
@@ -31,7 +31,7 @@
def test_neq_hacked_framestate(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable()
+ ctx.locals_w[-1] = Variable()
fs2 = ctx.getstate(0)
assert not fs1.matches(fs2)
@@ -44,7 +44,7 @@
def test_union_on_hacked_framestates(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable()
+ ctx.locals_w[-1] = Variable()
fs2 = ctx.getstate(0)
assert fs1.union(fs2).matches(fs2) # fs2 is more general
assert fs2.union(fs1).matches(fs2) # fs2 is more general
@@ -52,7 +52,7 @@
def test_restore_frame(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable()
+ ctx.locals_w[-1] = Variable()
ctx.setstate(fs1)
assert fs1.matches(ctx.getstate(0))
@@ -71,26 +71,25 @@
def test_getoutputargs(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Variable()
+ ctx.locals_w[-1] = Variable()
fs2 = ctx.getstate(0)
outputargs = fs1.getoutputargs(fs2)
# 'x' -> 'x' is a Variable
# locals_w[n-1] -> locals_w[n-1] is Constant(None)
- assert outputargs == [ctx.locals_stack_w[0], Constant(None)]
+ assert outputargs == [ctx.locals_w[0], Constant(None)]
def test_union_different_constants(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(42)
+ ctx.locals_w[-1] = Constant(42)
fs2 = ctx.getstate(0)
fs3 = fs1.union(fs2)
ctx.setstate(fs3)
- assert isinstance(ctx.locals_stack_w[ctx.pycode.co_nlocals-1],
- Variable) # generalized
+ assert isinstance(ctx.locals_w[-1], Variable) # generalized
def test_union_spectag(self):
ctx = self.get_context(self.func_simple)
fs1 = ctx.getstate(0)
- ctx.locals_stack_w[ctx.pycode.co_nlocals-1] = Constant(SpecTag())
+ ctx.locals_w[-1] = Constant(SpecTag())
fs2 = ctx.getstate(0)
assert fs1.union(fs2) is None # UnionError
diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py
--- a/rpython/jit/backend/arm/callbuilder.py
+++ b/rpython/jit/backend/arm/callbuilder.py
@@ -176,11 +176,14 @@
def write_real_errno(self, save_err):
if save_err & rffi.RFFI_READSAVED_ERRNO:
- # Just before a call, read 'rpy_errno' and write it into the
+ # Just before a call, read '*_errno' and write it into the
# real 'errno'. The r0-r3 registers contain arguments to the
# future call; the r5-r7 registers contain various stuff.
# We still have r8-r12.
- rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+ else:
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.LDR_ri(r.r9.value, r.sp.value,
self.asm.saved_threadlocal_addr + self.current_sp)
@@ -199,10 +202,13 @@
def read_real_errno(self, save_err):
if save_err & rffi.RFFI_SAVE_ERRNO:
# Just after a call, read the real 'errno' and save a copy of
- # it inside our thread-local 'rpy_errno'. Registers r8-r12
+ # it inside our thread-local '*_errno'. Registers r8-r12
# are unused here, and registers r2-r3 never contain anything
# after the call.
- rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+ else:
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
self.mc.LDR_ri(r.r3.value, r.sp.value,
self.asm.saved_threadlocal_addr)
diff --git a/rpython/jit/backend/llsupport/llerrno.py b/rpython/jit/backend/llsupport/llerrno.py
--- a/rpython/jit/backend/llsupport/llerrno.py
+++ b/rpython/jit/backend/llsupport/llerrno.py
@@ -18,19 +18,41 @@
return 3 * WORD
+def get_debug_saved_alterrno(cpu):
+ return cpu._debug_errno_container[4]
+
+def set_debug_saved_alterrno(cpu, nerrno):
+ assert nerrno >= 0
+ cpu._debug_errno_container[4] = nerrno
+
+def get_alt_errno_offset(cpu):
+ if cpu.translate_support_code:
+ from rpython.rlib import rthread
+ return rthread.tlfield_alt_errno.getoffset()
+ else:
+ return 4 * WORD
+
+
def get_debug_saved_lasterror(cpu):
- return cpu._debug_errno_container[4]
+ return cpu._debug_errno_container[5]
def set_debug_saved_lasterror(cpu, nerrno):
assert nerrno >= 0
- cpu._debug_errno_container[4] = nerrno
+ cpu._debug_errno_container[5] = nerrno
def get_rpy_lasterror_offset(cpu):
if cpu.translate_support_code:
from rpython.rlib import rthread
return rthread.tlfield_rpy_lasterror.getoffset()
else:
- return 4 * WORD
+ return 5 * WORD
+
+def get_alt_lasterror_offset(cpu):
+ if cpu.translate_support_code:
+ from rpython.rlib import rthread
+ return rthread.tlfield_alt_lasterror.getoffset()
+ else:
+ return 6 * WORD
def _fetch_addr_errno():
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -63,7 +63,7 @@
ad.lendescr, FLAG_FLOAT)
self.setup()
self._debug_errno_container = lltype.malloc(
- rffi.CArray(lltype.Signed), 5, flavor='raw', zero=True,
+ rffi.CArray(lltype.Signed), 7, flavor='raw', zero=True,
track_allocation=False)
def getarraydescr_for_frame(self, type):
diff --git a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py
--- a/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py
+++ b/rpython/jit/backend/llsupport/test/zrpy_releasegil_test.py
@@ -98,6 +98,7 @@
self.run('close_stack')
assert 'call_release_gil' in udir.join('TestCompileFramework.log').read()
+ # XXX this should also test get/set_alterrno ?
def define_get_set_errno(self):
eci = ExternalCompilationInfo(
post_include_bits=[r'''
diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py
--- a/rpython/jit/backend/test/runner_test.py
+++ b/rpython/jit/backend/test/runner_test.py
@@ -2948,7 +2948,11 @@
calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7,
types.slong)
#
- for saveerr in [rffi.RFFI_ERR_NONE, rffi.RFFI_SAVE_ERRNO]:
+ for saveerr in [rffi.RFFI_ERR_NONE,
+ rffi.RFFI_SAVE_ERRNO,
+ rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO,
+ rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO,
+ ]:
faildescr = BasicFailDescr(1)
inputargs = [BoxInt() for i in range(7)]
i1 = BoxInt()
@@ -2965,15 +2969,23 @@
self.cpu.compile_loop(inputargs, ops, looptoken)
#
llerrno.set_debug_saved_errno(self.cpu, 24)
+ llerrno.set_debug_saved_alterrno(self.cpu, 25)
deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3)
original_result = self.cpu.get_int_value(deadframe, 0)
result = llerrno.get_debug_saved_errno(self.cpu)
- print 'saveerr =', saveerr, ': got result =', result
+ altresult = llerrno.get_debug_saved_alterrno(self.cpu)
+ print 'saveerr =', saveerr, ': got result =', result, \
+ 'altresult =', altresult
#
- if saveerr == rffi.RFFI_SAVE_ERRNO:
- assert result == 42 # from the C code
- else:
- assert result == 24 # not touched
+ expected = {
+ rffi.RFFI_ERR_NONE: (24, 25),
+ rffi.RFFI_SAVE_ERRNO: (42, 25),
+ rffi.RFFI_ERR_NONE | rffi.RFFI_ALT_ERRNO: (24, 25),
+ rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO: (24, 42),
+ }
+ # expected (24, 25) as originally set, with possibly one
+ # of the two changed to 42 by the assembler code
+ assert (result, altresult) == expected[saveerr]
assert original_result == 3456789
def test_call_release_gil_readsaved_errno(self):
@@ -3007,7 +3019,11 @@
calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7,
types.slong)
#
- for saveerr in [rffi.RFFI_READSAVED_ERRNO, rffi.RFFI_ZERO_ERRNO_BEFORE]:
+ for saveerr in [rffi.RFFI_READSAVED_ERRNO,
+ rffi.RFFI_ZERO_ERRNO_BEFORE,
+ rffi.RFFI_READSAVED_ERRNO | rffi.RFFI_ALT_ERRNO,
+ rffi.RFFI_ZERO_ERRNO_BEFORE | rffi.RFFI_ALT_ERRNO,
+ ]:
faildescr = BasicFailDescr(1)
inputargs = [BoxInt() for i in range(7)]
i1 = BoxInt()
@@ -3024,12 +3040,17 @@
self.cpu.compile_loop(inputargs, ops, looptoken)
#
llerrno.set_debug_saved_errno(self.cpu, 24)
+ llerrno.set_debug_saved_alterrno(self.cpu, 25)
deadframe = self.cpu.execute_token(looptoken, 9, 8, 7, 6, 5, 4, 3)
result = self.cpu.get_int_value(deadframe, 0)
assert llerrno.get_debug_saved_errno(self.cpu) == 24
+ assert llerrno.get_debug_saved_alterrno(self.cpu) == 25
#
- if saveerr == rffi.RFFI_READSAVED_ERRNO:
- assert result == 24 + 345678900
+ if saveerr & rffi.RFFI_READSAVED_ERRNO:
+ if saveerr & rffi.RFFI_ALT_ERRNO:
+ assert result == 25 + 345678900
+ else:
+ assert result == 24 + 345678900
else:
assert result == 0 + 345678900
@@ -3064,7 +3085,10 @@
types.slong)
#
for saveerr in [rffi.RFFI_SAVE_ERRNO, # but not _LASTERROR
- rffi.RFFI_SAVE_LASTERROR]:
+ rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO,
+ rffi.RFFI_SAVE_LASTERROR,
+ rffi.RFFI_SAVE_LASTERROR | rffi.RFFI_ALT_ERRNO,
+ ]:
faildescr = BasicFailDescr(1)
inputargs = [BoxInt() for i in range(7)]
i1 = BoxInt()
@@ -3125,7 +3149,9 @@
calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7,
types.slong)
#
- for saveerr in [rffi.RFFI_READSAVED_LASTERROR]:
+ for saveerr in [rffi.RFFI_READSAVED_LASTERROR,
+ rffi.RFFI_READSAVED_LASTERROR | rffi.RFFI_ALT_ERRNO,
+ ]:
faildescr = BasicFailDescr(1)
inputargs = [BoxInt() for i in range(7)]
i1 = BoxInt()
@@ -3198,7 +3224,10 @@
calldescr = self.cpu._calldescr_dynamic_for_tests([types.slong]*7,
types.slong)
#
- for saveerr in [rffi.RFFI_ERR_ALL]:
+ for saveerr in [rffi.RFFI_ERR_ALL,
+ rffi.RFFI_ERR_ALL | rffi.RFFI_ALT_ERRNO,
+ ]:
+ use_alt_errno = saveerr & rffi.RFFI_ALT_ERRNO
faildescr = BasicFailDescr(1)
inputargs = [BoxInt() for i in range(7)]
i1 = BoxInt()
@@ -3214,7 +3243,10 @@
looptoken = JitCellToken()
self.cpu.compile_loop(inputargs, ops, looptoken)
#
- llerrno.set_debug_saved_errno(self.cpu, 8)
+ if use_alt_errno:
+ llerrno.set_debug_saved_alterrno(self.cpu, 8)
+ else:
+ llerrno.set_debug_saved_errno(self.cpu, 8)
llerrno.set_debug_saved_lasterror(self.cpu, 9)
deadframe = self.cpu.execute_token(looptoken, 1, 2, 3, 4, 5, 6, 7)
result = self.cpu.get_int_value(deadframe, 0)
diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py
--- a/rpython/jit/backend/x86/callbuilder.py
+++ b/rpython/jit/backend/x86/callbuilder.py
@@ -196,21 +196,27 @@
SetLastError_addr = self.asm.cpu.cast_adr_to_int(adr)
assert isinstance(self, CallBuilder32) # Windows 32-bit only
#
- rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ lasterror = llerrno.get_alt_lasterror_offset(self.asm.cpu)
+ else:
+ lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu)
tlofsreg = self.get_tlofs_reg() # => esi, callee-saved
self.save_stack_position() # => edi, callee-saved
- mc.PUSH_m((tlofsreg.value, rpy_lasterror))
+ mc.PUSH_m((tlofsreg.value, lasterror))
mc.CALL(imm(SetLastError_addr))
# restore the stack position without assuming a particular
# calling convention of _SetLastError()
self.mc.MOV(esp, self.saved_stack_position_reg)
if save_err & rffi.RFFI_READSAVED_ERRNO:
- # Just before a call, read 'rpy_errno' and write it into the
+ # Just before a call, read '*_errno' and write it into the
# real 'errno'. Most registers are free here, including the
# callee-saved ones, except 'ebx' and except the ones used to
# pass the arguments on x86-64.
- rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+ else:
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
tlofsreg = self.get_tlofs_reg() # => esi or r12, callee-saved
if IS_X86_32:
@@ -234,11 +240,14 @@
if save_err & rffi.RFFI_SAVE_ERRNO:
# Just after a call, read the real 'errno' and save a copy of
- # it inside our thread-local 'rpy_errno'. Most registers are
+ # it inside our thread-local '*_errno'. Most registers are
# free here, including the callee-saved ones, except 'ebx'.
# The tlofs register might have been loaded earlier and is
# callee-saved, so it does not need to be reloaded.
- rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ rpy_errno = llerrno.get_alt_errno_offset(self.asm.cpu)
+ else:
+ rpy_errno = llerrno.get_rpy_errno_offset(self.asm.cpu)
p_errno = llerrno.get_p_errno_offset(self.asm.cpu)
tlofsreg = self.get_tlofs_reg() # => esi or r12 (possibly reused)
mc.MOV_rm(edi.value, (tlofsreg.value, p_errno))
@@ -256,13 +265,16 @@
GetLastError_addr = self.asm.cpu.cast_adr_to_int(adr)
assert isinstance(self, CallBuilder32) # Windows 32-bit only
#
- rpy_lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu)
+ if save_err & rffi.RFFI_ALT_ERRNO:
+ lasterror = llerrno.get_alt_lasterror_offset(self.asm.cpu)
+ else:
+ lasterror = llerrno.get_rpy_lasterror_offset(self.asm.cpu)
self.save_result_value(save_edx=True) # save eax/edx/xmm0
self.result_value_saved_early = True
mc.CALL(imm(GetLastError_addr))
#
tlofsreg = self.get_tlofs_reg() # => esi (possibly reused)
- mc.MOV32_mr((tlofsreg.value, rpy_lasterror), eax.value)
+ mc.MOV32_mr((tlofsreg.value, lasterror), eax.value)
def move_real_result_and_call_reacqgil_addr(self, fastgil):
from rpython.jit.backend.x86 import rx86
@@ -314,8 +326,8 @@
# in 'ebx'), and if not, we fall back to 'reacqgil_addr'.
mc.J_il8(rx86.Conditions['NE'], 0)
jne_location = mc.get_relative_pos()
- # here, ecx is zero (so rpy_fastgil was in 'released' state
- # before the XCHG, but the XCHG acquired it by writing 1)
+ # here, ecx (=old_value) is zero (so rpy_fastgil was in 'released'
+ # state before the XCHG, but the XCHG acquired it by writing 1)
rst = gcrootmap.get_root_stack_top_addr()
mc = self.mc
mc.CMP(ebx, heap(rst))
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -749,18 +749,14 @@
rstack._stack_criticalcode_stop()
def handle_async_forcing(self, deadframe):
- from rpython.jit.metainterp.resume import (force_from_resumedata,
- AlreadyForced)
+ from rpython.jit.metainterp.resume import force_from_resumedata
metainterp_sd = self.metainterp_sd
vinfo = self.jitdriver_sd.virtualizable_info
ginfo = self.jitdriver_sd.greenfield_info
# there is some chance that this is already forced. In this case
# the virtualizable would have a token = NULL
- try:
- all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe,
- vinfo, ginfo)
- except AlreadyForced:
- return
+ all_virtuals = force_from_resumedata(metainterp_sd, self, deadframe,
+ vinfo, ginfo)
# The virtualizable data was stored on the real virtualizable above.
# Handle all_virtuals: keep them for later blackholing from the
# future failure of the GUARD_NOT_FORCED
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
--- a/rpython/jit/metainterp/heapcache.py
+++ b/rpython/jit/metainterp/heapcache.py
@@ -1,61 +1,111 @@
from rpython.jit.metainterp.history import ConstInt
from rpython.jit.metainterp.resoperation import rop
+class HeapCacheValue(object):
+ def __init__(self, box):
+ self.box = box
+ self.likely_virtual = False
+ self.reset_keep_likely_virtual()
+
+ def reset_keep_likely_virtual(self):
+ self.known_class = False
+ # did we see the allocation during tracing?
+ self.seen_allocation = False
+ self.is_unescaped = False
+ self.nonstandard_virtualizable = False
+ self.length = None
+ self.dependencies = None
+
+ def __repr__(self):
+ return 'HeapCacheValue(%s)' % (self.box, )
+
+
+class CacheEntry(object):
+ def __init__(self):
+ # both are {from_value: to_value} dicts
+ # the first is for boxes where we did not see the allocation, the
+ # second for anything else. the reason that distinction makes sense is
+ # because if we saw the allocation, we know it cannot alias with
+ # anything else where we saw the allocation.
+ self.cache_anything = {}
+ self.cache_seen_allocation = {}
+
+ def _clear_cache_on_write(self, seen_allocation_of_target):
+ if not seen_allocation_of_target:
+ self.cache_seen_allocation.clear()
+ self.cache_anything.clear()
+
+ def _getdict(self, value):
+ if value.seen_allocation:
+ return self.cache_seen_allocation
+ else:
+ return self.cache_anything
+
+ def do_write_with_aliasing(self, value, fieldvalue):
+ self._clear_cache_on_write(value.seen_allocation)
+ self._getdict(value)[value] = fieldvalue
+
+ def read(self, value):
+ return self._getdict(value).get(value, None)
+
+ def read_now_known(self, value, fieldvalue):
+ self._getdict(value)[value] = fieldvalue
+
+ def invalidate_unescaped(self):
+ self._invalidate_unescaped(self.cache_anything)
+ self._invalidate_unescaped(self.cache_seen_allocation)
+
+ def _invalidate_unescaped(self, d):
+ for value in d.keys():
+ if not value.is_unescaped:
+ del d[value]
class HeapCache(object):
def __init__(self):
self.reset()
- def reset(self, reset_virtuals=True, trace_branch=True):
- # contains boxes where the class is already known
- self.known_class_boxes = {}
+ def reset(self):
+ # maps boxes to values
+ self.values = {}
# store the boxes that contain newly allocated objects, this maps the
# boxes to a bool, the bool indicates whether or not the object has
# escaped the trace or not (True means the box never escaped, False
# means it did escape), its presences in the mapping shows that it was
# allocated inside the trace
- if trace_branch:
- self.new_boxes = {}
- else:
- for box in self.new_boxes:
- self.new_boxes[box] = False
- if reset_virtuals:
- self.likely_virtuals = {} # only for jit.isvirtual()
+ #if trace_branch:
+ #self.new_boxes = {}
+ # pass
+ #else:
+ #for box in self.new_boxes:
+ # self.new_boxes[box] = False
+ # pass
+ #if reset_virtuals:
+ # self.likely_virtuals = {} # only for jit.isvirtual()
# Tracks which boxes should be marked as escaped when the key box
# escapes.
- self.dependencies = {}
- # contains frame boxes that are not virtualizables
- if trace_branch:
- self.nonstandard_virtualizables = {}
+ #self.dependencies = {}
# heap cache
- # maps descrs to {from_box, to_box} dicts
+ # maps descrs to CacheEntry
self.heap_cache = {}
# heap array cache
- # maps descrs to {index: {from_box: to_box}} dicts
+ # maps descrs to {index: {from_value: to_value}} dicts
self.heap_array_cache = {}
- # cache the length of arrays
- self.length_cache = {}
- # replace_box is called surprisingly often, therefore it's not efficient
- # to go over all the dicts and fix them.
- # instead, these two dicts are kept, and a replace_box adds an entry to
- # each of them.
- # every time one of the dicts heap_cache, heap_array_cache, length_cache
- # is accessed, suitable indirections need to be performed
+ def reset_keep_likely_virtuals(self):
+ for value in self.values.itervalues():
+ value.reset_keep_likely_virtual()
+ self.heap_cache = {}
+ self.heap_array_cache = {}
- # this looks all very subtle, but in practice the patterns of
- # replacements should not be that complex. Usually a box is replaced by
- # a const, once. Also, if something goes wrong, the effect is that less
- # caching than possible is done, which is not a huge problem.
- self.input_indirections = {}
- self.output_indirections = {}
+ def getvalue(self, box):
+ value = self.values.get(box, None)
+ if not value:
+ value = self.values[box] = HeapCacheValue(box)
+ return value
- def _input_indirection(self, box):
- return self.input_indirections.get(box, box)
-
- def _output_indirection(self, box):
- return self.output_indirections.get(box, box)
+ def getvalues(self, boxes):
+ return [self.getvalue(box) for box in boxes]
def invalidate_caches(self, opnum, descr, argboxes):
self.mark_escaped(opnum, descr, argboxes)
@@ -64,18 +114,22 @@
def mark_escaped(self, opnum, descr, argboxes):
if opnum == rop.SETFIELD_GC:
assert len(argboxes) == 2
- box, valuebox = argboxes
- if self.is_unescaped(box) and self.is_unescaped(valuebox):
- self.dependencies.setdefault(box, []).append(valuebox)
+ value, fieldvalue = self.getvalues(argboxes)
+ if value.is_unescaped and fieldvalue.is_unescaped:
+ if value.dependencies is None:
+ value.dependencies = []
+ value.dependencies.append(fieldvalue)
else:
- self._escape(valuebox)
+ self._escape(fieldvalue)
elif opnum == rop.SETARRAYITEM_GC:
assert len(argboxes) == 3
- box, indexbox, valuebox = argboxes
- if self.is_unescaped(box) and self.is_unescaped(valuebox):
- self.dependencies.setdefault(box, []).append(valuebox)
+ value, indexvalue, fieldvalue = self.getvalues(argboxes)
+ if value.is_unescaped and fieldvalue.is_unescaped:
+ if value.dependencies is None:
+ value.dependencies = []
+ value.dependencies.append(fieldvalue)
else:
- self._escape(valuebox)
+ self._escape(fieldvalue)
elif (opnum == rop.CALL and
descr.get_extra_info().oopspecindex ==
descr.get_extra_info().OS_ARRAYCOPY and
isinstance(argboxes[3], ConstInt) and
@@ -84,6 +138,7 @@
len(descr.get_extra_info().write_descrs_arrays) == 1):
# ARRAYCOPY with constant starts and constant length doesn't escape
# its argument
+ # XXX really?
pass
# GETFIELD_GC, MARK_OPAQUE_PTR, PTR_EQ, and PTR_NE don't escape their
# arguments
@@ -95,25 +150,20 @@
opnum != rop.INSTANCE_PTR_EQ and
opnum != rop.INSTANCE_PTR_NE):
for box in argboxes:
- self._escape(box)
+ self._escape_box(box)
- def _escape(self, box):
- try:
- unescaped = self.new_boxes[box]
- except KeyError:
- pass
- else:
- if unescaped:
- self.new_boxes[box] = False
- try:
- del self.likely_virtuals[box]
- except KeyError:
- pass
- try:
- deps = self.dependencies.pop(box)
- except KeyError:
- pass
- else:
+ def _escape_box(self, box):
+ value = self.values.get(box, None)
+ if not value:
+ return
+ self._escape(value)
+
+ def _escape(self, value):
+ value.is_unescaped = False
+ value.likely_virtual = False
+ deps = value.dependencies
+ value.dependencies = None
+ if deps is not None:
for dep in deps:
self._escape(dep)
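[Illustrative sketch, not part of the patch: how escaping propagates through the dependency lists recorded by the SETFIELD_GC/SETARRAYITEM_GC cases above. _Val is a tiny hypothetical stand-in for HeapCacheValue.]

class _Val(object):
    def __init__(self):
        self.is_unescaped = True
        self.likely_virtual = True
        self.dependencies = None

def escape(value):
    value.is_unescaped = False
    value.likely_virtual = False
    deps = value.dependencies
    value.dependencies = None
    if deps is not None:
        # objects reachable only through the escaping one escape with it
        for dep in deps:
            escape(dep)

# Example: q was stored into the still-unescaped p, so it was recorded as
# a dependency of p and escapes as soon as p does.
p, q = _Val(), _Val()
p.dependencies = [q]
escape(p)
assert not q.is_unescaped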
@@ -146,181 +196,198 @@
# A special case for ll_arraycopy, because it is so common, and its
# effects are so well defined.
elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY:
- if (
- isinstance(argboxes[3], ConstInt) and
- isinstance(argboxes[4], ConstInt) and
- isinstance(argboxes[5], ConstInt) and
- len(effectinfo.write_descrs_arrays) == 1
- ):
- descr = effectinfo.write_descrs_arrays[0]
- cache = self.heap_array_cache.get(descr, None)
- srcstart = argboxes[3].getint()
- dststart = argboxes[4].getint()
- length = argboxes[5].getint()
- for i in xrange(length):
- value = self.getarrayitem(
- argboxes[1],
- ConstInt(srcstart + i),
- descr,
- )
- if value is not None:
- self.setarrayitem(
- argboxes[2],
- ConstInt(dststart + i),
- value,
- descr,
- )
- elif cache is not None:
- try:
- idx_cache = cache[dststart + i]
- except KeyError:
- pass
- else:
- if argboxes[2] in self.new_boxes:
- for frombox in idx_cache.keys():
- if not self.is_unescaped(frombox):
- del idx_cache[frombox]
- else:
- idx_cache.clear()
- return
- elif (
- argboxes[2] in self.new_boxes and
- len(effectinfo.write_descrs_arrays) == 1
- ):
- # Fish the descr out of the effectinfo
- cache = self.heap_array_cache.get(effectinfo.write_descrs_arrays[0], None)
- if cache is not None:
- for idx, cache in cache.iteritems():
- for frombox in cache.keys():
- if not self.is_unescaped(frombox):
- del cache[frombox]
- return
+ self._clear_caches_arraycopy(opnum, descr, argboxes, effectinfo)
+ return
else:
- # Only invalidate things that are either escaped or arguments
- for descr, boxes in self.heap_cache.iteritems():
- for box in boxes.keys():
- if not self.is_unescaped(box) or box in argboxes:
- del boxes[box]
+ # first escape arguments:
+ for argbox in argboxes:
+ self._escape_box(argbox)
+
+ # Only invalidate things that are escaped
+ # XXX can do better, only do it for the descrs in the effectinfo
+ for descr, cache in self.heap_cache.iteritems():
+ cache.invalidate_unescaped()
for descr, indices in self.heap_array_cache.iteritems():
- for boxes in indices.itervalues():
- for box in boxes.keys():
- if not self.is_unescaped(box) or box in argboxes:
- del boxes[box]
+ for cache in indices.itervalues():
+ cache.invalidate_unescaped()
return
# XXX not completely sure, but I *think* it is needed to reset() the
# state at least in the 'CALL_*' operations that release the GIL. We
# tried to do only the kind of resetting done by the two loops just
# above, but hit an assertion in "pypy test_multiprocessing.py".
- self.reset(reset_virtuals=False, trace_branch=False)
+ self.reset_keep_likely_virtuals()
+
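[Illustrative sketch, not part of the patch: one plausible shape for CacheEntry.invalidate_unescaped(), assuming a CacheEntry keeps a dict from object values to the last known value of one field. Python 2 idioms (list-returning keys()) are used to match the rest of the file.]

class CacheEntrySketch(object):
    def __init__(self):
        self.contents = {}    # object value -> cached field value

    def invalidate_unescaped(self):
        # after a call with unknown effects, only objects still known to
        # be unescaped cannot have been mutated behind the tracer's back
        for objvalue in self.contents.keys():
            if not objvalue.is_unescaped:
                del self.contents[objvalue]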
+ def _clear_caches_arraycopy(self, opnum, descr, argboxes, effectinfo):
+ seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation
+ if (
+ isinstance(argboxes[3], ConstInt) and
+ isinstance(argboxes[4], ConstInt) and
+ isinstance(argboxes[5], ConstInt) and
+ len(effectinfo.write_descrs_arrays) == 1
+ ):
+ descr = effectinfo.write_descrs_arrays[0]
+ cache = self.heap_array_cache.get(descr, None)
+ srcstart = argboxes[3].getint()
+ dststart = argboxes[4].getint()
+ length = argboxes[5].getint()
+ for i in xrange(length):
+ value = self.getarrayitem(
+ argboxes[1],
+ ConstInt(srcstart + i),
+ descr,
+ )
+ if value is not None:
+ self.setarrayitem(
+ argboxes[2],
+ ConstInt(dststart + i),
+ value,
+ descr,
+ )
+ elif cache is not None:
+ try:
+ idx_cache = cache[dststart + i]
+ except KeyError:
+ pass
+ else:
+ idx_cache._clear_cache_on_write(seen_allocation_of_target)
+ return
+ elif (
+ len(effectinfo.write_descrs_arrays) == 1
+ ):
+ # Fish the descr out of the effectinfo
+ cache = self.heap_array_cache.get(effectinfo.write_descrs_arrays[0], None)
+ if cache is not None:
+ for idx, cache in cache.iteritems():
+ cache._clear_cache_on_write(seen_allocation_of_target)
+ return
+ self.reset_keep_likely_virtuals()
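[Illustrative sketch, not part of the patch: the constant-bounds arraycopy case above, reduced to plain dicts mapping a constant index to the cached item. The real code goes through getarrayitem/setarrayitem and only weakens stale destination entries via _clear_cache_on_write(seen_allocation_of_target); the sketch simply drops them.]

def arraycopy_cache_update(src_cache, dst_cache, srcstart, dststart, length):
    for i in range(length):
        item = src_cache.get(srcstart + i)
        if item is not None:
            # a known source item becomes a known destination item
            dst_cache[dststart + i] = item
        else:
            # unknown source item: the old cached destination item is stale
            dst_cache.pop(dststart + i, None)

# Example:
# src = {0: 'a', 1: 'b'}; dst = {5: 'x', 7: 'z'}
# arraycopy_cache_update(src, dst, 0, 5, 3)  ->  dst == {5: 'a', 6: 'b'}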
def is_class_known(self, box):
- return box in self.known_class_boxes
+ value = self.values.get(box, None)
+ if value:
+ return value.known_class
+ return False
def class_now_known(self, box):
- self.known_class_boxes[box] = None
+ self.getvalue(box).known_class = True
def is_nonstandard_virtualizable(self, box):
- return box in self.nonstandard_virtualizables
+ value = self.values.get(box, None)
+ if value:
+ return value.nonstandard_virtualizable
+ return False
def nonstandard_virtualizables_now_known(self, box):
- self.nonstandard_virtualizables[box] = None
+ self.getvalue(box).nonstandard_virtualizable = True
def is_unescaped(self, box):
- return self.new_boxes.get(box, False)
+ value = self.values.get(box, None)
+ if value:
+ return value.is_unescaped
+ return False
def is_likely_virtual(self, box):
- return box in self.likely_virtuals
+ value = self.values.get(box, None)
+ if value:
+ return value.likely_virtual
+ return False
def new(self, box):
- self.new_boxes[box] = True
- self.likely_virtuals[box] = None
+ value = self.getvalue(box)
+ value.is_unescaped = True
+ value.likely_virtual = True
+ value.seen_allocation = True
def new_array(self, box, lengthbox):
self.new(box)
self.arraylen_now_known(box, lengthbox)
def getfield(self, box, descr):
- box = self._input_indirection(box)
- d = self.heap_cache.get(descr, None)
- if d:
- tobox = d.get(box, None)
- return self._output_indirection(tobox)
+ value = self.values.get(box, None)
+ if value:
+ cache = self.heap_cache.get(descr, None)
+ if cache:
+ tovalue = cache.read(value)
+ if tovalue:
+ return tovalue.box
return None
def getfield_now_known(self, box, descr, fieldbox):
- box = self._input_indirection(box)
- fieldbox = self._input_indirection(fieldbox)
- self.heap_cache.setdefault(descr, {})[box] = fieldbox
+ value = self.getvalue(box)
+ fieldvalue = self.getvalue(fieldbox)
+ cache = self.heap_cache.get(descr, None)
+ if cache is None:
+ cache = self.heap_cache[descr] = CacheEntry()
+ cache.read_now_known(value, fieldvalue)
def setfield(self, box, fieldbox, descr):
- d = self.heap_cache.get(descr, None)
- new_d = self._do_write_with_aliasing(d, box, fieldbox)
- self.heap_cache[descr] = new_d
-
- def _do_write_with_aliasing(self, d, box, fieldbox):
- box = self._input_indirection(box)
- fieldbox = self._input_indirection(fieldbox)
- # slightly subtle logic here
- # a write to an arbitrary box, all other boxes can alias this one
- if not d or box not in self.new_boxes:
- # therefore we throw away the cache
- return {box: fieldbox}
- # the object we are writing to is freshly allocated
- # only remove some boxes from the cache
- new_d = {}
- for frombox, tobox in d.iteritems():
- # the other box is *also* freshly allocated
- # therefore frombox and box *must* contain different objects
- # thus we can keep it in the cache
- if frombox in self.new_boxes:
- new_d[frombox] = tobox
- new_d[box] = fieldbox
- return new_d
+ cache = self.heap_cache.get(descr, None)
+ if cache is None:
+ cache = self.heap_cache[descr] = CacheEntry()
+ value = self.getvalue(box)
+ fieldvalue = self.getvalue(fieldbox)
+ cache.do_write_with_aliasing(value, fieldvalue)
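[Illustrative sketch, not part of the patch: the aliasing rule that do_write_with_aliasing() has to implement, mirroring the comments in the removed _do_write_with_aliasing above. 'contents' stands in for one CacheEntry's dict of object value -> cached field value, and the seen_allocation flag plays the role the old new_boxes dict played.]

def do_write_with_aliasing_sketch(contents, objvalue, fieldvalue):
    if not objvalue.seen_allocation:
        # the written-to object may alias anything else we track, so
        # every other cached entry for this field is now stale
        contents.clear()
    else:
        # we saw objvalue's allocation, so it cannot alias any other
        # object whose allocation we also saw; only entries of unknown
        # origin have to be dropped
        for other in contents.keys():
            if not other.seen_allocation:
                del contents[other]
    contents[objvalue] = fieldvalue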
def getarrayitem(self, box, indexbox, descr):
if not isinstance(indexbox, ConstInt):
- return
- box = self._input_indirection(box)
+ return None
+ value = self.values.get(box, None)
+ if value is None:
+ return None
index = indexbox.getint()
cache = self.heap_array_cache.get(descr, None)
if cache:
indexcache = cache.get(index, None)
if indexcache is not None:
- return self._output_indirection(indexcache.get(box, None))
+ resvalue = indexcache.read(value)
+ if resvalue:
+ return resvalue.box
+ return None
- def getarrayitem_now_known(self, box, indexbox, valuebox, descr):
+ def _get_or_make_array_cache_entry(self, indexbox, descr):
if not isinstance(indexbox, ConstInt):
- return
- box = self._input_indirection(box)
- valuebox = self._input_indirection(valuebox)
+ return None
index = indexbox.getint()
cache = self.heap_array_cache.setdefault(descr, {})
indexcache = cache.get(index, None)
- if indexcache is not None:
- indexcache[box] = valuebox
- else:
- cache[index] = {box: valuebox}
+ if indexcache is None:
+ cache[index] = indexcache = CacheEntry()
+ return indexcache
- def setarrayitem(self, box, indexbox, valuebox, descr):
+
+ def getarrayitem_now_known(self, box, indexbox, fieldbox, descr):
+ value = self.getvalue(box)
+ fieldvalue = self.getvalue(fieldbox)
+ indexcache = self._get_or_make_array_cache_entry(indexbox, descr)
+ if indexcache:
+ indexcache.read_now_known(value, fieldvalue)
+
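[Illustrative sketch, not part of the patch: the two-level layout of heap_array_cache, with plain dicts standing in for CacheEntry objects.]

heap_array_cache = {}    # descr -> {constant index -> per-index cache}

def setarrayitem_sketch(descr, indexbox_is_const, index, objkey, itemval):
    if not indexbox_is_const:
        # a write at an unknown index may hit any cached index, so the
        # whole per-descr cache is dropped (as in setarrayitem below)
        cache = heap_array_cache.get(descr)
        if cache is not None:
            cache.clear()
        return
    indexcache = heap_array_cache.setdefault(descr, {}).setdefault(index, {})
    indexcache[objkey] = itemval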
+ def setarrayitem(self, box, indexbox, fieldbox, descr):
if not isinstance(indexbox, ConstInt):
cache = self.heap_array_cache.get(descr, None)
if cache is not None:
cache.clear()
return
- index = indexbox.getint()
- cache = self.heap_array_cache.setdefault(descr, {})