Author: Maciej Fijalkowski <fij...@gmail.com>
Branch: optresult
Changeset: r75187:7bdcf543fd1c
Date: 2014-12-31 09:42 +0200
http://bitbucket.org/pypy/pypy/changeset/7bdcf543fd1c/
Log:	merge default (badly)

diff too long, truncating to 2000 out of 5605 lines

diff --git a/lib-python/2.7/test/test_xml_etree.py b/lib-python/2.7/test/test_xml_etree.py
--- a/lib-python/2.7/test/test_xml_etree.py
+++ b/lib-python/2.7/test/test_xml_etree.py
@@ -225,9 +225,9 @@
     >>> element.remove(subelement)
     >>> serialize(element) # 5
     '<tag key="value" />'
-    >>> element.remove(subelement)
+    >>> element.remove(subelement)  # doctest: +ELLIPSIS
     Traceback (most recent call last):
-    ValueError: list.remove(x): x not in list
+    ValueError: list.remove(...
     >>> serialize(element) # 6
     '<tag key="value" />'
     >>> element[0:0] = [subelement, subelement, subelement]
diff --git a/lib_pypy/readline.py b/lib_pypy/readline.py
--- a/lib_pypy/readline.py
+++ b/lib_pypy/readline.py
@@ -6,4 +6,11 @@
 are only stubs at the moment.
 """

-from pyrepl.readline import *
+try:
+    from pyrepl.readline import *
+except ImportError:
+    import sys
+    if sys.platform == 'win32':
+        raise ImportError("the 'readline' module is not available on Windows"
+                          " (on either PyPy or CPython)")
+    raise
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -83,17 +83,16 @@

 class __extend__(ast.BoolOp):

-    def _accept_jump_if_any_is(self, gen, condition, target):
-        self.values[0].accept_jump_if(gen, condition, target)
-        for i in range(1, len(self.values)):
+    def _accept_jump_if_any_is(self, gen, condition, target, skip_last=0):
+        for i in range(len(self.values) - skip_last):
             self.values[i].accept_jump_if(gen, condition, target)

     def accept_jump_if(self, gen, condition, target):
         if condition and self.op == ast.And or \
                 (not condition and self.op == ast.Or):
             end = gen.new_block()
-            self._accept_jump_if_any_is(gen, not condition, end)
-            gen.emit_jump(ops.JUMP_FORWARD, target)
+            self._accept_jump_if_any_is(gen, not condition, end, skip_last=1)
+            self.values[-1].accept_jump_if(gen, condition, target)
             gen.use_next_block(end)
         else:
             self._accept_jump_if_any_is(gen, condition, target)
diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py
--- a/pypy/module/_cffi_backend/newtype.py
+++ b/pypy/module/_cffi_backend/newtype.py
@@ -62,10 +62,54 @@
 eptype("intptr_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned)
 eptype("uintptr_t", rffi.UINTPTR_T, ctypeprim.W_CTypePrimitiveUnsigned)
-eptype("ptrdiff_t", rffi.INTPTR_T, ctypeprim.W_CTypePrimitiveSigned)  # <-xxx
 eptype("size_t", rffi.SIZE_T, ctypeprim.W_CTypePrimitiveUnsigned)
 eptype("ssize_t", rffi.SSIZE_T, ctypeprim.W_CTypePrimitiveSigned)

+_WCTSigned = ctypeprim.W_CTypePrimitiveSigned
+_WCTUnsign = ctypeprim.W_CTypePrimitiveUnsigned
+
+eptype("ptrdiff_t", getattr(rffi, 'PTRDIFF_T', rffi.INTPTR_T), _WCTSigned)
+eptype("intmax_t", getattr(rffi, 'INTMAX_T', rffi.LONGLONG), _WCTSigned)
+eptype("uintmax_t", getattr(rffi, 'UINTMAX_T', rffi.LONGLONG), _WCTUnsign)
+
+if hasattr(rffi, 'INT_LEAST8_T'):
+    eptype("int_least8_t", rffi.INT_LEAST8_T, _WCTSigned)
+    eptype("int_least16_t", rffi.INT_LEAST16_T, _WCTSigned)
+    eptype("int_least32_t", rffi.INT_LEAST32_T, _WCTSigned)
+    eptype("int_least64_t", rffi.INT_LEAST64_T, _WCTSigned)
+    eptype("uint_least8_t", rffi.UINT_LEAST8_T, _WCTUnsign)
+    eptype("uint_least16_t",rffi.UINT_LEAST16_T, _WCTUnsign)
+    eptype("uint_least32_t",rffi.UINT_LEAST32_T, _WCTUnsign)
+    eptype("uint_least64_t",rffi.UINT_LEAST64_T, _WCTUnsign)
+else:
+    eptypesize("int_least8_t",   1, _WCTSigned)
+    eptypesize("uint_least8_t",  1, _WCTUnsign)
+    eptypesize("int_least16_t",  2, _WCTSigned)
+    eptypesize("uint_least16_t", 2, _WCTUnsign)
+    eptypesize("int_least32_t",  4, _WCTSigned)
+    eptypesize("uint_least32_t", 4, _WCTUnsign)
+    eptypesize("int_least64_t",  8, _WCTSigned)
+    eptypesize("uint_least64_t", 8, _WCTUnsign)
+
+if hasattr(rffi, 'INT_FAST8_T'):
+    eptype("int_fast8_t", rffi.INT_FAST8_T, _WCTSigned)
+    eptype("int_fast16_t", rffi.INT_FAST16_T, _WCTSigned)
+    eptype("int_fast32_t", rffi.INT_FAST32_T, _WCTSigned)
+    eptype("int_fast64_t", rffi.INT_FAST64_T, _WCTSigned)
+    eptype("uint_fast8_t", rffi.UINT_FAST8_T, _WCTUnsign)
+    eptype("uint_fast16_t",rffi.UINT_FAST16_T, _WCTUnsign)
+    eptype("uint_fast32_t",rffi.UINT_FAST32_T, _WCTUnsign)
+    eptype("uint_fast64_t",rffi.UINT_FAST64_T, _WCTUnsign)
+else:
+    eptypesize("int_fast8_t",   1, _WCTSigned)
+    eptypesize("uint_fast8_t",  1, _WCTUnsign)
+    eptypesize("int_fast16_t",  2, _WCTSigned)
+    eptypesize("uint_fast16_t", 2, _WCTUnsign)
+    eptypesize("int_fast32_t",  4, _WCTSigned)
+    eptypesize("uint_fast32_t", 4, _WCTUnsign)
+    eptypesize("int_fast64_t",  8, _WCTSigned)
+    eptypesize("uint_fast64_t", 8, _WCTUnsign)
+
 @unwrap_spec(name=str)
 def new_primitive_type(space, name):
     try:
diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py
--- a/pypy/module/_cffi_backend/test/_backend_test_c.py
+++ b/pypy/module/_cffi_backend/test/_backend_test_c.py
@@ -397,7 +397,7 @@
 def test_invalid_indexing():
     p = new_primitive_type("int")
     x = cast(p, 42)
-    py.test.raises(TypeError, "p[0]")
+    py.test.raises(TypeError, "x[0]")

 def test_default_str():
     BChar = new_primitive_type("char")
@@ -2718,7 +2718,16 @@
 def test_nonstandard_integer_types():
     for typename in ['int8_t', 'uint8_t', 'int16_t', 'uint16_t',
                      'int32_t', 'uint32_t', 'int64_t', 'uint64_t', 'intptr_t',
-                     'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t']:
+                     'uintptr_t', 'ptrdiff_t', 'size_t', 'ssize_t',
+                     'int_least8_t',  'uint_least8_t',
+                     'int_least16_t', 'uint_least16_t',
+                     'int_least32_t', 'uint_least32_t',
+                     'int_least64_t', 'uint_least64_t',
+                     'int_fast8_t',  'uint_fast8_t',
+                     'int_fast16_t', 'uint_fast16_t',
+                     'int_fast32_t', 'uint_fast32_t',
+                     'int_fast64_t', 'uint_fast64_t',
+                     'intmax_t', 'uintmax_t']:
         new_primitive_type(typename)    # works

 def test_cannot_convert_unicode_to_charp():
diff --git a/pypy/module/_rawffi/buffer.py b/pypy/module/_rawffi/buffer.py
--- a/pypy/module/_rawffi/buffer.py
+++ b/pypy/module/_rawffi/buffer.py
@@ -1,4 +1,5 @@
 from rpython.rlib.buffer import Buffer
+from rpython.rtyper.lltypesystem import rffi

 # XXX not the most efficient implementation

@@ -20,3 +21,7 @@
     def setitem(self, index, char):
         ll_buffer = self.datainstance.ll_buffer
         ll_buffer[index] = char
+
+    def get_raw_address(self):
+        ll_buffer = self.datainstance.ll_buffer
+        return rffi.cast(rffi.CCHARP, ll_buffer)
diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py
--- a/pypy/module/_rawffi/test/test__rawffi.py
+++ b/pypy/module/_rawffi/test/test__rawffi.py
@@ -1144,6 +1144,15 @@
         b[3] = b'x'
         assert b[3] == b'x'

+    def test_pypy_raw_address(self):
+        import _rawffi
+        S = _rawffi.Structure((40, 1))
+        s = S(autofree=True)
+        addr = buffer(s)._pypy_raw_address()
+        assert type(addr) is int
+        assert buffer(s)._pypy_raw_address() == addr
+        assert buffer(s, 10)._pypy_raw_address() == addr + 10
+
     def test_union(self):
         import _rawffi
         longsize = _rawffi.sizeof('l')
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -244,6 +244,9 @@
     def getitem(self, index):
         return self.ptr[index]

+    def get_raw_address(self):
+        return rffi.cast(rffi.CCHARP, self.ptr)
+
 def wrap_getreadbuffer(space, w_self, w_args, func):
     func_target = rffi.cast(readbufferproc, func)
     with lltype.scoped_alloc(rffi.VOIDPP.TO, 1) as ptr:
diff --git a/pypy/module/gc/__init__.py b/pypy/module/gc/__init__.py
--- a/pypy/module/gc/__init__.py
+++ b/pypy/module/gc/__init__.py
@@ -30,6 +30,7 @@
                 'get_referrers': 'referents.get_referrers',
                 '_dump_rpy_heap': 'referents._dump_rpy_heap',
                 'get_typeids_z': 'referents.get_typeids_z',
+                'get_typeids_list': 'referents.get_typeids_list',
                 'GcRef': 'referents.W_GcRef',
                 })
         MixedModule.__init__(self, space, w_name)
diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py
--- a/pypy/module/gc/app_referents.py
+++ b/pypy/module/gc/app_referents.py
@@ -16,7 +16,8 @@
     [0][0][0][-1] inserted after all GC roots, before all non-roots.

     If the argument is a filename and the 'zlib' module is available,
-    we also write a 'typeids.txt' in the same directory, if none exists.
+    we also write 'typeids.txt' and 'typeids.lst' in the same directory,
+    if they don't already exist.
     """
     if isinstance(file, str):
         f = open(file, 'wb')
@@ -30,7 +31,13 @@
         filename2 = os.path.join(os.path.dirname(file), 'typeids.txt')
         if not os.path.exists(filename2):
             data = zlib.decompress(gc.get_typeids_z())
-            f = open(filename2, 'wb')
+            f = open(filename2, 'w')
+            f.write(data)
+            f.close()
+        filename2 = os.path.join(os.path.dirname(file), 'typeids.lst')
+        if not os.path.exists(filename2):
+            data = ''.join(['%d\n' % n for n in gc.get_typeids_list()])
+            f = open(filename2, 'w')
             f.write(data)
             f.close()
     else:
diff --git a/pypy/module/gc/referents.py b/pypy/module/gc/referents.py
--- a/pypy/module/gc/referents.py
+++ b/pypy/module/gc/referents.py
@@ -228,3 +228,8 @@
     a = rgc.get_typeids_z()
     s = ''.join([a[i] for i in range(len(a))])
     return space.wrap(s)
+
+def get_typeids_list(space):
+    l = rgc.get_typeids_list()
+    list_w = [space.wrap(l[i]) for i in range(len(l))]
+    return space.newlist(list_w)
diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py
--- a/pypy/module/micronumpy/descriptor.py
+++ b/pypy/module/micronumpy/descriptor.py
@@ -392,7 +392,7 @@
         alignment = space.int_w(space.getitem(w_data, space.wrap(6)))

         if (w_names == space.w_None) != (w_fields == space.w_None):
-            raise oefmt(space.w_ValueError, "inconsistent fields and names")
+            raise oefmt(space.w_ValueError, "inconsistent fields and names in Numpy dtype unpickling")

         self.byteorder = endian
         self.shape = []
diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py
--- a/pypy/module/micronumpy/test/test_complex.py
+++ b/pypy/module/micronumpy/test/test_complex.py
@@ -478,6 +478,15 @@
         for i in range(4):
             assert c[i] == max(a[i], b[i])

+
+    def test_abs_overflow(self):
+        from numpy import array, absolute, isinf
+        a = array(complex(1.5e308,1.5e308))
+        # Prints a RuntimeWarning, but does not raise
+        b = absolute(a)
+        assert isinf(b)
+
+
     def test_basic(self):
         import sys
         from numpy import (dtype, add, array, dtype,
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -665,6 +665,7 @@
         assert numpy.int64(9223372036854775807) == 9223372036854775807
         assert numpy.int64(9223372036854775807) == 9223372036854775807

+        assert numpy.int64(-9223372036854775807) == -9223372036854775807
         raises(OverflowError, numpy.int64, 9223372036854775808)
         raises(OverflowError, numpy.int64, 9223372036854775808L)

@@ -1233,7 +1234,8 @@
         d = np.dtype(('<f8', 2))
         exc = raises(ValueError, "d.__setstate__((3, '|', None, ('f0', 'f1'), None, 16, 1, 0))")
-        assert exc.value[0] == 'inconsistent fields and names'
+        inconsistent = 'inconsistent fields and names in Numpy dtype unpickling'
+        assert exc.value[0] == inconsistent
         assert d.fields is None
         assert d.shape == (2,)
         assert d.subdtype is not None
@@ -1241,7 +1243,7 @@
         d = np.dtype(('<f8', 2))
         exc = raises(ValueError, "d.__setstate__((3, '|', None, None, {'f0': (np.dtype('float64'), 0), 'f1': (np.dtype('float64'), 8)}, 16, 1, 0))")
-        assert exc.value[0] == 'inconsistent fields and names'
+        assert exc.value[0] == inconsistent
         assert d.fields is None
         assert d.shape == (2,)
         assert d.subdtype is not None
@@ -1282,7 +1284,11 @@
         from cPickle import loads, dumps
         d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)])
-        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None, ('x', 'y', 'z', 'value'), {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0), 'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12)}, 20, 1, 0))
+        assert d.__reduce__() == (dtype, ('V20', 0, 1), (3, '|', None,
+                    ('x', 'y', 'z', 'value'),
+                    {'y': (dtype('int32'), 4), 'x': (dtype('int32'), 0),
+                     'z': (dtype('int32'), 8), 'value': (dtype('float64'), 12),
+                     }, 20, 1, 0))

         new_d = loads(dumps(d))
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -1195,7 +1195,11 @@
     @complex_to_real_unary_op
     def abs(self, v):
-        return rcomplex.c_abs(v[0], v[1])
+        try:
+            return rcomplex.c_abs(v[0], v[1])
+        except OverflowError:
+            # warning ...
+            return rfloat.INFINITY

     @raw_unary_op
     def isnan(self, v):
diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py
--- a/pypy/module/micronumpy/ufuncs.py
+++ b/pypy/module/micronumpy/ufuncs.py
@@ -619,6 +619,9 @@
             space.int_w(w_obj)
         except OperationError, e:
             if e.match(space, space.w_OverflowError):
+                if space.is_true(space.le(w_obj, space.wrap(0))):
+                    return find_binop_result_dtype(space, int64_dtype,
+                                                   current_guess)
                 return find_binop_result_dtype(space, uint64_dtype,
                                                current_guess)
             raise
diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py
--- a/pypy/module/select/test/test_select.py
+++ b/pypy/module/select/test/test_select.py
@@ -286,7 +286,7 @@
         t = thread.start_new_thread(pollster.poll, ())
         try:
-            time.sleep(0.1)
+            time.sleep(0.3)
             for i in range(5): print '',  # to release GIL untranslated
             # trigger ufds array reallocation
             for fd in rfds:
@@ -297,7 +297,7 @@
         finally:
             # and make the call to poll() from the thread return
             os.write(w, b'spam')
-            time.sleep(0.1)
+            time.sleep(0.3)
             for i in range(5): print '',  # to release GIL untranslated
         finally:
             os.close(r)
diff --git a/pypy/module/thread/gil.py b/pypy/module/thread/gil.py
--- a/pypy/module/thread/gil.py
+++ b/pypy/module/thread/gil.py
@@ -7,7 +7,7 @@
 # all but one will be blocked.  The other threads get a chance to run
 # from time to time, using the periodic action GILReleaseAction.
-from rpython.rlib import rthread, rgil
+from rpython.rlib import rthread, rgil, rwin32
 from pypy.module.thread.error import wrap_thread_error
 from pypy.interpreter.executioncontext import PeriodicAsyncAction
 from pypy.module.thread.threadlocals import OSThreadLocals
@@ -76,9 +76,14 @@
 def after_external_call():
     e = get_errno()
+    e2 = 0
+    if rwin32.WIN32:
+        e2 = rwin32.GetLastError()
     rgil.gil_acquire()
     rthread.gc_thread_run()
     after_thread_switch()
+    if rwin32.WIN32:
+        rwin32.SetLastError(e2)
     set_errno(e)
 after_external_call._gctransformer_hint_cannot_collect_ = True
 after_external_call._dont_reach_me_in_del_ = True
diff --git a/pypy/objspace/std/bufferobject.py b/pypy/objspace/std/bufferobject.py
--- a/pypy/objspace/std/bufferobject.py
+++ b/pypy/objspace/std/bufferobject.py
@@ -5,7 +5,7 @@
 from rpython.rlib.objectmodel import compute_hash
 from pypy.interpreter.baseobjspace import W_Root
-from pypy.interpreter.error import oefmt
+from pypy.interpreter.error import OperationError, oefmt
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.typedef import TypeDef

@@ -123,6 +123,17 @@
         return space.wrap("<%s for 0x%s, size %d>" %
                           (info, addrstring, self.buf.getlength()))

+    def descr_pypy_raw_address(self, space):
+        from rpython.rtyper.lltypesystem import lltype, rffi
+        try:
+            ptr = self.buf.get_raw_address()
+        except ValueError:
+            # report the error using the RPython-level internal repr of self.buf
+            msg = ("cannot find the underlying address of buffer that "
+                   "is internally %r" % (self.buf,))
+            raise OperationError(space.w_ValueError, space.wrap(msg))
+        return space.wrap(rffi.cast(lltype.Signed, ptr))
+
 W_Buffer.typedef = TypeDef(
     "buffer", __doc__ = """\
@@ -149,5 +160,6 @@
     __mul__ = interp2app(W_Buffer.descr_mul),
     __rmul__ = interp2app(W_Buffer.descr_mul),
     __repr__ = interp2app(W_Buffer.descr_repr),
+    _pypy_raw_address = interp2app(W_Buffer.descr_pypy_raw_address),
 )
 W_Buffer.typedef.acceptable_as_base_class = False
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -618,8 +618,8 @@
         try:
             i = self.find(w_value, 0, sys.maxint)
         except ValueError:
-            raise OperationError(space.w_ValueError,
-                                 space.wrap("list.remove(x): x not in list"))
+            raise oefmt(space.w_ValueError,
+                        "list.remove(): %R is not in list", w_value)
         if i < self.length():  # otherwise list was mutated
             self.pop(i)
@@ -633,8 +633,7 @@
         try:
             i = self.find(w_value, i, stop)
         except ValueError:
-            raise OperationError(space.w_ValueError,
-                                 space.wrap("list.index(x): x not in list"))
+            raise oefmt(space.w_ValueError, "%R is not in list", w_value)
         return space.wrap(i)

     @unwrap_spec(reverse=bool)
diff --git a/pypy/objspace/std/test/test_bufferobject.py b/pypy/objspace/std/test/test_bufferobject.py
--- a/pypy/objspace/std/test/test_bufferobject.py
+++ b/pypy/objspace/std/test/test_bufferobject.py
@@ -197,3 +197,9 @@
         buf = buffer('hello world')
         raises(TypeError, "buf[MyInt(0)]")
         raises(TypeError, "buf[MyInt(0):MyInt(5)]")
+
+    def test_pypy_raw_address_base(self):
+        raises(ValueError, buffer("foobar")._pypy_raw_address)
+        raises(ValueError, buffer(u"foobar")._pypy_raw_address)
+        e = raises(ValueError, buffer(bytearray("foobar"))._pypy_raw_address)
+        assert 'BytearrayBuffer' in str(e.value)
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -15,14 +15,15 @@
         assert NotEmpty.__dict__.get("b") is None
         raises(TypeError, 'NotEmpty.__dict__[15] = "y"')
         raises(KeyError, 'del NotEmpty.__dict__[15]')
+
+        key, value = NotEmpty.__dict__.popitem()
+        assert (key == 'a' and value == 1) or (key == 'b' and value == 4)
+
         assert NotEmpty.__dict__.setdefault("string", 1) == 1
         assert NotEmpty.__dict__.setdefault("string", 2) == 1
         assert NotEmpty.string == 1
         raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')

-        key, value = NotEmpty.__dict__.popitem()
-        assert (key == 'a' and value == 1) or (key == 'b' and value == 4)
-
     def test_dictproxy_getitem(self):
         class NotEmpty(object):
             a = 1
diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py
--- a/pypy/objspace/std/test/test_listobject.py
+++ b/pypy/objspace/std/test/test_listobject.py
@@ -976,7 +976,10 @@
         c = [0.0, 2.2, 4.4]
         assert c.index(0) == 0.0

-        raises(ValueError, c.index, 3)
+        e = raises(ValueError, c.index, 3)
+        import sys
+        if sys.version_info[:2] == (2, 7):  # CPython 2.7, PyPy
+            assert str(e.value) == '3 is not in list'

     def test_index_cpython_bug(self):
         if self.on_cpython:
@@ -1228,7 +1231,9 @@
         assert l == [0.0, 1.1, 3.3, 4.4]
         l = [0.0, 3.3, 5.5]
         raises(ValueError, c.remove, 2)
-        raises(ValueError, c.remove, 2.2)
+        e = raises(ValueError, c.remove, 2.2)
+        if not self.on_cpython:
+            assert str(e.value) == 'list.remove(): 2.2 is not in list'

     def test_reverse(self):
         c = list('hello world')
diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py
--- a/rpython/annotator/model.py
+++ b/rpython/annotator/model.py
@@ -254,7 +254,10 @@
         return self.__class__(can_be_None=False, no_nul=self.no_nul)

     def nonnulify(self):
-        return self.__class__(can_be_None=self.can_be_None, no_nul=True)
+        if self.can_be_None:
+            return self.__class__(can_be_None=True, no_nul=True)
+        else:
+            return self.__class__(no_nul=True)


 class SomeString(SomeStringOrUnicode):
diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -4326,6 +4326,13 @@
         assert isinstance(s, annmodel.SomeString)
         assert not s.can_be_none()

+    def test_nonnulify(self):
+        s = annmodel.SomeString(can_be_None=True).nonnulify()
+        assert s.can_be_None is True
+        assert s.no_nul is True
+        s = annmodel.SomeChar().nonnulify()
+        assert s.no_nul is True
+
     def g(n):
         return [0, 1, 2, n]
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
--- a/rpython/jit/backend/arm/assembler.py
+++ b/rpython/jit/backend/arm/assembler.py
@@ -901,7 +901,7 @@
             descr = tok.faildescr
             assert isinstance(descr, AbstractFailDescr)
             failure_recovery_pos = block_start + tok.pos_recovery_stub
-            descr._arm_failure_recovery_block = failure_recovery_pos
+            descr.adr_jump_offset = failure_recovery_pos
             relative_offset = tok.pos_recovery_stub - tok.offset
             guard_pos = block_start + tok.offset
             if not tok.is_guard_not_invalidated:
@@ -968,11 +968,11 @@
     def patch_trace(self, faildescr, looptoken, bridge_addr, regalloc):
         b = InstrBuilder(self.cpu.cpuinfo.arch_version)
-        patch_addr = faildescr._arm_failure_recovery_block
+        patch_addr = faildescr.adr_jump_offset
         assert patch_addr != 0
         b.B(bridge_addr)
         b.copy_to_raw_memory(patch_addr)
-        faildescr._arm_failure_recovery_block = 0
+        faildescr.adr_jump_offset = 0

     # regalloc support
     def load(self, loc, value):
diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -192,8 +192,6 @@
             positions[i] = rffi.cast(rffi.USHORT, position)
         # write down the positions of locs
         guardtok.faildescr.rd_locs = positions
-        # we want the descr to keep alive
-        guardtok.faildescr.rd_loop_token = self.current_clt
         return fail_descr, target

     def call_assembler(self, op, guard_op, argloc, vloc, result_loc, tmploc):
diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
--- a/rpython/jit/backend/x86/assembler.py
+++ b/rpython/jit/backend/x86/assembler.py
@@ -572,13 +572,13 @@
     def patch_pending_failure_recoveries(self, rawstart):
         # after we wrote the assembler to raw memory, set up
-        # tok.faildescr._x86_adr_jump_offset to contain the raw address of
+        # tok.faildescr.adr_jump_offset to contain the raw address of
         # the 4-byte target field in the JMP/Jcond instruction, and patch
         # the field in question to point (initially) to the recovery stub
         clt = self.current_clt
         for tok in self.pending_guard_tokens:
             addr = rawstart + tok.pos_jump_offset
-            tok.faildescr._x86_adr_jump_offset = addr
+            tok.faildescr.adr_jump_offset = addr
             relative_target = tok.pos_recovery_stub - (tok.pos_jump_offset + 4)
             assert rx86.fits_in_32bits(relative_target)
             #
@@ -685,7 +685,7 @@
                                                self.cpu.gc_ll_descr.gcrootmap)

     def patch_jump_for_descr(self, faildescr, adr_new_target):
-        adr_jump_offset = faildescr._x86_adr_jump_offset
+        adr_jump_offset = faildescr.adr_jump_offset
         assert adr_jump_offset != 0
         offset = adr_new_target - (adr_jump_offset + 4)
         # If the new target fits within a rel32 of the jump, just patch
@@ -705,7 +705,7 @@
             p = rffi.cast(rffi.INTP, adr_jump_offset)
             adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0])
             mc.copy_to_raw_memory(adr_target)
-        faildescr._x86_adr_jump_offset = 0    # means "patched"
+        faildescr.adr_jump_offset = 0    # means "patched"

     def fixup_target_tokens(self, rawstart):
         for targettoken in self.target_tokens_currently_compiling:
diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
--- a/rpython/jit/metainterp/blackhole.py
+++ b/rpython/jit/metainterp/blackhole.py
@@ -1479,15 +1479,15 @@
         assert kind == 'v'
         return lltype.nullptr(rclass.OBJECTPTR.TO)

-    def _prepare_resume_from_failure(self, opnum, dont_change_position,
-                                     deadframe):
+    def _prepare_resume_from_failure(self, opnum, deadframe):
         from rpython.jit.metainterp.resoperation import rop
         #
-        if opnum == rop.GUARD_TRUE:
+        if opnum == rop.GUARD_FUTURE_CONDITION:
+            pass
+        elif opnum == rop.GUARD_TRUE:
             # Produced directly by some goto_if_not_xxx() opcode that did not
             # jump, but which must now jump.  The pc is just after the opcode.
-            if not dont_change_position:
-                self.position = self.jitcode.follow_jump(self.position)
+            self.position = self.jitcode.follow_jump(self.position)
         #
         elif opnum == rop.GUARD_FALSE:
             # Produced directly by some goto_if_not_xxx() opcode that jumped,
@@ -1517,8 +1517,7 @@
         elif opnum == rop.GUARD_NO_OVERFLOW:
             # Produced by int_xxx_ovf().  The pc is just after the opcode.
             # We get here because it did not used to overflow, but now it does.
-            if not dont_change_position:
-                return get_llexception(self.cpu, OverflowError())
+            return get_llexception(self.cpu, OverflowError())
         #
         elif opnum == rop.GUARD_OVERFLOW:
             # Produced by int_xxx_ovf().  The pc is just after the opcode.
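[Editor's aside, not part of the diff: the hunk above drops the `dont_change_position` flag from `_prepare_resume_from_failure()` and instead dispatches on a dedicated `GUARD_FUTURE_CONDITION` opnum, which `ResumeAtPositionDescr` now reports via a class attribute (see compile.py below). A minimal runnable sketch of the resulting control flow, using simplified stand-in names rather than the real RPython classes:

    # Sketch only: stand-ins for JitCode / BlackholeInterpreter.
    GUARD_FUTURE_CONDITION = 0   # stand-in opnum values
    GUARD_TRUE = 1

    class FakeJitCode(object):
        def follow_jump(self, position):
            return position + 1          # pretend the jump target is one ahead

    class FakeBlackholeInterp(object):
        def __init__(self):
            self.jitcode = FakeJitCode()
            self.position = 0

        def prepare_resume_from_failure(self, opnum):
            if opnum == GUARD_FUTURE_CONDITION:
                pass                     # resume exactly at the recorded pc
            elif opnum == GUARD_TRUE:
                # a goto_if_not_xxx() that did not jump but must now jump
                self.position = self.jitcode.follow_jump(self.position)

    interp = FakeBlackholeInterp()
    interp.prepare_resume_from_failure(GUARD_FUTURE_CONDITION)
    assert interp.position == 0          # old dont_change_position=True path
    interp.prepare_resume_from_failure(GUARD_TRUE)
    assert interp.position == 1          # ordinary "must jump now" path
]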
@@ -1649,13 +1648,9 @@
                                                     resumedescr,
                                                     deadframe,
                                                     all_virtuals)
-    if isinstance(resumedescr, ResumeAtPositionDescr):
-        dont_change_position = True
-    else:
-        dont_change_position = False
     current_exc = blackholeinterp._prepare_resume_from_failure(
-        resumedescr.guard_opnum, dont_change_position, deadframe)
+        resumedescr.guard_opnum, deadframe)

     _run_forever(blackholeinterp, current_exc)
diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
--- a/rpython/jit/metainterp/compile.py
+++ b/rpython/jit/metainterp/compile.py
@@ -61,14 +61,18 @@
     if metainterp_sd.warmrunnerdesc is not None:    # for tests
         assert original_jitcell_token.generation > 0  # has been registered with memmgr
     wref = weakref.ref(original_jitcell_token)
+    clt = original_jitcell_token.compiled_loop_token
+    clt.loop_token_wref = wref
     for op in loop.operations:
         descr = op.getdescr()
+        # not sure what descr.index is about
         if isinstance(descr, ResumeDescr):
-            descr.wref_original_loop_token = wref   # stick it there
-            n = descr.index
-            if n >= 0:       # we also record the resumedescr number
-                original_jitcell_token.compiled_loop_token.record_faildescr_index(n)
-        elif isinstance(descr, JitCellToken):
+            descr.rd_loop_token = clt   # stick it there
+            #n = descr.index
+            #if n >= 0:       # we also record the resumedescr number
+            #    original_jitcell_token.compiled_loop_token.record_faildescr_index(n)
+            # pass
+        if isinstance(descr, JitCellToken):
             # for a CALL_ASSEMBLER: record it as a potential jump.
             if descr is not original_jitcell_token:
                 original_jitcell_token.record_jump_to(descr)
@@ -135,15 +139,14 @@
     part = create_empty_loop(metainterp)
     part.inputargs = inputargs[:]
     h_ops = history.operations
-    memo = Memo()
-    part.operations = [ResOperation(rop.LABEL, inputargs, descr=TargetToken(jitcell_token))] + \
-                      [h_ops[i].clone(memo) for i in range(start, len(h_ops))]
-    jumpargs = [memo.get(box, box) for box in jumpargs]
-    part.operations.append(ResOperation(rop.LABEL, jumpargs, descr=jitcell_token))
+    label = ResOperation(rop.LABEL, inputargs, None,
+                         descr=TargetToken(jitcell_token))
+    end_label = ResOperation(rop.LABEL, jumpargs, None, descr=jitcell_token)
+    part.operations = [label] + h_ops[start:] + [end_label]

     try:
-        start_state = optimize_trace(metainterp_sd, part, enable_opts,
-                                     export_state=True)
+        start_state = optimize_trace(metainterp_sd, jitdriver_sd, part,
+                                     enable_opts, export_state=True)
     except InvalidLoop:
         return None
     target_token = part.operations[0].getdescr()
@@ -170,7 +173,7 @@
     jumpargs = part.operations[-1].getarglist()

     try:
-        optimize_trace(metainterp_sd, part, enable_opts,
+        optimize_trace(metainterp_sd, jitdriver_sd, part, enable_opts,
                        start_state=start_state, export_state=False)
     except InvalidLoop:
         return None
@@ -218,13 +221,14 @@
     h_ops = history.operations

     part.operations = [partial_trace.operations[-1]] + \
-                      [h_ops[i].clone() for i in range(start, len(h_ops))] + \
+                      h_ops[start:] + \
                       [ResOperation(rop.JUMP, jumpargs, None, descr=loop_jitcell_token)]
     label = part.operations[0]
     orignial_label = label.clone()
     assert label.getopnum() == rop.LABEL
     try:
-        optimize_trace(metainterp_sd, part, jitdriver_sd.warmstate.enable_opts,
+        optimize_trace(metainterp_sd, jitdriver_sd, part,
+                       jitdriver_sd.warmstate.enable_opts,
                        start_state=start_state, export_state=False)
     except InvalidLoop:
         # Fall back on jumping to preamble
@@ -234,7 +238,7 @@
                           [ResOperation(rop.JUMP, inputargs[:], None,
                                         descr=loop_jitcell_token)]
         try:
-            optimize_trace(metainterp_sd, part,
+            optimize_trace(metainterp_sd, jitdriver_sd, part,
                            jitdriver_sd.warmstate.enable_opts,
                            inline_short_preamble=False, start_state=start_state,
                            export_state=False)
@@ -494,22 +498,31 @@
     return d

 class ResumeDescr(AbstractFailDescr):
-    pass
+    _attrs_ = ()

 class ResumeGuardDescr(ResumeDescr):
-    # this class also gets the following attributes stored by resume.py code
-    # XXX move all of unused stuff to guard_op, now that we can have
-    # a separate class, so it does not survive that long
-    rd_snapshot = None
-    rd_frame_info_list = None
+    _attrs_ = ('rd_numb', 'rd_count', 'rd_consts', 'rd_virtuals',
+               'rd_frame_info_list', 'rd_pendingfields', 'status')
+
     rd_numb = lltype.nullptr(NUMBERING)
     rd_count = 0
     rd_consts = None
     rd_virtuals = None
+    rd_frame_info_list = None
     rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO)

     status = r_uint(0)

+    def copy_all_attributes_from(self, other):
+        assert isinstance(other, ResumeGuardDescr)
+        self.rd_count = other.rd_count
+        self.rd_consts = other.rd_consts
+        self.rd_frame_info_list = other.rd_frame_info_list
+        self.rd_pendingfields = other.rd_pendingfields
+        self.rd_virtuals = other.rd_virtuals
+        self.rd_numb = other.rd_numb
+        # we don't copy status
+
     ST_BUSY_FLAG = 0x01     # if set, busy tracing from the guard
     ST_TYPE_MASK = 0x06     # mask for the type (TY_xxx)
     ST_SHIFT = 3            # in "status >> ST_SHIFT" is stored:
@@ -524,31 +537,12 @@
     def store_final_boxes(self, guard_op, boxes, metainterp_sd):
         guard_op.setfailargs(boxes)
         self.rd_count = len(boxes)
-        self.guard_opnum = guard_op.getopnum()
         #
         if metainterp_sd.warmrunnerdesc is not None:   # for tests
             jitcounter = metainterp_sd.warmrunnerdesc.jitcounter
             hash = jitcounter.fetch_next_hash()
             self.status = hash & self.ST_SHIFT_MASK

-    def make_a_counter_per_value(self, guard_value_op):
-        assert guard_value_op.getopnum() == rop.GUARD_VALUE
-        box = guard_value_op.getarg(0)
-        try:
-            i = guard_value_op.getfailargs().index(box)
-        except ValueError:
-            return     # xxx probably very rare
-        else:
-            if box.type == history.INT:
-                ty = self.TY_INT
-            elif box.type == history.REF:
-                ty = self.TY_REF
-            elif box.type == history.FLOAT:
-                ty = self.TY_FLOAT
-            else:
-                assert 0, box.type
-            self.status = ty | (r_uint(i) << self.ST_SHIFT)
-
     def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd):
         if self.must_compile(deadframe, metainterp_sd, jitdriver_sd):
             self.start_compiling()
@@ -647,32 +641,62 @@
             self, inputargs, new_loop.operations,
             new_loop.original_jitcell_token)

-    def copy_all_attributes_into(self, res, memo):
-        # XXX a bit ugly to have to list them all here
-        res.rd_snapshot = self.rd_snapshot.copy(memo)
-        res.rd_frame_info_list = self.rd_frame_info_list
-        res.rd_numb = self.rd_numb
-        res.rd_consts = self.rd_consts
-        res.rd_virtuals = self.rd_virtuals
-        res.rd_pendingfields = self.rd_pendingfields
-        res.rd_count = self.rd_count
+    def make_a_counter_per_value(self, guard_value_op):
+        assert guard_value_op.getopnum() == rop.GUARD_VALUE
+        box = guard_value_op.getarg(0)
+        try:
+            i = guard_value_op.getfailargs().index(box)
+        except ValueError:
+            return     # xxx probably very rare
+        else:
+            if box.type == history.INT:
+                ty = self.TY_INT
+            elif box.type == history.REF:
+                ty = self.TY_REF
+            elif box.type == history.FLOAT:
+                ty = self.TY_FLOAT
+            else:
+                assert 0, box.type
+            self.status = ty | (r_uint(i) << self.ST_SHIFT)

-    def _clone_if_mutable(self, memo):
-        res = ResumeGuardDescr()
-        self.copy_all_attributes_into(res, memo)
-        return res
+class ResumeGuardNonnullDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_NONNULL
+
+class ResumeGuardIsnullDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_ISNULL
+
+class ResumeGuardClassDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_CLASS
+
+class ResumeGuardTrueDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_TRUE
+
+class ResumeGuardFalseDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_FALSE
+
+class ResumeGuardNonnullClassDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_NONNULL_CLASS
+
+class ResumeGuardExceptionDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_EXCEPTION
+
+class ResumeGuardNoExceptionDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_NO_EXCEPTION
+
+class ResumeGuardOverflowDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_OVERFLOW
+
+class ResumeGuardNoOverflowDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_NO_OVERFLOW
+
+class ResumeGuardValueDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_VALUE

 class ResumeGuardNotInvalidated(ResumeGuardDescr):
-    def _clone_if_mutable(self, memo):
-        res = ResumeGuardNotInvalidated()
-        self.copy_all_attributes_into(res, memo)
-        return res
+    guard_opnum = rop.GUARD_NOT_INVALIDATED

 class ResumeAtPositionDescr(ResumeGuardDescr):
-    def _clone_if_mutable(self, memo):
-        res = ResumeAtPositionDescr()
-        self.copy_all_attributes_into(res, memo)
-        return res
+    guard_opnum = rop.GUARD_FUTURE_CONDITION

 class AllVirtuals:
     llopaque = True
@@ -693,8 +717,10 @@

 class ResumeGuardForcedDescr(ResumeGuardDescr):
+    guard_opnum = rop.GUARD_NOT_FORCED

-    def __init__(self, metainterp_sd, jitdriver_sd):
+    def _init(self, metainterp_sd, jitdriver_sd):
+        # to please the annotator
         self.metainterp_sd = metainterp_sd
         self.jitdriver_sd = jitdriver_sd

@@ -755,12 +781,39 @@
         hidden_all_virtuals = obj.hide(metainterp_sd.cpu)
         metainterp_sd.cpu.set_savedata_ref(deadframe, hidden_all_virtuals)

-    def _clone_if_mutable(self, memo):
-        res = ResumeGuardForcedDescr(self.metainterp_sd,
-                                     self.jitdriver_sd)
-        self.copy_all_attributes_into(res, memo)
-        return res
-
+def invent_fail_descr_for_op(opnum, optimizer):
+    if opnum == rop.GUARD_NOT_FORCED or opnum == rop.GUARD_NOT_FORCED_2:
+        resumedescr = ResumeGuardForcedDescr()
+        resumedescr._init(optimizer.metainterp_sd, optimizer.jitdriver_sd)
+    elif opnum == rop.GUARD_NOT_INVALIDATED:
+        resumedescr = ResumeGuardNotInvalidated()
+    elif opnum == rop.GUARD_FUTURE_CONDITION:
+        resumedescr = ResumeAtPositionDescr()
+    elif opnum == rop.GUARD_VALUE:
+        resumedescr = ResumeGuardValueDescr()
+    elif opnum == rop.GUARD_NONNULL:
+        resumedescr = ResumeGuardNonnullDescr()
+    elif opnum == rop.GUARD_ISNULL:
+        resumedescr = ResumeGuardIsnullDescr()
+    elif opnum == rop.GUARD_NONNULL_CLASS:
+        resumedescr = ResumeGuardNonnullClassDescr()
+    elif opnum == rop.GUARD_CLASS:
+        resumedescr = ResumeGuardClassDescr()
+    elif opnum == rop.GUARD_TRUE:
+        resumedescr = ResumeGuardTrueDescr()
+    elif opnum == rop.GUARD_FALSE:
+        resumedescr = ResumeGuardFalseDescr()
+    elif opnum == rop.GUARD_EXCEPTION:
+        resumedescr = ResumeGuardExceptionDescr()
+    elif opnum == rop.GUARD_NO_EXCEPTION:
+        resumedescr = ResumeGuardNoExceptionDescr()
+    elif opnum == rop.GUARD_OVERFLOW:
+        resumedescr = ResumeGuardOverflowDescr()
+    elif opnum == rop.GUARD_NO_OVERFLOW:
+        resumedescr = ResumeGuardNoOverflowDescr()
+    else:
+        assert False
+    return resumedescr

 class ResumeFromInterpDescr(ResumeDescr):
     def __init__(self, original_greenkey):
@@ -796,19 +849,18 @@
     # it does not work -- i.e. none of the existing old_loop_tokens match.
     new_trace = create_empty_loop(metainterp)
     new_trace.inputargs = metainterp.history.inputargs[:]
-    # clone ops, as optimize_bridge can mutate the ops
-    memo = Memo()
-    new_trace.operations = [op.clone(memo) for op in
-                            metainterp.history.operations]
+    new_trace.operations = metainterp.history.operations[:]
     metainterp_sd = metainterp.staticdata
-    state = metainterp.jitdriver_sd.warmstate
+    jitdriver_sd = metainterp.jitdriver_sd
+    state = jitdriver_sd.warmstate
     if isinstance(resumekey, ResumeAtPositionDescr):
         inline_short_preamble = False
     else:
         inline_short_preamble = True
     try:
-        state = optimize_trace(metainterp_sd, new_trace, state.enable_opts,
+        state = optimize_trace(metainterp_sd, jitdriver_sd, new_trace,
+                               state.enable_opts,
                                inline_short_preamble, export_state=True)
     except InvalidLoop:
         debug_print("compile_new_bridge: got an InvalidLoop")
diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
--- a/rpython/jit/metainterp/history.py
+++ b/rpython/jit/metainterp/history.py
@@ -124,14 +124,6 @@
     def repr_of_descr(self):
         return '%r' % (self,)

-    def _clone_if_mutable(self, memo):
-        return self
-    def clone_if_mutable(self, memo):
-        clone = self._clone_if_mutable(memo)
-        if not we_are_translated():
-            assert clone.__class__ is self.__class__
-        return clone
-
     def hide(self, cpu):
         descr_ptr = cpu.ts.cast_instance_to_base_ref(self)
         return cpu.ts.cast_to_ref(descr_ptr)
@@ -149,6 +141,8 @@
     index = -1
     final_descr = False

+    _attrs_ = ('adr_jump_offset', 'rd_locs', 'rd_loop_token')
+
     def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd):
         raise NotImplementedError
     def compile_and_attach(self, metainterp, new_loop):
diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py
--- a/rpython/jit/metainterp/optimizeopt/__init__.py
+++ b/rpython/jit/metainterp/optimizeopt/__init__.py
@@ -47,7 +47,7 @@
     return optimizations, unroll

-def optimize_trace(metainterp_sd, loop, enable_opts,
+def optimize_trace(metainterp_sd, jitdriver_sd, loop, enable_opts,
                    inline_short_preamble=True, start_state=None,
                    export_state=True):
     """Optimize loop.operations to remove internal overheadish operations.
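[Editor's aside, not part of the diff: the compile.py refactoring above replaces the instance-level `guard_opnum` (formerly assigned in `store_final_boxes`) with one small `ResumeGuard*Descr` subclass per guard opnum, selected by `invent_fail_descr_for_op`. A minimal runnable sketch of this class-per-opnum pattern, with stand-in names rather than the real PyPy classes:

    # Sketch only: pick the descr class from the opnum, so the opnum no
    # longer needs to be stored on each instance.
    GUARD_TRUE, GUARD_VALUE = 1, 2        # stand-in opnum values

    class FakeResumeGuardDescr(object):
        guard_opnum = -1                  # overridden by each subclass

    class FakeGuardTrueDescr(FakeResumeGuardDescr):
        guard_opnum = GUARD_TRUE

    class FakeGuardValueDescr(FakeResumeGuardDescr):
        guard_opnum = GUARD_VALUE

    _DESCR_FOR_OPNUM = {
        GUARD_TRUE: FakeGuardTrueDescr,
        GUARD_VALUE: FakeGuardValueDescr,
    }

    def fake_invent_fail_descr_for_op(opnum):
        # mirrors the shape of invent_fail_descr_for_op() above
        return _DESCR_FOR_OPNUM[opnum]()

    assert fake_invent_fail_descr_for_op(GUARD_TRUE).guard_opnum == GUARD_TRUE
]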
@@ -59,11 +59,13 @@
                     loop.operations)
         optimizations, unroll = build_opt_chain(metainterp_sd, enable_opts)
         if unroll:
-            return optimize_unroll(metainterp_sd, loop, optimizations,
+            return optimize_unroll(metainterp_sd, jitdriver_sd, loop,
+                                   optimizations,
                                    inline_short_preamble, start_state,
                                    export_state)
         else:
-            optimizer = Optimizer(metainterp_sd, loop, optimizations)
+            optimizer = Optimizer(metainterp_sd, jitdriver_sd, loop,
+                                  optimizations)
             optimizer.propagate_all_forward()
     finally:
         debug_stop("jit-optimize")
diff --git a/rpython/jit/metainterp/optimizeopt/generalize.py b/rpython/jit/metainterp/optimizeopt/generalize.py
--- a/rpython/jit/metainterp/optimizeopt/generalize.py
+++ b/rpython/jit/metainterp/optimizeopt/generalize.py
@@ -1,4 +1,5 @@
-from rpython.jit.metainterp.optimizeopt.optimizer import MININT, MAXINT
+from rpython.jit.metainterp.optimizeopt.optimizer import MININT, MAXINT,\
+     IntOptValue

 class GeneralizationStrategy(object):
@@ -14,7 +15,8 @@
         for v in self.optimizer.values.values():
             if v.is_constant():
                 continue
-            if v.intbound.lower < MININT / 2:
-                v.intbound.lower = MININT
-            if v.intbound.upper > MAXINT / 2:
-                v.intbound.upper = MAXINT
+            if isinstance(v, IntOptValue):
+                if v.intbound.lower < MININT / 2:
+                    v.intbound.lower = MININT
+                if v.intbound.upper > MAXINT / 2:
+                    v.intbound.upper = MAXINT
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -4,7 +4,8 @@
 from rpython.jit.metainterp.optimizeopt.util import args_dict
 from rpython.jit.metainterp.history import Const
 from rpython.jit.metainterp.jitexc import JitException
-from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED
+from rpython.jit.metainterp.optimizeopt.optimizer import Optimization,\
+     MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED
 from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from rpython.jit.metainterp.optimize import InvalidLoop
 from rpython.jit.metainterp.resoperation import rop, ResOperation, OpHelpers
@@ -63,6 +64,17 @@
         # cancelling its previous effects with no side effect.
         self._lazy_setfield = None

+    def value_updated(self, oldvalue, newvalue):
+        try:
+            fieldvalue = self._cached_fields[oldvalue]
+        except KeyError:
+            pass
+        else:
+            self._cached_fields[newvalue] = fieldvalue
+            op = self._cached_fields_getfield_op[oldvalue].clone()
+            op.setarg(0, newvalue.box)
+            self._cached_fields_getfield_op[newvalue] = op
+
     def possible_aliasing(self, optheap, structvalue):
         # If lazy_setfield is set and contains a setfield on a different
         # structvalue, then we are annoyed, because it may point to either
@@ -82,10 +94,12 @@
         else:
             return self._cached_fields.get(structvalue, None)

-    def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
+    def remember_field_value(self, structvalue, fieldvalue, op=None,
+                             optimizer=None):
         assert self._lazy_setfield is None
         self._cached_fields[structvalue] = fieldvalue
-        self._cached_fields_getfield_op[structvalue] = getfield_op
+        op = optimizer.get_op_replacement(op)
+        self._cached_fields_getfield_op[structvalue] = op

     def force_lazy_setfield(self, optheap, can_cache=True):
         op = self._lazy_setfield
@@ -109,7 +123,8 @@
             # field.
             structvalue = optheap.getvalue(op.getarg(0))
             fieldvalue = optheap.getvalue(op.getarglist()[-1])
-            self.remember_field_value(structvalue, fieldvalue, op)
+            self.remember_field_value(structvalue, fieldvalue, op,
+                                      optheap.optimizer)
         elif not can_cache:
             self.clear()

@@ -117,18 +132,6 @@
         self._cached_fields.clear()
         self._cached_fields_getfield_op.clear()

-    def turned_constant(self, newvalue, value):
-        if newvalue not in self._cached_fields and value in self._cached_fields:
-            self._cached_fields[newvalue] = self._cached_fields[value]
-            op = self._cached_fields_getfield_op[value].clone()
-            constbox = value.box
-            assert isinstance(constbox, Const)
-            op.setarg(0, constbox)
-            self._cached_fields_getfield_op[newvalue] = op
-        for structvalue in self._cached_fields.keys():
-            if self._cached_fields[structvalue] is value:
-                self._cached_fields[structvalue] = newvalue
-
     def produce_potential_short_preamble_ops(self, optimizer, shortboxes, descr):
         if self._lazy_setfield is not None:
             return
@@ -138,7 +141,7 @@
                 continue
             value = optimizer.getvalue(op.getarg(0))
             if value in optimizer.opaque_pointers:
-                if value.level < LEVEL_KNOWNCLASS:
+                if value.getlevel() < LEVEL_KNOWNCLASS:
                     continue
                 if op.getopnum() != rop.SETFIELD_GC and op.getopnum() != rop.GETFIELD_GC:
                     continue
@@ -191,6 +194,17 @@
         self._seen_guard_not_invalidated = False
         self.postponed_op = None

+    def setup(self):
+        self.optimizer.optheap = self
+
+    def value_updated(self, oldvalue, newvalue):
+        # XXXX very unhappy about that
+        for cf in self.cached_fields.itervalues():
+            cf.value_updated(oldvalue, newvalue)
+        for submap in self.cached_arrayitems.itervalues():
+            for cf in submap.itervalues():
+                cf.value_updated(oldvalue, newvalue)
+
     def force_at_end_of_preamble(self):
         self.cached_dict_reads.clear()
         self.corresponding_array_descrs.clear()
@@ -363,16 +377,6 @@
         # ^^^ we only need to force this field; the other fields
         # of virtualref_info and virtualizable_info are not gcptrs.
-    def turned_constant(self, value):
-        assert value.is_constant()
-        newvalue = self.getvalue(value.box)
-        if value is not newvalue:
-            for cf in self.cached_fields.itervalues():
-                cf.turned_constant(newvalue, value)
-            for submap in self.cached_arrayitems.itervalues():
-                for cf in submap.itervalues():
-                    cf.turned_constant(newvalue, value)
-
     def force_lazy_setfield(self, descr, can_cache=True):
         try:
             cf = self.cached_fields[descr]
@@ -386,7 +390,7 @@
         except KeyError:
             return
         for idx, cf in submap.iteritems():
-            if indexvalue is None or indexvalue.intbound.contains(idx):
+            if indexvalue is None or indexvalue.getintbound().contains(idx):
                 cf.force_lazy_setfield(self, can_cache)

     def _assert_valid_cf(self, cf):
@@ -448,7 +452,7 @@
         self.emit_operation(op)
         # then remember the result of reading the field
         fieldvalue = self.getvalue(op)
-        cf.remember_field_value(structvalue, fieldvalue, op)
+        cf.remember_field_value(structvalue, fieldvalue, op, self.optimizer)
     optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
     optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
@@ -497,7 +501,7 @@
         # the remember the result of reading the array item
         if cf is not None:
             fieldvalue = self.getvalue(op)
-            cf.remember_field_value(arrayvalue, fieldvalue, op)
+            cf.remember_field_value(arrayvalue, fieldvalue, op, self.optimizer)
     optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
     optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py
--- a/rpython/jit/metainterp/optimizeopt/intbounds.py
+++ b/rpython/jit/metainterp/optimizeopt/intbounds.py
@@ -4,7 +4,7 @@
 from rpython.jit.metainterp.optimizeopt.intutils import (IntBound, IntLowerBound,
     IntUpperBound)
 from rpython.jit.metainterp.optimizeopt.optimizer import (Optimization, CONST_1,
-    CONST_0, MODE_ARRAY, MODE_STR, MODE_UNICODE)
+    CONST_0, MODE_ARRAY, MODE_STR, MODE_UNICODE, IntOptValue)
 from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from rpython.jit.metainterp.resoperation import rop, AbstractResOp
 from rpython.jit.backend.llsupport import symbolic
@@ -53,7 +53,9 @@
         # FIXME: This takes care of the instruction where box is the reuslt
         #        but the bounds produced by all instructions where box is
         #        an argument might also be tighten
-        b = v.intbound
+        b = v.getintbound()
+        if b is None:
+            return # pointer
         if b.has_lower and b.has_upper and b.lower == b.upper:
             v.make_constant(ConstInt(b.lower))

@@ -78,11 +80,13 @@
                 self.make_constant_int(op, 0)
                 return
         self.emit_operation(op)
-        if v1.intbound.known_ge(IntBound(0, 0)) and \
-           v2.intbound.known_ge(IntBound(0, 0)):
-            r = self.getvalue(op)
-            mostsignificant = v1.intbound.upper | v2.intbound.upper
-            r.intbound.intersect(IntBound(0, next_pow2_m1(mostsignificant)))
+        bound1 = v1.getintbound()
+        bound2 = v2.getintbound()
+        if bound1.known_ge(IntBound(0, 0)) and \
+           bound2.known_ge(IntBound(0, 0)):
+            r = self.getvalue(op).getintbound()
+            mostsignificant = bound1.upper | bound2.upper
+            r.intersect(IntBound(0, next_pow2_m1(mostsignificant)))

     optimize_INT_OR = optimize_INT_OR_or_XOR
     optimize_INT_XOR = optimize_INT_OR_or_XOR
@@ -96,55 +100,55 @@
         if v2.is_constant():
            val = v2.box.getint()
            if val >= 0:
-                r.intbound.intersect(IntBound(0, val))
        elif v1.is_constant():
            val = v1.box.getint()
            if val >= 0:
-                r.intbound.intersect(IntBound(0, val))
-        elif v1.intbound.known_ge(IntBound(0, 0)) and \
-             v2.intbound.known_ge(IntBound(0, 0)):
-            lesser = min(v1.intbound.upper, v2.intbound.upper)
-            r.intbound.intersect(IntBound(0, next_pow2_m1(lesser)))
+                r.getintbound().intersect(IntBound(0, val))
+        elif v1.getintbound().known_ge(IntBound(0, 0)) and \
+             v2.getintbound().known_ge(IntBound(0, 0)):
+            lesser = min(v1.getintbound().upper, v2.getintbound().upper)
+            r.getintbound().intersect(IntBound(0, next_pow2_m1(lesser)))

     def optimize_INT_SUB(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op)
-        b = v1.intbound.sub_bound(v2.intbound)
+        b = v1.getintbound().sub_bound(v2.getintbound())
         if b.bounded():
-            r.intbound.intersect(b)
+            r.getintbound().intersect(b)

     def optimize_INT_ADD(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op)
-        b = v1.intbound.add_bound(v2.intbound)
+        b = v1.getintbound().add_bound(v2.getintbound())
         if b.bounded():
-            r.intbound.intersect(b)
+            r.getintbound().intersect(b)

     def optimize_INT_MUL(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op)
-        b = v1.intbound.mul_bound(v2.intbound)
+        b = v1.getintbound().mul_bound(v2.getintbound())
         if b.bounded():
-            r.intbound.intersect(b)
+            r.getintbound().intersect(b)

     def optimize_INT_FLOORDIV(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op)
-        r.intbound.intersect(v1.intbound.div_bound(v2.intbound))
+        r.getintbound().intersect(v1.getintbound().div_bound(v2.getintbound()))

     def optimize_INT_MOD(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        known_nonneg = (v1.intbound.known_ge(IntBound(0, 0)) and
-                        v2.intbound.known_ge(IntBound(0, 0)))
+        known_nonneg = (v1.getintbound().known_ge(IntBound(0, 0)) and
+                        v2.getintbound().known_ge(IntBound(0, 0)))
         if known_nonneg and v2.is_constant():
             val = v2.box.getint()
             if (val & (val-1)) == 0:
@@ -162,18 +166,18 @@
             return     # give up
         val = -val
         if known_nonneg:
-            r.intbound.make_ge(IntBound(0, 0))
+            r.getintbound().make_ge(IntBound(0, 0))
         else:
-            r.intbound.make_gt(IntBound(-val, -val))
-        r.intbound.make_lt(IntBound(val, val))
+            r.getintbound().make_gt(IntBound(-val, -val))
+        r.getintbound().make_lt(IntBound(val, val))

     def optimize_INT_LSHIFT(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op)
-        b = v1.intbound.lshift_bound(v2.intbound)
-        r.intbound.intersect(b)
+        b = v1.getintbound().lshift_bound(v2.getintbound())
+        r.getintbound().intersect(b)
         # intbound.lshift_bound checks for an overflow and if the
         # lshift can be proven not to overflow sets b.has_upper and
         # b.has_lower
@@ -184,14 +188,14 @@
     def optimize_INT_RSHIFT(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        b = v1.intbound.rshift_bound(v2.intbound)
+        b = v1.getintbound().rshift_bound(v2.getintbound())
         if b.has_lower and b.has_upper and b.lower == b.upper:
             # constant result (likely 0, for rshifts that kill all bits)
             self.make_constant_int(op, b.lower)
         else:
             self.emit_operation(op)
             r = self.getvalue(op)
-            r.intbound.intersect(b)
+            r.getintbound().intersect(b)

     def optimize_GUARD_NO_OVERFLOW(self, op):
         lastop = self.last_emitted_operation
@@ -236,17 +240,16 @@
     def optimize_INT_ADD_OVF(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        resbound = v1.intbound.add_bound(v2.intbound)
+        resbound = v1.getintbound().add_bound(v2.getintbound())
         if resbound.bounded():
             # Transform into INT_ADD.  The following guard will be killed
             # by optimize_GUARD_NO_OVERFLOW; if we see instead an
             # optimize_GUARD_OVERFLOW, then InvalidLoop.
-            newop = self.replace_op_with(op, rop.INT_ADD)
-        else:
-            newop = op
-        self.emit_operation(newop) # emit the op
+            xxx
+            op = op.copy_and_change(rop.INT_ADD)
+        self.emit_operation(op) # emit the op
         r = self.getvalue(op)
-        r.intbound.intersect(resbound)
+        r.getintbound().intersect(resbound)

     def optimize_INT_SUB_OVF(self, op):
         v1 = self.getvalue(op.getarg(0))
@@ -254,31 +257,30 @@
         if v1 is v2:
             self.make_constant_int(op, 0)
             return
-        resbound = v1.intbound.sub_bound(v2.intbound)
+        resbound = v1.getintbound().sub_bound(v2.getintbound())
         if resbound.bounded():
-            newop = self.replace_op_with(op, rop.INT_SUB)
-        else:
-            newop = op
-        self.emit_operation(newop) # emit the op
+            xxx
+            op = op.copy_and_change(rop.INT_SUB)
+        self.emit_operation(op) # emit the op
         r = self.getvalue(op)
-        r.intbound.intersect(resbound)
+        r.getintbound().intersect(resbound)

     def optimize_INT_MUL_OVF(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        resbound = v1.intbound.mul_bound(v2.intbound)
+        resbound = v1.getintbound().mul_bound(v2.getintbound())
         if resbound.bounded():
             op = self.replace_op_with(op, rop.INT_MUL)
         self.emit_operation(op)
         r = self.getvalue(op)
-        r.intbound.intersect(resbound)
+        r.getintbound().intersect(resbound)

     def optimize_INT_LT(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_lt(v2.intbound):
+        if v1.getintbound().known_lt(v2.getintbound()):
             self.make_constant_int(op, 1)
-        elif v1.intbound.known_ge(v2.intbound) or v1 is v2:
+        elif v1.getintbound().known_ge(v2.getintbound()) or v1 is v2:
             self.make_constant_int(op, 0)
         else:
             self.emit_operation(op)
@@ -286,9 +288,9 @@
     def optimize_INT_GT(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_gt(v2.intbound):
+        if v1.getintbound().known_gt(v2.getintbound()):
             self.make_constant_int(op, 1)
-        elif v1.intbound.known_le(v2.intbound) or v1 is v2:
+        elif v1.getintbound().known_le(v2.getintbound()) or v1 is v2:
             self.make_constant_int(op, 0)
         else:
             self.emit_operation(op)
@@ -296,9 +298,9 @@
     def optimize_INT_LE(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_le(v2.intbound) or v1 is v2:
+        if v1.getintbound().known_le(v2.getintbound()) or v1 is v2:
             self.make_constant_int(op, 1)
-        elif v1.intbound.known_gt(v2.intbound):
+        elif v1.getintbound().known_gt(v2.getintbound()):
             self.make_constant_int(op, 0)
         else:
             self.emit_operation(op)
@@ -306,9 +308,9 @@
     def optimize_INT_GE(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_ge(v2.intbound) or v1 is v2:
+        if v1.getintbound().known_ge(v2.getintbound()) or v1 is v2:
             self.make_constant_int(op, 1)
-        elif v1.intbound.known_lt(v2.intbound):
+        elif v1.getintbound().known_lt(v2.getintbound()):
             self.make_constant_int(op, 0)
         else:
             self.emit_operation(op)
@@ -316,9 +318,9 @@
     def optimize_INT_EQ(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_gt(v2.intbound):
+        if v1.getintbound().known_gt(v2.getintbound()):
             self.make_constant_int(op, 0)
-        elif v1.intbound.known_lt(v2.intbound):
+        elif v1.getintbound().known_lt(v2.getintbound()):
             self.make_constant_int(op, 0)
         elif v1 is v2:
             self.make_constant_int(op, 1)
@@ -328,9 +330,9 @@
     def optimize_INT_NE(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        if v1.intbound.known_gt(v2.intbound):
+        if v1.getintbound().known_gt(v2.getintbound()):
             self.make_constant_int(op, 1)
-        elif v1.intbound.known_lt(v2.intbound):
+        elif v1.getintbound().known_lt(v2.getintbound()):
             self.make_constant_int(op, 1)
         elif v1 is v2:
             self.make_constant_int(op, 0)
@@ -339,7 +341,7 @@
     def optimize_INT_FORCE_GE_ZERO(self, op):
         value = self.getvalue(op.getarg(0))
-        if value.intbound.known_ge(IntBound(0, 0)):
+        if value.getintbound().known_ge(IntBound(0, 0)):
             self.make_equal_to(op, value)
         else:
             self.emit_operation(op)
@@ -350,50 +352,53 @@
         start = -(1 << (numbits - 1))
         stop = 1 << (numbits - 1)
         bounds = IntBound(start, stop - 1)
-        if bounds.contains_bound(value.intbound):
+        if bounds.contains_bound(value.getintbound()):
             self.make_equal_to(op, value)
         else:
             self.emit_operation(op)
             vres = self.getvalue(op)
-            vres.intbound.intersect(bounds)
+            vres.getintbound().intersect(bounds)

     def optimize_ARRAYLEN_GC(self, op):
         self.emit_operation(op)
         array = self.getvalue(op.getarg(0))
         result = self.getvalue(op)
         array.make_len_gt(MODE_ARRAY, op.getdescr(), -1)
-        array.lenbound.bound.intersect(result.intbound)
-        result.intbound = array.lenbound.bound
+        array.getlenbound().bound.intersect(result.getintbound())
+        assert isinstance(result, IntOptValue)
+        result.intbound = array.getlenbound().bound

     def optimize_STRLEN(self, op):
         self.emit_operation(op)
         array = self.getvalue(op.getarg(0))
         result = self.getvalue(op)
         array.make_len_gt(MODE_STR, op.getdescr(), -1)
-        array.lenbound.bound.intersect(result.intbound)
-        result.intbound = array.lenbound.bound
+        array.getlenbound().bound.intersect(result.getintbound())
+        assert isinstance(result, IntOptValue)
+        result.intbound = array.getlenbound().bound

     def optimize_UNICODELEN(self, op):
         self.emit_operation(op)
         array = self.getvalue(op.getarg(0))
         result = self.getvalue(op)
         array.make_len_gt(MODE_UNICODE, op.getdescr(), -1)
-        array.lenbound.bound.intersect(result.intbound)
-        result.intbound = array.lenbound.bound
+        array.getlenbound().bound.intersect(result.getintbound())
+        assert isinstance(result, IntOptValue)
+        result.intbound = array.getlenbound().bound

     def optimize_STRGETITEM(self, op):
         self.emit_operation(op)
         v1 = self.getvalue(op)
-        v1.intbound.make_ge(IntLowerBound(0))
-        v1.intbound.make_lt(IntUpperBound(256))
+        v1.getintbound().make_ge(IntLowerBound(0))
+        v1.getintbound().make_lt(IntUpperBound(256))

     def optimize_GETFIELD_RAW_I(self, op):
         self.emit_operation(op)
         descr = op.getdescr()
         if descr.is_integer_bounded():
             v1 = self.getvalue(op)
-            v1.intbound.make_ge(IntLowerBound(descr.get_integer_min()))
-            v1.intbound.make_le(IntUpperBound(descr.get_integer_max()))
+            v1.getintbound().make_ge(IntLowerBound(descr.get_integer_min()))
+            v1.getintbound().make_le(IntUpperBound(descr.get_integer_max()))

     optimize_GETFIELD_RAW_F = optimize_GETFIELD_RAW_I
     optimize_GETFIELD_GC_I = optimize_GETFIELD_RAW_I
@@ -408,9 +413,9 @@
         self.emit_operation(op)
         descr = op.getdescr()
         if descr and descr.is_item_integer_bounded():
-            v1 = self.getvalue(op)
-            v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min()))
-            v1.intbound.make_le(IntUpperBound(descr.get_item_integer_max()))
+            intbound = self.getvalue(op).getintbound()
+            intbound.make_ge(IntLowerBound(descr.get_item_integer_min()))
+            intbound.make_le(IntUpperBound(descr.get_item_integer_max()))

     optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I
     optimize_GETARRAYITEM_GC_I = optimize_GETARRAYITEM_RAW_I
@@ -420,22 +425,22 @@
     def optimize_UNICODEGETITEM(self, op):
         self.emit_operation(op)
         v1 = self.getvalue(op)
-        v1.intbound.make_ge(IntLowerBound(0))
+        v1.getintbound().make_ge(IntLowerBound(0))

     def make_int_lt(self, box1, box2):
         v1 = self.getvalue(box1)
         v2 = self.getvalue(box2)
-        if v1.intbound.make_lt(v2.intbound):
+        if v1.getintbound().make_lt(v2.getintbound()):
             self.propagate_bounds_backward(box1, v1)
-        if v2.intbound.make_gt(v1.intbound):
+        if v2.getintbound().make_gt(v1.getintbound()):
             self.propagate_bounds_backward(box2, v2)

     def make_int_le(self, box1, box2):
         v1 = self.getvalue(box1)
         v2 = self.getvalue(box2)
-        if v1.intbound.make_le(v2.intbound):
+        if v1.getintbound().make_le(v2.getintbound()):
             self.propagate_bounds_backward(box1, v1)
-        if v2.intbound.make_ge(v1.intbound):
+        if v2.getintbound().make_ge(v1.getintbound()):
             self.propagate_bounds_backward(box2, v2)

     def make_int_gt(self, box1, box2):
@@ -482,9 +487,9 @@
             if r.box.same_constant(CONST_1):
                 v1 = self.getvalue(op.getarg(0))
                 v2 = self.getvalue(op.getarg(1))
-                if v1.intbound.intersect(v2.intbound):
+                if v1.getintbound().intersect(v2.getintbound()):
                     self.propagate_bounds_backward(op.getarg(0), v1)
-                if v2.intbound.intersect(v1.intbound):
+                if v2.getintbound().intersect(v1.getintbound()):
                     self.propagate_bounds_backward(op.getarg(1), v2)

     def propagate_bounds_INT_NE(self, op):
@@ -493,9 +498,9 @@
             if r.box.same_constant(CONST_0):
                 v1 = self.getvalue(op.getarg(0))
                 v2 = self.getvalue(op.getarg(1))
-                if v1.intbound.intersect(v2.intbound):
+                if v1.getintbound().intersect(v2.getintbound()):
                     self.propagate_bounds_backward(op.getarg(0), v1)
-                if v2.intbound.intersect(v1.intbound):
+                if v2.getintbound().intersect(v1.getintbound()):
                     self.propagate_bounds_backward(op.getarg(1), v2)

     def propagate_bounds_INT_IS_TRUE(self, op):
@@ -503,8 +508,8 @@
         if r.is_constant():
             if r.box.same_constant(CONST_1):
                 v1 = self.getvalue(op.getarg(0))
-                if v1.intbound.known_ge(IntBound(0, 0)):
-                    v1.intbound.make_gt(IntBound(0, 0))
+                if v1.getintbound().known_ge(IntBound(0, 0)):
+                    v1.getintbound().make_gt(IntBound(0, 0))
                     self.propagate_bounds_backward(op.getarg(0), v1)

     def propagate_bounds_INT_IS_ZERO(self, op):
@@ -515,50 +520,50 @@
                 # Clever hack, we can't use self.make_constant_int yet because
                 # the args aren't in the values dictionary yet so it runs into
                 # an assert, this is a clever way of expressing the same thing.

     def propagate_bounds_INT_ADD(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        r = self.getvalue(op)
-        b = r.intbound.sub_bound(v2.intbound)
-        if v1.intbound.intersect(b):
+        r = self.getvalue(op.result)
+        b = r.getintbound().sub_bound(v2.getintbound())
+        if v1.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(0), v1)
-        b = r.intbound.sub_bound(v1.intbound)
-        if v2.intbound.intersect(b):
+        b = r.getintbound().sub_bound(v1.getintbound())
+        if v2.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(1), v2)

     def propagate_bounds_INT_SUB(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        r = self.getvalue(op)
-        b = r.intbound.add_bound(v2.intbound)
-        if v1.intbound.intersect(b):
+        r = self.getvalue(op.result)
+        b = r.getintbound().add_bound(v2.getintbound())
+        if v1.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(0), v1)
-        b = r.intbound.sub_bound(v1.intbound).mul(-1)
-        if v2.intbound.intersect(b):
+        b = r.getintbound().sub_bound(v1.getintbound()).mul(-1)
+        if v2.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(1), v2)

     def propagate_bounds_INT_MUL(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
-        r = self.getvalue(op)
-        b = r.intbound.div_bound(v2.intbound)
-        if v1.intbound.intersect(b):
+        r = self.getvalue(op.result)
+        b = r.getintbound().div_bound(v2.getintbound())
+        if v1.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(0), v1)
-        b = r.intbound.div_bound(v1.intbound)
-        if v2.intbound.intersect(b):
+        b = r.getintbound().div_bound(v1.getintbound())
+        if v2.getintbound().intersect(b):
             self.propagate_bounds_backward(op.getarg(1), v2)

     def propagate_bounds_INT_LSHIFT(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
         r = self.getvalue(op)
-        b = r.intbound.rshift_bound(v2.intbound)
-        if v1.intbound.intersect(b):
-            self.propagate_bounds_backward(op.getarg(0))
+        b = r.getintbound().rshift_bound(v2.getintbound())
+        if v1.getintbound().intersect(b):
+            self.propagate_bounds_backward(op.getarg(0), v1)

     propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
     propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
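The propagate_bounds_INT_ADD/SUB/MUL hunks above run interval arithmetic backwards: once the result of r = a + b has a known range, a can be narrowed to range(r) - range(b), and symmetrically for b (division plays the same role for INT_MUL, right shift for INT_LSHIFT). A self-contained sketch of the INT_ADD case, again using a toy Interval rather than the real IntBound API:

    class Interval(object):
        def __init__(self, lower, upper):
            self.lower = lower
            self.upper = upper

        def sub_bound(self, other):
            # range of (x - y) for any x in self, y in other
            return Interval(self.lower - other.upper, self.upper - other.lower)

        def intersect(self, other):
            # narrow self to the overlap; report whether anything changed
            changed = False
            if other.lower > self.lower:
                self.lower = other.lower
                changed = True
            if other.upper < self.upper:
                self.upper = other.upper
                changed = True
            return changed

    # r = a + b; suppose guards established r in [0, 10] and b in [4, 6]:
    a, b, r = Interval(-100, 100), Interval(4, 6), Interval(0, 10)
    if a.intersect(r.sub_bound(b)):     # a narrows to [-6, 6]
        pass                            # here the optimizer re-propagates
    assert (a.lower, a.upper) == (-6, 6)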
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
--- a/rpython/jit/metainterp/optimizeopt/optimizer.py
+++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
@@ -4,20 +4,26 @@
 from rpython.jit.metainterp.history import Const, ConstInt, REF
 from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
      ImmutableIntUnbounded, \
-     IntLowerBound, MININT, MAXINT
+     IntLowerBound, MININT,\
+     MAXINT
 from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from rpython.jit.metainterp.resoperation import rop, ResOperation,\
-     AbstractResOp, AbstractInputArg, DONT_CHANGE
+     AbstractResOp, AbstractInputArg, DONT_CHANGE, GuardResOp
 from rpython.jit.metainterp.typesystem import llhelper
 from rpython.tool.pairtype import extendabletype
 from rpython.rlib.debug import debug_print
 from rpython.rlib.objectmodel import specialize

+""" The tag field on OptValue has a following meaning:

-LEVEL_UNKNOWN = '\x00'
-LEVEL_NONNULL = '\x01'
-LEVEL_KNOWNCLASS = '\x02'     # might also mean KNOWNARRAYDESCR, for arrays
-LEVEL_CONSTANT = '\x03'
+lower two bits are LEVEL
+next 16 bits is the position in the original list, 0 if unknown or a constant
+"""
+
+LEVEL_UNKNOWN = 0
+LEVEL_NONNULL = 1
+LEVEL_KNOWNCLASS = 2     # might also mean KNOWNARRAYDESCR, for arrays
+LEVEL_CONSTANT = 3

 MODE_ARRAY = '\x00'
 MODE_STR = '\x01'
@@ -41,92 +47,49 @@

 class OptValue(object):
     __metaclass__ = extendabletype
-    _attrs_ = ('box', 'known_class', 'last_guard', 'level', 'intbound', 'lenbound')
-    last_guard = None
+    _attrs_ = ('box', '_tag')

-    level = LEVEL_UNKNOWN
-    known_class = None
-    intbound = ImmutableIntUnbounded()
-    lenbound = None
+    _tag = 0

     def __init__(self, box, level=None, known_class=None, intbound=None):
         self.box = box
         if level is not None:
-            self.level = level
-        self.known_class = known_class
-        if intbound:
-            self.intbound = intbound
-        else:
-            if box is not None and box.type == 'i':
-                self.intbound = IntBound(MININT, MAXINT)
-            else:
-                self.intbound = IntUnbounded()
+            self._tag = level

         if isinstance(box, Const):
             self.make_constant(box)
         # invariant: box is a Const if and only if level == LEVEL_CONSTANT

-    def make_len_gt(self, mode, descr, val):
-        if self.lenbound:
-            assert self.lenbound.mode == mode
-            assert self.lenbound.descr == descr
-            self.lenbound.bound.make_gt(IntBound(val, val))
+    def getlevel(self):
+        return self._tag & 0x3
+
+    def setlevel(self, level):
+        self._tag = (self._tag & (~0x3)) | level
+
+    def import_from(self, other, optimizer):
+        if self.getlevel() == LEVEL_CONSTANT:
+            assert other.getlevel() == LEVEL_CONSTANT
+            assert other.box.same_constant(self.box)
+            return
+        assert self.getlevel() <= LEVEL_NONNULL
+        if other.getlevel() == LEVEL_CONSTANT:
+            self.make_constant(other.get_key_box())
+        elif other.getlevel() == LEVEL_KNOWNCLASS:
+            self.make_constant_class(None, other.get_known_class())
         else:
-            self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1))
+            if other.getlevel() == LEVEL_NONNULL:
+                self.ensure_nonnull()

     def make_guards(self, box):
-        guards = []
-        if self.level == LEVEL_CONSTANT:
+        if self.getlevel() == LEVEL_CONSTANT:
             op = ResOperation(rop.GUARD_VALUE, [box, self.box], None)
-            guards.append(op)
-        elif self.level == LEVEL_KNOWNCLASS:
-            op = ResOperation(rop.GUARD_NONNULL, [box], None)
-            guards.append(op)
-            op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None)
-            guards.append(op)
-        else:
-            if self.level == LEVEL_NONNULL:
-                op = ResOperation(rop.GUARD_NONNULL, [box], None)
-                guards.append(op)
-            self.intbound.make_guards(box, guards)
-            if self.lenbound:
-                lenbox = BoxInt()
-                if self.lenbound.mode == MODE_ARRAY:
-                    op = ResOperation(rop.ARRAYLEN_GC, [box], lenbox, self.lenbound.descr)
-                elif self.lenbound.mode == MODE_STR:
-                    op = ResOperation(rop.STRLEN, [box], lenbox, self.lenbound.descr)
-                elif self.lenbound.mode == MODE_UNICODE:
-                    op = ResOperation(rop.UNICODELEN, [box], lenbox, self.lenbound.descr)
-                else:
-                    debug_print("Unknown lenbound mode")
-                    assert False
-                guards.append(op)
-                self.lenbound.bound.make_guards(lenbox, guards)
-        return guards
+            return [op]
+        return []

-    def import_from(self, other, optimizer):
-        if self.level == LEVEL_CONSTANT:
-            assert other.level == LEVEL_CONSTANT
-            assert other.box.same_constant(self.box)
-            return
-        assert self.level <= LEVEL_NONNULL
-        if other.level == LEVEL_CONSTANT:
-            self.make_constant(other.get_key_box())
-            optimizer.turned_constant(self)
-        elif other.level == LEVEL_KNOWNCLASS:
-            self.make_constant_class(other.known_class, None)
-        else:
-            if other.level == LEVEL_NONNULL:
-                self.ensure_nonnull()
-            self.intbound.intersect(other.intbound)
-            if other.lenbound:
-                if self.lenbound:
-                    assert other.lenbound.mode == self.lenbound.mode
-                    assert other.lenbound.descr == self.lenbound.descr
-                    self.lenbound.bound.intersect(other.lenbound.bound)
-                else:
-                    self.lenbound = other.lenbound.clone()
-
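The new _tag slot above packs what used to be the separate level attribute (plus, per the new docstring, a position index) into one integer: getlevel() reads the low two bits and setlevel() rewrites them while preserving the rest. A rough sketch of the packing; the position accessor below is hypothetical, inferred from the docstring's "next 16 bits" wording rather than shown anywhere in this diff:

    LEVEL_UNKNOWN, LEVEL_NONNULL, LEVEL_KNOWNCLASS, LEVEL_CONSTANT = 0, 1, 2, 3

    def getlevel(tag):
        return tag & 0x3                 # low two bits hold the LEVEL

    def setlevel(tag, level):
        return (tag & ~0x3) | level      # clear low bits, install new level

    def getposition(tag):                # hypothetical, layout assumed
        return (tag >> 2) & 0xffff       # next 16 bits: position, 0 if unknown

    tag = setlevel(0, LEVEL_KNOWNCLASS) | (7 << 2)   # e.g. position 7
    assert getlevel(tag) == LEVEL_KNOWNCLASS
    assert getposition(tag) == 7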
+    def copy_from(self, other_value):
+        assert isinstance(other_value, OptValue)
+        self.box = other_value.box
+        self._tag = other_value._tag

     def force_box(self, optforce):
         return self.box
@@ -154,7 +117,7 @@
         assert 0, "unreachable"

     def is_constant(self):
-        return self.level == LEVEL_CONSTANT
+        return self.getlevel() == LEVEL_CONSTANT

     def is_null(self):
         if self.is_constant():
@@ -170,58 +133,20 @@
             return self.box.same_constant(other.box)
         return self is other

-    def make_constant(self, constbox):
-        """Replace 'self.box' with a Const box."""
-        assert isinstance(constbox, Const)
-        self.box = constbox
-        self.level = LEVEL_CONSTANT
-
-        if isinstance(constbox, ConstInt):
-            val = constbox.getint()
-            self.intbound = IntBound(val, val)
-        else:
-            self.intbound = IntUnbounded()
-
-    def get_constant_class(self, cpu):
-        level = self.level
-        if level == LEVEL_KNOWNCLASS:
-            return self.known_class
-        elif level == LEVEL_CONSTANT:
-            return cpu.ts.cls_of_box(self.box)
-        else:
-            return None
-
-    def make_constant_class(self, classbox, guardop):
-        assert self.level < LEVEL_KNOWNCLASS
-        self.known_class = classbox
-        self.level = LEVEL_KNOWNCLASS
-        self.last_guard = guardop
-
-    def make_nonnull(self, guardop):
-        assert self.level < LEVEL_NONNULL
-        self.level = LEVEL_NONNULL
-        self.last_guard = guardop
-
     def is_nonnull(self):
-        level = self.level
+        level = self.getlevel()
         if level == LEVEL_NONNULL or level == LEVEL_KNOWNCLASS:
             return True
         elif level == LEVEL_CONSTANT:
             box = self.box
             assert isinstance(box, Const)
             return box.nonnull()
-        elif self.intbound:
-            if self.intbound.known_gt(IntBound(0, 0)) or \
-               self.intbound.known_lt(IntBound(0, 0)):
-                return True
-            else:
-                return False
         else:
             return False

     def ensure_nonnull(self):
-        if self.level < LEVEL_NONNULL:
-            self.level = LEVEL_NONNULL
+        if self.getlevel() < LEVEL_NONNULL:
+            self.setlevel(LEVEL_NONNULL)

     def is_virtual(self):
         # Don't check this with 'isinstance(_, VirtualValue)'!
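The comparisons above (getlevel() < LEVEL_NONNULL, level == LEVEL_KNOWNCLASS, and so on) work because the four levels form a total order of increasing knowledge: UNKNOWN < NONNULL < KNOWNCLASS < CONSTANT. Promotion is monotonic, so information is never discarded; a sketch of that invariant:

    LEVEL_UNKNOWN, LEVEL_NONNULL, LEVEL_KNOWNCLASS, LEVEL_CONSTANT = 0, 1, 2, 3

    class Value(object):
        level = LEVEL_UNKNOWN

        def ensure_nonnull(self):
            # promote, never demote: only raise the level when it is
            # still below NONNULL
            if self.level < LEVEL_NONNULL:
                self.level = LEVEL_NONNULL

    v = Value()
    v.ensure_nonnull()
    assert v.level == LEVEL_NONNULL
    v.level = LEVEL_KNOWNCLASS
    v.ensure_nonnull()               # no-op at the higher level
    assert v.level == LEVEL_KNOWNCLASS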
@@ -262,8 +187,224 @@
     def get_missing_null_value(self):
         raise NotImplementedError    # only for VArrayValue

+    def make_constant(self, constbox):
+        """Replace 'self.box' with a Const box."""
+        assert isinstance(constbox, Const)
+        self.box = constbox
+        self.setlevel(LEVEL_CONSTANT)

-class ConstantValue(OptValue):
+    def get_last_guard(self, optimizer):
+        return None
+
+    def get_known_class(self):
+        return None
+
+    def getlenbound(self):
+        return None
+
+    def getintbound(self):
+        return None
+
+    def get_constant_class(self, cpu):
+        return None
+
+class PtrOptValue(OptValue):
+    _attrs_ = ('known_class', 'last_guard_pos', 'lenbound')
+
+    known_class = None
+    last_guard_pos = -1
+    lenbound = None
+
+    def __init__(self, box, level=None, known_class=None, intbound=None):
+        OptValue.__init__(self, box, level, None, intbound)
+        if not isinstance(box, Const):
+            self.known_class = known_class
+
+    def copy_from(self, other_value):
+        assert isinstance(other_value, PtrOptValue)
+        self.box = other_value.box
+        self.known_class = other_value.known_class
+        self._tag = other_value._tag
+        self.last_guard_pos = other_value.last_guard_pos
+        self.lenbound = other_value.lenbound
+
+    def make_len_gt(self, mode, descr, val):
+        if self.lenbound:
+            assert self.lenbound.mode == mode
+            assert self.lenbound.descr == descr
+            self.lenbound.bound.make_gt(IntBound(val, val))
+        else:
+            self.lenbound = LenBound(mode, descr, IntLowerBound(val + 1))
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit