Author: Maciej Fijalkowski <fij...@gmail.com>
Branch: 
Changeset: r75765:221df618cf77
Date: 2015-02-08 20:59 +0200
http://bitbucket.org/pypy/pypy/changeset/221df618cf77/
Log: merge diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -235,6 +235,11 @@ config.translation.suggest(check_str_without_nul=True) config.translation.suggest(shared=True) + if config.translation.shared: + if config.translation.output is not None: + raise Exception("Cannot use the --output option with PyPy " + "when --shared is on (it is by default). " + "See issue #1971.") if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -81,7 +81,10 @@ return w_object.descr_copy(space, w_order) elif not copy and (subok or type(w_object) is W_NDimArray): return w_object - # we have a ndarray, but need to copy or change dtype or create W_NDimArray + if subok: + raise oefmt(space.w_NotImplementedError, + "array(..., subok=True) only partially implemented") + # we have a ndarray, but need to copy or change dtype if dtype is None: dtype = w_object.get_dtype() if dtype != w_object.get_dtype(): @@ -89,13 +92,12 @@ copy = True if copy: shape = w_object.get_shape() - _elems_w = w_object.reshape(space, space.wrap(-1)) elems_w = [None] * w_object.get_size() - for i in range(len(elems_w)): - elems_w[i] = _elems_w.descr_getitem(space, space.wrap(i)) - elif subok: - raise oefmt(space.w_NotImplementedError, - "array(...copy=False, subok=True) not implemented yet") + elsize = w_object.get_dtype().elsize + # TODO - use w_object.implementation without copying to a list + # unfortunately that causes a union error in translation + for i in range(w_object.get_size()): + elems_w[i] = w_object.implementation.getitem(i * elsize) else: sz = support.product(w_object.get_shape()) * dtype.elsize return W_NDimArray.from_shape_and_storage(space, @@ -113,7 +115,7 @@ dtype = descriptor.variable_dtype(space, dtype.char + '1') w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - if len(elems_w) == 1: + if support.product(shape) == 1: w_arr.set_scalar_value(dtype.coerce(space, elems_w[0])) else: loop.assign(space, w_arr, elems_w) diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -272,40 +272,103 @@ import numpy as N # numpy's matrix class caused an infinite loop class matrix(N.ndarray): - getcnt = 0 def __new__(subtype, data, dtype=None, copy=True): + print('matrix __new__') + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: return new.copy() + else: return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = False + if (ndim == 2) and arr.flags.fortran: + order = True + + if not (order or arr.flags.contiguous): + arr = arr.copy() ret = N.ndarray.__new__(subtype, shape, 
arr.dtype, buffer=arr, - order=True) + order=order) return ret + def __array_finalize__(self, obj): + print('matrix __array_finalize__') + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple([x for x in self.shape if x > 1]) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + def __getitem__(self, index): - matrix.getcnt += 1 - if matrix.getcnt > 10: - # XXX strides.find_shape_and_elems is sensitive - # to shape modification - xxx - out = N.ndarray.__getitem__(self, index) + print('matrix __getitem__') + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False if not isinstance(out, N.ndarray): return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] # Determine when we should have a column array - old_shape = out.shape - if out.ndim < 2: - sh = out.shape[0] try: n = len(index) except: n = 0 - if n > 1: + if n > 1 and isscalar(index[1]): out.shape = (sh, 1) else: out.shape = (1, sh) - #print 'out, shape was',old_shape,'now',out.shape,'out',out return out + a = matrix([[1., 2.], [3., 4.]]) b = N.array([a]) assert (b == a).all() @@ -318,6 +381,17 @@ assert len(b.shape) == 2 assert (b == a).all() + b = N.array(a, copy=True, dtype=int) + assert len(b.shape) == 2 + assert (b == a).all() + + c = matrix(a, copy=False) + assert c.base is not None + c[0, 0] = 100 + assert a[0, 0] == 100 + b = N.array(c, copy=True) + assert (b == a).all() + def test_setstate_no_version(self): # Some subclasses of ndarray, like MaskedArray, do not use # version in __setstare__ diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -151,9 +151,7 @@ default=False, cmdline="--dont-write-c-files"), ArbitraryOption("instrumentctl", "internal", default=None), - StrOption("output", "Output file name (don't change for PyPy!" 
- " doesn't work with virtualenv+shared: issue 1971)", - cmdline="--really-force-output"), + StrOption("output", "Output file name", cmdline="--output"), StrOption("secondaryentrypoints", "Comma separated list of keys choosing secondary entrypoints", cmdline="--entrypoints", default="main"), diff --git a/rpython/flowspace/generator.py b/rpython/flowspace/generator.py --- a/rpython/flowspace/generator.py +++ b/rpython/flowspace/generator.py @@ -107,7 +107,7 @@ # First, always run simplify_graph in order to reduce the number of # variables passed around simplify_graph(graph) - insert_empty_startblock(None, graph) + insert_empty_startblock(graph) _insert_reads(graph.startblock, Entry.varnames) Entry.block = graph.startblock # @@ -130,7 +130,7 @@ if hlop.opname == 'yield_': [v_yielded_value] = hlop.args del block.operations[index] - newlink = split_block(None, block, index) + newlink = split_block(block, index) newblock = newlink.target # class Resume(AbstractPosition): diff --git a/rpython/jit/backend/x86/arch.py b/rpython/jit/backend/x86/arch.py --- a/rpython/jit/backend/x86/arch.py +++ b/rpython/jit/backend/x86/arch.py @@ -35,7 +35,9 @@ PASS_ON_MY_FRAME = 15 JITFRAME_FIXED_SIZE = 6 + 8 * 2 # 6 GPR + 8 XMM * 2 WORDS/float # 'threadlocal_addr' is passed as 2nd argument on the stack, - # and it can be left here for when it is needed + # and it can be left here for when it is needed. As an additional hack, + # with asmgcc, it is made odd-valued to mean "already seen this frame + # during the previous minor collection". THREADLOCAL_OFS = (FRAME_FIXED_SIZE + 2) * WORD else: # rbp + rbx + r12 + r13 + r14 + r15 + threadlocal + 12 extra words = 19 @@ -43,7 +45,9 @@ PASS_ON_MY_FRAME = 12 JITFRAME_FIXED_SIZE = 28 # 13 GPR + 15 XMM # 'threadlocal_addr' is passed as 2nd argument in %esi, - # and is moved into this frame location + # and is moved into this frame location. As an additional hack, + # with asmgcc, it is made odd-valued to mean "already seen this frame + # during the previous minor collection". THREADLOCAL_OFS = (FRAME_FIXED_SIZE - 1) * WORD assert PASS_ON_MY_FRAME >= 12 # asmgcc needs at least JIT_USE_WORDS + 3 diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1980,6 +1980,23 @@ def _call_assembler_emit_call(self, addr, argloc, _): threadlocal_loc = RawEspLoc(THREADLOCAL_OFS, INT) + if self._is_asmgcc(): + # We need to remove the bit "already seen during the + # previous minor collection" instead of passing this + # value directly. 
+ if IS_X86_64: + tmploc = esi # already the correct place + if argloc is tmploc: + self.mc.MOV_rr(esi.value, edi.value) + argloc = edi + else: + tmploc = eax + if tmploc is argloc: + tmploc = edx + self.mc.MOV(tmploc, threadlocal_loc) + self.mc.AND_ri(tmploc.value, ~1) + threadlocal_loc = tmploc + # self.simple_call(addr, [argloc, threadlocal_loc]) def _call_assembler_emit_helper_call(self, addr, arglocs, result_loc): @@ -2355,6 +2372,8 @@ assert self.cpu.translate_support_code assert isinstance(resloc, RegLoc) self.mc.MOV_rs(resloc.value, THREADLOCAL_OFS) + if self._is_asmgcc(): + self.mc.AND_ri(resloc.value, ~1) self.load_from_mem(resloc, addr_add_const(resloc, offset), imm(size), imm(sign)) diff --git a/rpython/jit/backend/x86/callbuilder.py b/rpython/jit/backend/x86/callbuilder.py --- a/rpython/jit/backend/x86/callbuilder.py +++ b/rpython/jit/backend/x86/callbuilder.py @@ -167,6 +167,8 @@ self.tlofs_reg = r12 self.mc.MOV_rs(self.tlofs_reg.value, THREADLOCAL_OFS - self.current_esp) + if self.asm._is_asmgcc(): + self.mc.AND_ri(self.tlofs_reg.value, ~1) return self.tlofs_reg def save_stack_position(self): diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -304,13 +304,20 @@ REX_B = 1 @specialize.arg(2) -def encode_rex(mc, rexbyte, basevalue, orbyte): +def encode_rex(mc, rexbyte, w, orbyte): if mc.WORD == 8: assert 0 <= rexbyte < 8 - if basevalue != 0 or rexbyte != 0: - if basevalue == 0: - basevalue = 0x40 - mc.writechar(chr(basevalue | rexbyte)) + mc.writechar(chr(0x40 | w | rexbyte)) + else: + assert rexbyte == 0 + return 0 + +@specialize.arg(2) +def encode_rex_opt(mc, rexbyte, _, orbyte): + if mc.WORD == 8: + assert 0 <= rexbyte < 8 + if rexbyte != 0: + mc.writechar(chr(0x40 | rexbyte)) else: assert rexbyte == 0 return 0 @@ -322,9 +329,9 @@ # the REX prefix in all cases. It is only useful on instructions which # have an 8-bit register argument, to force access to the "sil" or "dil" # registers (as opposed to "ah-dh"). 
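For background (not part of the patch): a REX prefix byte has the layout 0b0100WRXB, i.e. 0x40 plus whichever of the W/R/X/B bits are needed, which is what the rewritten encode_rex now writes directly. A small standalone illustration:

    def rex_prefix(w=0, r=0, x=0, b=0):
        # REX prefix layout: 0 1 0 0 W R X B
        return 0x40 | (w << 3) | (r << 2) | (x << 1) | b

    assert rex_prefix(w=1) == 0x48   # REX.W: 64-bit operand size
    assert rex_prefix() == 0x40      # forced REX with no extra bits (rex_fw)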
-rex_w = encode_rex, 0, (0x40 | REX_W), None # a REX.W prefix -rex_nw = encode_rex, 0, 0, None # an optional REX prefix -rex_fw = encode_rex, 0, 0x40, None # a forced REX prefix +rex_w = encode_rex, 0, REX_W, None # a REX.W prefix +rex_nw = encode_rex_opt, 0, 0, None # an optional REX prefix +rex_fw = encode_rex, 0, 0, None # a forced REX prefix # ____________________________________________________________ diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -107,7 +107,7 @@ """ # split the block just before the jit_merge_point() if portalopindex > 0: - link = split_block(None, portalblock, portalopindex) + link = split_block(portalblock, portalopindex) portalblock = link.target portalop = portalblock.operations[0] # split again, this time enforcing the order of the live vars @@ -115,7 +115,7 @@ assert portalop.opname == 'jit_marker' assert portalop.args[0].value == 'jit_merge_point' greens_v, reds_v = decode_hp_hint_args(portalop) - link = split_block(None, portalblock, 0, greens_v + reds_v) + link = split_block(portalblock, 0, greens_v + reds_v) return link.target def sort_vars(args_v): diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -162,41 +162,67 @@ class JitMixin: basic = True + # Basic terminology: the JIT produces "loops" and "bridges". + # Bridges are always attached to failing guards. Every loop is + # the "trunk" of a tree of compiled code, which is formed by first + # compiling a loop and then incrementally adding some number of + # bridges to it. Each loop and each bridge ends with either a + # FINISH or a JUMP instruction (the name "loop" is not really + # adapted any more). The JUMP instruction jumps to any LABEL + # pseudo-instruction, which can be anywhere, within the same tree + # or another one. + def check_resops(self, expected=None, **check): + """Check the instructions in all loops and bridges, ignoring + the ones that end in FINISH. Either pass a dictionary (then + the check must match exactly), or some keyword arguments (then + the check is only about the instructions named).""" get_stats().check_resops(expected=expected, **check) def check_simple_loop(self, expected=None, **check): + """Useful in the simplest case when we have only one loop + ending with a jump back to itself and possibly a few bridges. + Only the operations within the loop formed by that single jump + will be counted; the bridges are all ignored. If several loops + were compiled, complains.""" get_stats().check_simple_loop(expected=expected, **check) def check_trace_count(self, count): # was check_loop_count - # The number of traces compiled + """Check the number of loops and bridges compiled.""" assert get_stats().compiled_count == count def check_trace_count_at_most(self, count): + """Check the number of loops and bridges compiled.""" assert get_stats().compiled_count <= count def check_jitcell_token_count(self, count): # was check_tree_loop_count + """This should check the number of independent trees of code. 
+ (xxx it is not 100% clear that the count is correct)""" assert len(get_stats().jitcell_token_wrefs) == count def check_target_token_count(self, count): + """(xxx unknown)""" tokens = get_stats().get_all_jitcell_tokens() n = sum([len(t.target_tokens) for t in tokens]) assert n == count def check_enter_count(self, count): + """Check the number of times pyjitpl ran. (Every time, it + should have produced either one loop or one bridge, or aborted; + but it is not 100% clear that this is still correct in the + presence of unrolling.)""" assert get_stats().enter_count == count def check_enter_count_at_most(self, count): + """Check the number of times pyjitpl ran.""" assert get_stats().enter_count <= count - def check_jumps(self, maxcount): - return # FIXME - assert get_stats().exec_jumps <= maxcount - def check_aborted_count(self, count): + """Check the number of times pyjitpl was aborted.""" assert get_stats().aborted_count == count def check_aborted_count_at_least(self, count): + """Check the number of times pyjitpl was aborted.""" assert get_stats().aborted_count >= count def meta_interp(self, *args, **kwds): diff --git a/rpython/jit/metainterp/test/test_send.py b/rpython/jit/metainterp/test/test_send.py --- a/rpython/jit/metainterp/test/test_send.py +++ b/rpython/jit/metainterp/test/test_send.py @@ -202,7 +202,7 @@ # the final one. self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) - self.check_jumps(14) + #self.check_jumps(14) def test_oosend_guard_failure_2(self): # same as above, but using prebuilt objects 'w1' and 'w2' @@ -244,7 +244,7 @@ assert res == f(4, 28) self.check_trace_count(1) self.check_resops(guard_class=1, int_add=4, int_sub=4) - self.check_jumps(14) + #self.check_jumps(14) def test_oosend_different_initial_class(self): myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'w']) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -375,6 +375,10 @@ # the nursery. self.pinned_objects_in_nursery = 0 # + # This flag is set if the previous minor collection found at least + # one pinned object alive. + self.any_pinned_object_kept = False + # # Keeps track of old objects pointing to pinned objects. These objects # must be traced every minor collection. Without tracing them the # referenced pinned object wouldn't be visited and therefore collected. @@ -1489,7 +1493,9 @@ # The following counter keeps track of alive and pinned young objects # inside the nursery. We reset it here and increace it in # '_trace_drag_out()'. + any_pinned_object_from_earlier = self.any_pinned_object_kept self.pinned_objects_in_nursery = 0 + self.any_pinned_object_kept = False # # Before everything else, remove from 'old_objects_pointing_to_young' # the young arrays. @@ -1513,7 +1519,7 @@ # are copied out or flagged. They are also added to the list # 'old_objects_pointing_to_young'. self.nursery_surviving_size = 0 - self.collect_roots_in_nursery() + self.collect_roots_in_nursery(any_pinned_object_from_earlier) # # visit all objects that are known for pointing to pinned # objects. 
This way we populate 'surviving_pinned_objects' @@ -1649,7 +1655,7 @@ def _visit_old_objects_pointing_to_pinned(self, obj, ignore): self.trace(obj, self._trace_drag_out, obj) - def collect_roots_in_nursery(self): + def collect_roots_in_nursery(self, any_pinned_object_from_earlier): # we don't need to trace prebuilt GcStructs during a minor collect: # if a prebuilt GcStruct contains a pointer to a young object, # then the write_barrier must have ensured that the prebuilt @@ -1659,10 +1665,19 @@ callback = IncrementalMiniMarkGC._trace_drag_out1_marking_phase else: callback = IncrementalMiniMarkGC._trace_drag_out1 + # + # Note a subtlety: if the nursery contains pinned objects "from + # earlier", i.e. created earlier than the previous minor + # collection, then we can't use the "is_minor=True" optimization. + # We really need to walk the complete stack to be sure we still + # see them. + use_jit_frame_stoppers = not any_pinned_object_from_earlier + # self.root_walker.walk_roots( callback, # stack roots callback, # static in prebuilt non-gc - None) # static in prebuilt gc + None, # static in prebuilt gc + is_minor=use_jit_frame_stoppers) debug_stop("gc-minor-walkroots") def collect_cardrefs_to_nursery(self): @@ -1844,6 +1859,7 @@ self.surviving_pinned_objects.append( llarena.getfakearenaaddress(obj - size_gc_header)) self.pinned_objects_in_nursery += 1 + self.any_pinned_object_kept = True return else: # First visit to an object that has already a shadow. diff --git a/rpython/memory/gc/minimark.py b/rpython/memory/gc/minimark.py --- a/rpython/memory/gc/minimark.py +++ b/rpython/memory/gc/minimark.py @@ -1322,7 +1322,8 @@ self.root_walker.walk_roots( MiniMarkGC._trace_drag_out1, # stack roots MiniMarkGC._trace_drag_out1, # static in prebuilt non-gc - None) # static in prebuilt gc + None, # static in prebuilt gc + is_minor=True) debug_stop("gc-minor-walkroots") def collect_cardrefs_to_nursery(self): diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -34,7 +34,8 @@ def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc, - collect_static_in_prebuilt_gc): + collect_static_in_prebuilt_gc, + is_minor=False): gc = self.tester.gc layoutbuilder = self.tester.layoutbuilder if collect_static_in_prebuilt_gc: diff --git a/rpython/memory/gctransform/asmgcroot.py b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -340,9 +340,10 @@ # called first, to initialize self.belongs_to_current_thread. assert not hasattr(self, 'gc_detach_callback_pieces_ptr') - def walk_stack_roots(self, collect_stack_root): + def walk_stack_roots(self, collect_stack_root, is_minor=False): gcdata = self.gcdata gcdata._gc_collect_stack_root = collect_stack_root + gcdata._gc_collect_is_minor = is_minor pypy_asm_stackwalk(llhelper(ASM_CALLBACK_PTR, self._asm_callback), gcrootanchor) @@ -468,6 +469,13 @@ if gc.points_to_valid_gc_object(addr): collect_stack_root(gc, addr) # + # small hack: the JIT reserves THREADLOCAL_OFS's last bit for + # us. We use it to store an "already traced past this frame" + # flag. 
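A rough model of how that flag is used during a minor collection (illustration only, with invented names; compare the llinterp change further down):

    def walk_stack_for_minor_collection(frames, trace_frame):
        # Walk from the newest frame towards the oldest.  The first frame
        # already flagged by the previous minor collection still has to be
        # traced, but everything older than it can be skipped.
        for frame in reversed(frames):
            trace_frame(frame)
            if getattr(frame, 'already_seen', False):
                break
            frame.already_seen = True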
+ if self._with_jit and self.gcdata._gc_collect_is_minor: + if self.mark_jit_frame_can_stop(callee): + return False + # # track where the caller_frame saved the registers from its own # caller # @@ -548,6 +556,19 @@ else: # kind == LOC_EBP_MINUS: at -N(%ebp) return ebp_in_caller - offset + def mark_jit_frame_can_stop(self, callee): + location = self._shape_decompressor.get_threadlocal_loc() + if location == LOC_NOWHERE: + return False + addr = self.getlocation(callee, llmemory.NULL, location) + # + x = addr.signed[0] + if x & 1: + return True # this JIT stack frame is already marked! + else: + addr.signed[0] = x | 1 # otherwise, mark it but don't stop + return False + LOC_REG = 0 LOC_ESP_PLUS = 1 @@ -729,6 +750,17 @@ llop.debug_fatalerror(lltype.Void, "asmgcroot: invalid index") return 0 # annotator fix + def get_threadlocal_loc(self): + index = self.jit_index + if index < 0: + return LOC_NOWHERE # case "outside the jit" + else: + # case "in the jit" + from rpython.jit.backend.x86.arch import THREADLOCAL_OFS, WORD + return (LOC_ESP_PLUS | + ((THREADLOCAL_OFS // WORD + self.extra_stack_depth) << 2)) + + # ____________________________________________________________ # diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1462,7 +1462,8 @@ def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc, - collect_static_in_prebuilt_gc): + collect_static_in_prebuilt_gc, + is_minor=False): gcdata = self.gcdata gc = self.gc if collect_static_in_prebuilt_nongc: @@ -1482,7 +1483,7 @@ collect_static_in_prebuilt_gc(gc, result) addr += sizeofaddr if collect_stack_root: - self.walk_stack_roots(collect_stack_root) # abstract + self.walk_stack_roots(collect_stack_root, is_minor) # abstract def finished_minor_collection(self): func = self.finished_minor_collection_func diff --git a/rpython/memory/gctransform/shadowstack.py b/rpython/memory/gctransform/shadowstack.py --- a/rpython/memory/gctransform/shadowstack.py +++ b/rpython/memory/gctransform/shadowstack.py @@ -99,7 +99,7 @@ self.shadow_stack_pool.initial_setup() BaseRootWalker.setup_root_walker(self) - def walk_stack_roots(self, collect_stack_root): + def walk_stack_roots(self, collect_stack_root, is_minor=False): gcdata = self.gcdata self.rootstackhook(collect_stack_root, gcdata.root_stack_base, gcdata.root_stack_top) diff --git a/rpython/memory/gctransform/transform.py b/rpython/memory/gctransform/transform.py --- a/rpython/memory/gctransform/transform.py +++ b/rpython/memory/gctransform/transform.py @@ -221,7 +221,7 @@ # for sanity, we need an empty block at the start of the graph inserted_empty_startblock = False if not starts_with_empty_block(graph): - insert_empty_startblock(self.translator.annotator, graph) + insert_empty_startblock(graph) inserted_empty_startblock = True is_borrowed = self.compute_borrowed_vars(graph) @@ -239,7 +239,7 @@ if link.prevblock.exitswitch is None: link.prevblock.operations.extend(llops) else: - insert_empty_block(self.translator.annotator, link, llops) + insert_empty_block(link, llops) # remove the empty block at the start of the graph, which should # still be empty (but let's check) diff --git a/rpython/memory/gcwrapper.py b/rpython/memory/gcwrapper.py --- a/rpython/memory/gcwrapper.py +++ b/rpython/memory/gcwrapper.py @@ -191,7 +191,8 @@ def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc, - collect_static_in_prebuilt_gc): + 
collect_static_in_prebuilt_gc, + is_minor=False): gcheap = self.gcheap gc = gcheap.gc if collect_static_in_prebuilt_gc: @@ -203,7 +204,7 @@ if self.gcheap.gc.points_to_valid_gc_object(addrofaddr): collect_static_in_prebuilt_nongc(gc, addrofaddr) if collect_stack_root: - for addrofaddr in gcheap.llinterp.find_roots(): + for addrofaddr in gcheap.llinterp.find_roots(is_minor): if self.gcheap.gc.points_to_valid_gc_object(addrofaddr): collect_stack_root(gc, addrofaddr) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -146,13 +146,21 @@ } return self._tlobj - def find_roots(self): + def find_roots(self, is_minor=False): """Return a list of the addresses of the roots.""" #log.findroots("starting") roots = [] - for frame in self.frame_stack: + for frame in reversed(self.frame_stack): #log.findroots("graph", frame.graph.name) frame.find_roots(roots) + # If a call is done with 'is_minor=True', we can stop after the + # first frame in the stack that was already seen by the previous + # call with 'is_minor=True'. (We still need to trace that frame, + # but not its callers.) + if is_minor: + if getattr(frame, '_find_roots_already_seen', False): + break + frame._find_roots_already_seen = True return roots def find_exception(self, exc): diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -371,9 +371,7 @@ assert 0 <= pos < len(newops) - 1 extraops = block.operations[pos+1:] del block.operations[pos+1:] - extrablock = insert_empty_block(self.annotator, - noexclink, - newops = extraops) + extrablock = insert_empty_block(noexclink, newops=extraops) if extrablock is None: self.insert_link_conversions(block) @@ -447,10 +445,9 @@ # cannot insert conversion operations around a single # link, unless it is the only exit of this block. # create a new block along the link... - newblock = insert_empty_block(self.annotator, - link, + newblock = insert_empty_block(link, # ...and store the conversions there. 
- newops=newops) + newops=newops) link = newblock.exits[0] for i, new_a1 in newlinkargs.items(): link.args[i] = new_a1 diff --git a/rpython/translator/backendopt/constfold.py b/rpython/translator/backendopt/constfold.py --- a/rpython/translator/backendopt/constfold.py +++ b/rpython/translator/backendopt/constfold.py @@ -171,7 +171,7 @@ v_result.concretetype = nextop.result.concretetype constants[nextop.result] = v_result callop = SpaceOperation('direct_call', callargs, v_result) - newblock = insert_empty_block(None, link, [callop]) + newblock = insert_empty_block(link, [callop]) [link] = newblock.exits assert link.target is block folded_count += 1 @@ -197,7 +197,7 @@ splitlink = block.exits[0] else: # split the block at the given position - splitlink = split_block(None, block, position) + splitlink = split_block(block, position) assert list(block.exits) == [splitlink] assert link.target is block assert splitlink.prevblock is block diff --git a/rpython/translator/backendopt/inline.py b/rpython/translator/backendopt/inline.py --- a/rpython/translator/backendopt/inline.py +++ b/rpython/translator/backendopt/inline.py @@ -396,7 +396,7 @@ copiedexceptblock.recloseblock(Link(linkargs, blocks[0])) def do_inline(self, block, index_operation): - splitlink = split_block(None, block, index_operation) + splitlink = split_block(block, index_operation) afterblock = splitlink.target # these variables have to be passed along all the links in the inlined # graph because the original function needs them in the blocks after diff --git a/rpython/translator/c/gcc/test/test_asmgcroot.py b/rpython/translator/c/gcc/test/test_asmgcroot.py --- a/rpython/translator/c/gcc/test/test_asmgcroot.py +++ b/rpython/translator/c/gcc/test/test_asmgcroot.py @@ -251,13 +251,17 @@ def define_callback_with_collect(cls): return lambda: 0 -class TestAsmGCRootWithSemiSpaceGC_Shared(TestAsmGCRootWithSemiSpaceGC): - @classmethod - def make_config(cls): - config = TestAsmGCRootWithSemiSpaceGC.make_config() - config.translation.shared = True - return config +#class TestAsmGCRootWithSemiSpaceGC_Shared(TestAsmGCRootWithSemiSpaceGC): +# @classmethod +# def make_config(cls): +# config = TestAsmGCRootWithSemiSpaceGC.make_config() +# config.translation.shared = True +# return config class TestAsmGCRootWithHybridTagged(AbstractTestAsmGCRoot, test_newgc.TestHybridTaggedPointers): pass + +class TestAsmGCRootWithIncrementalMinimark(AbstractTestAsmGCRoot, + test_newgc.TestIncrementalMiniMarkGC): + pass diff --git a/rpython/translator/c/support.py b/rpython/translator/c/support.py --- a/rpython/translator/c/support.py +++ b/rpython/translator/c/support.py @@ -89,7 +89,8 @@ ''') def _char_repr(c): - if c in '\\"': return '\\' + c + # escape with a '\' the characters '\', '"' or (for trigraphs) '?' 
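For context, '?' needs escaping because a C compiler may interpret trigraph sequences such as "??(" inside string literals; emitting "\?\?(" defuses them. A standalone model of the patched helper, with a hypothetical check:

    def char_repr(c):
        # '\' and '"' must be escaped; '?' as well, so that "??(" etc.
        # can never be read as trigraphs by the C compiler
        if c in '\\"?':
            return '\\' + c
        if ' ' <= c < '\x7F':
            return c
        return '\\%03o' % ord(c)

    # "??(" is the trigraph for "["; escaped, it stays a literal "??("
    assert ''.join(char_repr(c) for c in '??(') == r'\?\?('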
+ if c in '\\"?': return '\\' + c if ' ' <= c < '\x7F': return c return '\\%03o' % ord(c) diff --git a/rpython/translator/exceptiontransform.py b/rpython/translator/exceptiontransform.py --- a/rpython/translator/exceptiontransform.py +++ b/rpython/translator/exceptiontransform.py @@ -259,7 +259,7 @@ if not self.raise_analyzer.can_raise(op): continue - splitlink = split_block(None, block, i+1) + splitlink = split_block(block, i+1) afterblock = splitlink.target if lastblock is block: lastblock = afterblock @@ -432,7 +432,7 @@ if insert_zeroing_op: if normalafterblock is None: - normalafterblock = insert_empty_block(None, l0) + normalafterblock = insert_empty_block(l0) v_result = spaceop.result if v_result in l0.args: result_i = l0.args.index(v_result) diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -308,7 +308,9 @@ samefile = this_exe.samefile(exe_name) assert not samefile, ( 'Output file %s is the currently running ' - 'interpreter (use --output=...)' % exe_name) + 'interpreter (please move the executable, and ' + 'possibly its associated libpypy-c, somewhere else ' + 'before you execute it)' % exe_name) except EnvironmentError: pass diff --git a/rpython/translator/simplify.py b/rpython/translator/simplify.py --- a/rpython/translator/simplify.py +++ b/rpython/translator/simplify.py @@ -1072,7 +1072,7 @@ link.target in stopblocks): hints['exactlength'] = True chints = Constant(hints) - newblock = unsimplify.insert_empty_block(None, link) + newblock = unsimplify.insert_empty_block(link) index = link.args.index(vlist) vlist2 = newblock.inputargs[index] vlist3 = Variable(vlist2) diff --git a/rpython/translator/test/test_unsimplify.py b/rpython/translator/test/test_unsimplify.py --- a/rpython/translator/test/test_unsimplify.py +++ b/rpython/translator/test/test_unsimplify.py @@ -21,7 +21,7 @@ w = x * y return z + w graph, t = translate(f, [int, int]) - split_block(t.annotator, graph.startblock, i) + split_block(graph.startblock, i) checkgraph(graph) interp = LLInterpreter(t.rtyper) result = interp.eval_graph(graph, [1, 2]) @@ -35,7 +35,7 @@ else: return y + 2 graph, t = translate(f, [int, int]) - split_block(t.annotator, graph.startblock, i) + split_block(graph.startblock, i) checkgraph(graph) interp = LLInterpreter(t.rtyper) result = interp.eval_graph(graph, [-12, 2]) @@ -61,7 +61,7 @@ return 1 return x graph, t = translate(catches, [int]) - split_block(t.annotator, graph.startblock, i) + split_block(graph.startblock, i) checkgraph(graph) interp = LLInterpreter(t.rtyper) result = interp.eval_graph(graph, [0]) diff --git a/rpython/translator/unsimplify.py b/rpython/translator/unsimplify.py --- a/rpython/translator/unsimplify.py +++ b/rpython/translator/unsimplify.py @@ -7,7 +7,7 @@ var.concretetype = concretetype return var -def insert_empty_block(annotator, link, newops=[]): +def insert_empty_block(link, newops=[]): """Insert and return a new block along the given link.""" vars = {} for v in link.args: @@ -30,7 +30,7 @@ link.target = newblock return newblock -def insert_empty_startblock(annotator, graph): +def insert_empty_startblock(graph): vars = [v.copy() for v in graph.startblock.inputargs] newblock = Block(vars) newblock.closeblock(Link(vars, graph.startblock)) @@ -41,7 +41,7 @@ and graph.startblock.exitswitch is None and graph.startblock.exits[0].args == graph.getargs()) -def split_block(annotator, block, index, _forcelink=None): +def split_block(block, index, 
_forcelink=None): """return a link where prevblock is the block leading up but excluding the index'th operation and target is a new block with the neccessary variables passed on.