Author: Maciej Fijalkowski <[email protected]>
Branch: jitframe-on-heap
Changeset: r61041:5ebb36945721
Date: 2013-02-10 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/5ebb36945721/
Log: merge
diff --git a/rpython/jit/backend/arm/arch.py b/rpython/jit/backend/arm/arch.py
--- a/rpython/jit/backend/arm/arch.py
+++ b/rpython/jit/backend/arm/arch.py
@@ -17,6 +17,4 @@
# A jitframe is a jit.backend.llsupport.llmodel.JITFRAME = GcArray(Signed).
# Stack frame fixed area
# Currently only the force_index
-FRAME_FIXED_SIZE = 1
JITFRAME_FIXED_SIZE = 16 + 16 * 2 # 16 GPR + 16 VFP Regs (64bit)
-
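
A minimal sketch of the arithmetic behind the remaining constant, following the comment in arch.py (16 general-purpose registers plus 16 VFP registers, each VFP register taking two 32-bit words); the names below are illustrative only, not code from the repository:

    # illustrative sketch, not PyPy code
    N_GPR_REGS = 16            # general-purpose registers saved in the frame
    N_VFP_REGS = 16            # VFP registers, 64 bit each
    WORDS_PER_VFP_REG = 2      # two machine words per 64-bit VFP register
    JITFRAME_FIXED_SIZE = N_GPR_REGS + N_VFP_REGS * WORDS_PER_VFP_REG
    assert JITFRAME_FIXED_SIZE == 48   # words in the fixed area of a jitframe
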
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
--- a/rpython/jit/backend/arm/assembler.py
+++ b/rpython/jit/backend/arm/assembler.py
@@ -6,7 +6,7 @@
from rpython.jit.backend.arm import registers as r
from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, FUNC_ALIGN, \
N_REGISTERS_SAVED_BY_MALLOC, \
- JITFRAME_FIXED_SIZE, FRAME_FIXED_SIZE
+ JITFRAME_FIXED_SIZE
from rpython.jit.backend.arm.codebuilder import ARMv7Builder, OverwritingBuilder
from rpython.jit.backend.arm.locations import get_fp_offset, imm, StackLocation
from rpython.jit.backend.arm.regalloc import (Regalloc, ARMFrameManager,
@@ -259,7 +259,7 @@
def _build_stack_check_slowpath(self):
_, _, slowpathaddr = self.cpu.insert_stack_check()
- if slowpathaddr == 0 or self.cpu.propagate_exception_v < 0:
+ if slowpathaddr == 0 or not self.cpu.propagate_exception_descr:
return # no stack check (for tests, or non-translated)
#
# make a "function" that is called immediately at the start of
@@ -432,7 +432,6 @@
def _build_failure_recovery(self, exc, withfloats=False):
mc = ARMv7Builder()
self._push_all_regs_to_jitframe(mc, [], withfloats)
- self._insert_checks(mc)
if exc:
# We might have an exception pending. Load it into r4
@@ -472,49 +471,6 @@
rawstart = mc.materialize(self.cpu.asmmemmgr, [])
self.failure_recovery_code[exc + 2 * withfloats] = rawstart
- DESCR_REF = 0x00
- DESCR_INT = 0x01
- DESCR_FLOAT = 0x02
- DESCR_SPECIAL = 0x03
- CODE_FROMSTACK = 64
- CODE_STOP = 0 | DESCR_SPECIAL
- CODE_HOLE = 4 | DESCR_SPECIAL
- CODE_INPUTARG = 8 | DESCR_SPECIAL
- CODE_FORCED = 12 | DESCR_SPECIAL #XXX where should this be written?
-
- def write_failure_recovery_description(self, descr, failargs, locs):
- assert self.mc is not None
- for i in range(len(failargs)):
- arg = failargs[i]
- if arg is not None:
- if arg.type == REF:
- kind = self.DESCR_REF
- elif arg.type == INT:
- kind = self.DESCR_INT
- elif arg.type == FLOAT:
- kind = self.DESCR_FLOAT
- else:
- raise AssertionError("bogus kind")
- loc = locs[i]
- if loc.is_stack():
- pos = loc.position
- if pos < 0:
- self.mc.writechar(chr(self.CODE_INPUTARG))
- pos = ~pos
- n = self.CODE_FROMSTACK // 4 + pos
- else:
- assert loc.is_reg() or loc.is_vfp_reg()
- n = loc.value
- n = kind + 4 * n
- while n > 0x7F:
- self.mc.writechar(chr((n & 0x7F) | 0x80))
- n >>= 7
- else:
- n = self.CODE_HOLE
- self.mc.writechar(chr(n))
- self.mc.writechar(chr(self.CODE_STOP))
-
-
def generate_quick_failure(self, guardtok, fcond=c.AL):
assert isinstance(guardtok.exc, bool)
startpos = self.mc.currpos()
@@ -527,12 +483,13 @@
target = self.failure_recovery_code[exc + 2 * withfloats]
fail_descr = cast_instance_to_gcref(guardtok.faildescr)
fail_descr = rffi.cast(lltype.Signed, fail_descr)
+ base_ofs = self.cpu.get_baseofs_of_frame_field()
positions = [0] * len(guardtok.fail_locs)
for i, loc in enumerate(guardtok.fail_locs):
if loc is None:
positions[i] = -1
elif loc.is_stack():
- positions[i] = loc.value
+ positions[i] = loc.value - base_ofs
else:
if loc.is_reg():
assert loc is not r.fp # for now
@@ -560,23 +517,25 @@
mc = self.mc
if gcrootmap and gcrootmap.is_shadow_stack:
self.gen_footer_shadowstack(gcrootmap, mc)
- mc.ADD_ri(r.sp.value, r.sp.value, WORD, cond=cond) # for the force index
if self.cpu.supports_floats:
mc.VPOP([reg.value for reg in r.callee_saved_vfp_registers],
cond=cond)
- mc.POP([reg.value for reg in r.callee_restored_registers], cond=cond)
+ # pop all callee saved registers and IP to keep the alignment
+ mc.POP([reg.value for reg in r.callee_restored_registers] +
+ [r.ip.value], cond=cond)
mc.BKPT()
def gen_func_prolog(self):
- stack_size = FRAME_FIXED_SIZE * WORD
+ stack_size = WORD #alignment
stack_size += len(r.callee_saved_registers) * WORD
if self.cpu.supports_floats:
stack_size += len(r.callee_saved_vfp_registers) * 2 * WORD
- self.mc.PUSH([reg.value for reg in r.callee_saved_registers])
+ # push all callee saved registers and IP to keep the alignment
+ self.mc.PUSH([reg.value for reg in r.callee_saved_registers] +
+ [r.ip.value])
if self.cpu.supports_floats:
self.mc.VPUSH([reg.value for reg in r.callee_saved_vfp_registers])
- self.mc.SUB_ri(r.sp.value, r.sp.value, WORD) # for the force index
assert stack_size % 8 == 0 # ensure we keep alignment
# set fp to point to the JITFRAME
@@ -673,8 +632,8 @@
loop_head = self.mc.get_relative_pos()
looptoken._arm_loop_code = loop_head
#
- frame_depth = self._assemble(regalloc, inputargs, operations)
- self.update_frame_depth(frame_depth + JITFRAME_FIXED_SIZE)
+ frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations)
+ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE)
#
size_excluding_failure_stuff = self.mc.get_relative_pos()
@@ -764,6 +723,7 @@
frame_depth = max(self.current_clt.frame_info.jfi_frame_depth,
frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE)
self.fixup_target_tokens(rawstart)
+ self._patch_stackadjust(stack_check_patch_ofs + rawstart, frame_depth)
self.update_frame_depth(frame_depth)
self.teardown()
@@ -816,12 +776,11 @@
"""
descrs = self.cpu.gc_ll_descr.getframedescrs(self.cpu)
ofs = self.cpu.unpack_fielddescr(descrs.arraydescr.lendescr)
- base_ofs = self.cpu.get_baseofs_of_frame_field()
mc.gen_load_int(r.ip.value, ofs)
- mc.SUB_ri(r.ip.value, r.ip.value, base_ofs)
stack_check_cmp_ofs = mc.currpos()
if expected_size == -1:
- mc.gen_load_int(r.lr.value, 0xffffff)
+ mc.NOP()
+ mc.NOP()
else:
mc.gen_load_int(r.lr.value, expected_size)
mc.CMP_rr(r.ip.value, r.lr.value)
@@ -829,10 +788,11 @@
jg_location = mc.currpos()
mc.BKPT()
+ # the size value is still stored in lr
+ mc.PUSH([r.lr.value])
+
self.push_gcmap(mc, gcmap, push=True)
- # the size value is still stored in lr
- mc.PUSH([r.lr.value])
self.mc.BL(self._stack_check_failure)
@@ -862,19 +822,20 @@
# store return address and keep the stack aligned
mc.PUSH([r.ip.value, r.lr.value])
- # store the current gcmap(r1) in the jitframe
+ # store the current gcmap(r0) in the jitframe
gcmap_ofs = self.cpu.get_ofs_of_frame_field('jf_gcmap')
assert check_imm_arg(abs(gcmap_ofs))
- mc.STR_ri(r.r1.value, r.fp.value, imm=gcmap_ofs)
+ mc.STR_ri(r.r0.value, r.fp.value, imm=gcmap_ofs)
# set first arg, which is the old jitframe address
mc.MOV_rr(r.r0.value, r.fp.value)
# call realloc_frame, it takes two arguments
# arg0: the old jitframe
# arg1: the new size
+ #
mc.BL(self.cpu.realloc_frame)
- # set fp to the new jitframe plus the baseofs
- mc.ADD_ri(r.fp.value, r.r0.value)
+ # set fp to the new jitframe
+ mc.MOV_rr(r.fp.value, r.r0.value)
gcrootmap = self.cpu.gc_ll_descr.gcrootmap
if gcrootmap and gcrootmap.is_shadow_stack:
@@ -902,6 +863,11 @@
targettoken._arm_loop_code += rawstart
self.target_tokens_currently_compiling = None
+ def _patch_stackadjust(self, adr, allocated_depth):
+ mc = ARMv7Builder()
+ mc.gen_load_int(r.lr.value, allocated_depth)
+ mc.copy_to_raw_memory(adr)
+
def target_arglocs(self, loop_token):
return loop_token._arm_arglocs
@@ -1061,15 +1027,6 @@
asm_math_operations[oopspecindex](self, op, arglocs, regalloc, fcond)
return fcond
-
- def _insert_checks(self, mc=None):
- if not we_are_translated() and self._debug:
- if mc is None:
- mc = self.mc
- mc.CMP_rr(r.fp.value, r.sp.value)
- mc.MOV_rr(r.pc.value, r.pc.value, cond=c.GE)
- mc.BKPT()
-
def _ensure_result_bit_extension(self, resloc, size, signed):
if size == 4:
return
@@ -1245,9 +1202,9 @@
offset = loc.value
if not check_imm_arg(offset):
self.mc.gen_load_int(r.ip.value, offset, cond=cond)
- self.mc.SUB_rr(r.ip.value, r.fp.value, r.ip.value, cond=cond)
+ self.mc.ADD_rr(r.ip.value, r.fp.value, r.ip.value, cond=cond)
else:
- self.mc.SUB_ri(r.ip.value, r.fp.value, offset, cond=cond)
+ self.mc.ADD_ri(r.ip.value, r.fp.value, offset, cond=cond)
self.mc.VSTR(prev_loc.value, r.ip.value, cond=cond)
self.mc.POP([r.ip.value], cond=cond)
else:
@@ -1390,36 +1347,11 @@
# malloc_slowpath in case we called malloc_slowpath, which returns the
# new value of nursery_free_adr in r1 and the adr of the new object in
# r0.
- self.mark_gc_roots(self.write_new_force_index(),
- use_copy_area=True)
self.mc.BL(self.malloc_slowpath, c=c.HI)
self.mc.gen_load_int(r.ip.value, nursery_free_adr)
self.mc.STR_ri(r.r1.value, r.ip.value)
- def mark_gc_roots(self, force_index, use_copy_area=False):
- if force_index < 0:
- return # not needed
- gcrootmap = self.cpu.gc_ll_descr.gcrootmap
- if gcrootmap:
- mark = self._regalloc.get_mark_gc_roots(gcrootmap, use_copy_area)
- assert gcrootmap.is_shadow_stack
- gcrootmap.write_callshape(mark, force_index)
-
- def write_new_force_index(self):
- # for shadowstack only: get a new, unused force_index number and
- # write it to FORCE_INDEX_OFS. Used to record the call shape
- # (i.e. where the GC pointers are in the stack) around a CALL
- # instruction that doesn't already have a force_index.
- gcrootmap = self.cpu.gc_ll_descr.gcrootmap
- if gcrootmap and gcrootmap.is_shadow_stack:
- clt = self.current_clt
- force_index = clt.reserve_and_record_some_faildescr_index()
- self._write_fail_index(force_index)
- return force_index
- else:
- return 0
-
def push_gcmap(self, mc, gcmap, push=False, mov=False, store=False):
ptr = rffi.cast(lltype.Signed, gcmap)
if push:
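
The write_failure_recovery_description() method removed above encoded each fail argument as a small integer using a 7-bit, little-endian, variable-length scheme: values up to 0x7F take one byte, larger values are emitted seven bits at a time with the high bit set on every byte except the last. A standalone sketch of that scheme (encode() mirrors the deleted loop; decode() is a reconstruction of how grab_frame_values read it back, not code from the repository):

    # sketch of the removed encoding, not PyPy code
    def encode(n):
        out = []
        while n > 0x7F:
            out.append((n & 0x7F) | 0x80)   # continuation byte, high bit set
            n >>= 7
        out.append(n)                       # final byte, high bit clear
        return bytes(out)

    def decode(data):
        n = 0
        shift = 0
        for i, byte in enumerate(data):
            n |= (byte & 0x7F) << shift
            if not byte & 0x80:
                return n, i + 1             # value and number of bytes read
            shift += 7
        raise ValueError("truncated encoding")

    assert decode(encode(5))[0] == 5
    assert decode(encode(1000))[0] == 1000
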
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -32,6 +32,7 @@
from rpython.rlib import rgc
from rpython.rtyper.lltypesystem import rstr, rffi, lltype, llmemory
from rpython.rlib.rarithmetic import r_uint
+from rpython.rtyper.annlowlevel import cast_instance_to_gcref
NO_FORCE_INDEX = -1
@@ -339,7 +340,6 @@
target_nbargs = target_token._arm_clt._debug_nbargs
assert my_nbargs == target_nbargs
- self._insert_checks()
if target_token in self.target_tokens_currently_compiling:
self.mc.B_offs(target, fcond)
else:
@@ -365,27 +365,24 @@
self.gen_func_epilog()
return fcond
- def emit_op_call(self, op, arglocs, regalloc, fcond,
- force_index=NO_FORCE_INDEX):
- if force_index == NO_FORCE_INDEX:
- force_index = self.write_new_force_index()
+ def emit_op_call(self, op, arglocs, regalloc, fcond):
resloc = arglocs[0]
adr = arglocs[1]
arglist = arglocs[2:]
descr = op.getdescr()
size = descr.get_result_size()
signed = descr.is_result_signed()
- cond = self._emit_call(force_index, adr, arglist,
+ cond = self._emit_call(adr, arglist,
fcond, resloc, (size, signed))
return cond
- def _emit_call(self, force_index, adr, arglocs, fcond=c.AL,
+ def _emit_call(self, adr, arglocs, fcond=c.AL,
resloc=None, result_info=(-1, -1)):
if self.cpu.use_hf_abi:
- stack_args, adr = self._setup_call_hf(force_index, adr,
+ stack_args, adr = self._setup_call_hf(adr,
arglocs, fcond, resloc, result_info)
else:
- stack_args, adr = self._setup_call_sf(force_index, adr,
+ stack_args, adr = self._setup_call_sf(adr,
arglocs, fcond, resloc, result_info)
#the actual call
@@ -399,7 +396,6 @@
assert adr.is_reg()
if adr.is_reg():
self.mc.BLX(adr.value)
- self.mark_gc_roots(force_index)
self._restore_sp(stack_args, fcond)
# ensure the result is wellformed and stored in the correct location
@@ -454,7 +450,7 @@
else:
self.regalloc_push(arg)
- def _setup_call_sf(self, force_index, adr, arglocs, fcond=c.AL,
+ def _setup_call_sf(self, adr, arglocs, fcond=c.AL,
resloc=None, result_info=(-1, -1)):
reg_args = count_reg_args(arglocs)
stack_args = self._collect_stack_args_sf(arglocs)
@@ -499,7 +495,7 @@
self.mov_from_vfp_loc(loc, reg, r.all_regs[reg.value + 1])
return stack_args, adr
- def _setup_call_hf(self, force_index, adr, arglocs, fcond=c.AL,
+ def _setup_call_hf(self, adr, arglocs, fcond=c.AL,
resloc=None, result_info=(-1, -1)):
non_float_locs = []
non_float_regs = []
@@ -1053,7 +1049,7 @@
length_loc = bytes_loc
# call memcpy()
regalloc.before_call()
- self._emit_call(NO_FORCE_INDEX, imm(self.memcpy_addr),
+ self._emit_call(imm(self.memcpy_addr),
[dstaddr_loc, srcaddr_loc, length_loc])
regalloc.possibly_free_var(length_box)
@@ -1126,6 +1122,7 @@
return fcond
def emit_op_force_token(self, op, arglocs, regalloc, fcond):
+ # XXX kill me
res_loc = arglocs[0]
self.mc.MOV_rr(res_loc.value, r.fp.value)
return fcond
@@ -1138,14 +1135,12 @@
resloc = arglocs[2]
callargs = arglocs[3:]
- faildescr = guard_op.getdescr()
- fail_index = self.cpu.get_fail_descr_number(faildescr)
- self._write_fail_index(fail_index)
+ self._store_force_index(guard_op)
descr = op.getdescr()
assert isinstance(descr, JitCellToken)
# check value
assert tmploc is r.r0
- self._emit_call(fail_index, imm(descr._arm_func_addr),
+ self._emit_call(imm(descr._arm_func_addr),
callargs, fcond, resloc=tmploc)
if op.result is None:
value = self.cpu.done_with_this_frame_void_v
@@ -1288,9 +1283,7 @@
def emit_guard_call_may_force(self, op, guard_op, arglocs, regalloc,
fcond):
- faildescr = guard_op.getdescr()
- fail_index = self.cpu.get_fail_descr_number(faildescr)
- self._write_fail_index(fail_index)
+ self._store_force_index(guard_op)
numargs = op.numargs()
callargs = arglocs[2:numargs + 1] # extract the arguments to the call
adr = arglocs[1]
@@ -1300,12 +1293,13 @@
size = descr.get_result_size()
signed = descr.is_result_signed()
#
- self._emit_call(fail_index, adr, callargs, fcond,
+ self._emit_call(adr, callargs, fcond,
resloc, (size, signed))
- self.mc.LDR_ri(r.ip.value, r.fp.value)
+ ofs = self.cpu.get_ofs_of_frame_field('jf_descr')
+ self.mc.LDR_ri(r.ip.value, r.fp.value, imm=ofs)
self.mc.CMP_ri(r.ip.value, 0)
- self._emit_guard(guard_op, arglocs[1 + numargs:], c.GE,
+ self._emit_guard(guard_op, arglocs[1 + numargs:], c.EQ,
save_exc=True, is_guard_not_forced=True)
return fcond
@@ -1322,15 +1316,13 @@
if gcrootmap:
self.call_release_gil(gcrootmap, arglocs, fcond)
# do the call
- faildescr = guard_op.getdescr()
- fail_index = self.cpu.get_fail_descr_number(faildescr)
- self._write_fail_index(fail_index)
+ self._store_force_index(guard_op)
#
descr = op.getdescr()
size = descr.get_result_size()
signed = descr.is_result_signed()
#
- self._emit_call(fail_index, adr, callargs, fcond,
+ self._emit_call(adr, callargs, fcond,
resloc, (size, signed))
# then reopen the stack
if gcrootmap:
@@ -1352,8 +1344,7 @@
regs_to_save.append(reg)
assert gcrootmap.is_shadow_stack
with saved_registers(self.mc, regs_to_save):
- self._emit_call(NO_FORCE_INDEX,
- imm(self.releasegil_addr), [], fcond)
+ self._emit_call(imm(self.releasegil_addr), [], fcond)
def call_reacquire_gil(self, gcrootmap, save_loc, fcond):
# save the previous result into the stack temporarily.
@@ -1370,25 +1361,14 @@
regs_to_save.append(r.ip) # for alignment
assert gcrootmap.is_shadow_stack
with saved_registers(self.mc, regs_to_save, vfp_regs_to_save):
- self._emit_call(NO_FORCE_INDEX, imm(self.reacqgil_addr), [], fcond)
+ self._emit_call(imm(self.reacqgil_addr), [], fcond)
- def write_new_force_index(self):
- # for shadowstack only: get a new, unused force_index number and
- # write it to FORCE_INDEX_OFS. Used to record the call shape
- # (i.e. where the GC pointers are in the stack) around a CALL
- # instruction that doesn't already have a force_index.
- gcrootmap = self.cpu.gc_ll_descr.gcrootmap
- if gcrootmap and gcrootmap.is_shadow_stack:
- clt = self.current_clt
- force_index = clt.reserve_and_record_some_faildescr_index()
- self._write_fail_index(force_index)
- return force_index
- else:
- return 0
-
- def _write_fail_index(self, fail_index):
- self.mc.gen_load_int(r.ip.value, fail_index)
- self.mc.STR_ri(r.ip.value, r.fp.value)
+ def _store_force_index(self, guard_op):
+ faildescr = guard_op.getdescr()
+ ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr')
+ value = rffi.cast(lltype.Signed, cast_instance_to_gcref(faildescr))
+ self.mc.gen_load_int(r.ip.value, value)
+ self.store_reg(self.mc, r.ip, r.fp, ofs)
def emit_op_call_malloc_gc(self, op, arglocs, regalloc, fcond):
self.emit_op_call(op, arglocs, regalloc, fcond)
diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py
--- a/rpython/jit/backend/arm/regalloc.py
+++ b/rpython/jit/backend/arm/regalloc.py
@@ -1070,6 +1070,7 @@
prepare_op_cond_call_gc_wb_array = prepare_op_cond_call_gc_wb
def prepare_op_force_token(self, op, fcond):
+ # XXX for now we return a regular reg
res_loc = self.force_allocate_reg(op.result)
self.possibly_free_var(op.result)
return [res_loc]
@@ -1109,9 +1110,9 @@
# end of the same loop, i.e. if what we are compiling is a single
# loop that ends up jumping to this LABEL, then we can now provide
# the hints about the expected position of the spilled variables.
- jump_op = self.final_jump_op
- if jump_op is not None and jump_op.getdescr() is descr:
- self._compute_hint_frame_locations_from_descr(descr)
+ #jump_op = self.final_jump_op
+ #if jump_op is not None and jump_op.getdescr() is descr:
+ # self._compute_hint_frame_locations_from_descr(descr)
def prepare_guard_call_may_force(self, op, guard_op, fcond):
args = self._prepare_call(op, save_all_regs=True)
diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py
--- a/rpython/jit/backend/arm/runner.py
+++ b/rpython/jit/backend/arm/runner.py
@@ -117,33 +117,6 @@
cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)'
cast_ptr_to_int = staticmethod(cast_ptr_to_int)
- all_null_registers = lltype.malloc(rffi.LONGP.TO,
- len(all_vfp_regs) * 2 + len(all_regs),
- flavor='raw', zero=True, immortal=True)
-
- def force(self, addr_of_force_index):
- TP = rffi.CArrayPtr(lltype.Signed)
- fail_index = rffi.cast(TP, addr_of_force_index)[0]
- assert fail_index >= 0, "already forced!"
- faildescr = self.get_fail_descr_from_number(fail_index)
- rffi.cast(TP, addr_of_force_index)[0] = ~fail_index
- frb = self.assembler._find_failure_recovery_bytecode(faildescr)
- bytecode = rffi.cast(rffi.UCHARP, frb)
- addr_all_null_regsiters = rffi.cast(rffi.LONG, self.all_null_registers)
- #
- assert (rffi.cast(lltype.Signed, bytecode[0]) ==
- self.assembler.CODE_FORCED)
- bytecode = rffi.ptradd(bytecode, 1)
- deadframe = self.assembler.grab_frame_values(self,
- bytecode, addr_of_force_index,
- self.all_null_registers,
- self.all_null_registers)
- #
- assert self.get_latest_descr(deadframe) is faildescr
- self.assembler.force_token_to_dead_frame[addr_of_force_index] = (
- deadframe)
- return deadframe
-
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken)
diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
--- a/rpython/jit/backend/llsupport/llmodel.py
+++ b/rpython/jit/backend/llsupport/llmodel.py
@@ -178,6 +178,11 @@
self.gc_ll_descr.freeing_block(rawstart, rawstop)
self.asmmemmgr.free(rawstart, rawstop)
+ def force(self, addr_of_force_token):
+ frame = rffi.cast(jitframe.JITFRAMEPTR, addr_of_force_token)
+ frame.jf_descr = frame.jf_force_descr
+ return lltype.cast_opaque_ptr(llmemory.GCREF, frame)
+
# ------------------- helpers and descriptions --------------------
@staticmethod
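
A toy model of the force() method added above (plain Python, not PyPy code; FakeJITFrame and the integer "addresses" are made up, only the jf_descr/jf_force_descr field names come from the diff): before a call that may be forced, the backend stores the guard's faildescr into jf_force_descr (_store_force_index in opassembler.py); forcing simply copies that value into jf_descr, which the guard_call_may_force code then sees as non-zero when it reloads jf_descr and compares it against 0.

    # toy model only -- the frame class and addresses are hypothetical
    class FakeJITFrame(object):
        def __init__(self):
            self.jf_descr = 0          # 0: the frame has not been forced
            self.jf_force_descr = 0

    def store_force_index(frame, faildescr_addr):
        frame.jf_force_descr = faildescr_addr

    def force(frame):
        frame.jf_descr = frame.jf_force_descr
        return frame

    frame = FakeJITFrame()
    store_force_index(frame, 0x1234)   # address of the guard's faildescr
    assert frame.jf_descr == 0         # GUARD_NOT_FORCED would pass here
    force(frame)
    assert frame.jf_descr == 0x1234    # after forcing, the guard fails
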
diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py
--- a/rpython/jit/backend/x86/runner.py
+++ b/rpython/jit/backend/x86/runner.py
@@ -152,11 +152,6 @@
cast_ptr_to_int._annspecialcase_ = 'specialize:arglltype(0)'
cast_ptr_to_int = staticmethod(cast_ptr_to_int)
- def force(self, addr_of_force_token):
- frame = rffi.cast(jitframe.JITFRAMEPTR, addr_of_force_token)
- frame.jf_descr = frame.jf_force_descr
- return lltype.cast_opaque_ptr(llmemory.GCREF, frame)
-
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken)