Author: Maciej Fijalkowski <fij...@gmail.com>
Branch: 
Changeset: r64724:b93ade5abad3
Date: 2013-06-03 05:10 +0800
http://bitbucket.org/pypy/pypy/changeset/b93ade5abad3/

Log:    merge

diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -2,7 +2,7 @@
 PyPy on Windows
 ===============
 
-Pypy is supported on Windows platforms, starting with Windows 2000.
+PyPy is supported on Windows platforms, starting with Windows 2000.
 The following text gives some hints about how to translate the PyPy
 interpreter.
 
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -303,7 +303,7 @@
         return _absolute_import(space, modulename, baselevel,
                                 fromlist_w, tentative)
     finally:
-        lock.release_lock()
+        lock.release_lock(silent_after_fork=True)
 
 @jit.unroll_safe
 def absolute_import_try(space, modulename, baselevel, fromlist_w):
@@ -788,10 +788,10 @@
             self.lockowner = me
         self.lockcounter += 1
 
-    def release_lock(self):
+    def release_lock(self, silent_after_fork):
         me = self.space.getexecutioncontext()   # used as thread ident
         if self.lockowner is not me:
-            if self.lockowner is None:
+            if self.lockowner is None and silent_after_fork:
                 # Too bad.  This situation can occur if a fork() occurred
                 # with the import lock held, and we're the child.
                 return
diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
--- a/pypy/module/imp/interp_imp.py
+++ b/pypy/module/imp/interp_imp.py
@@ -177,7 +177,7 @@
 
 def release_lock(space):
     if space.config.objspace.usemodules.thread:
-        importing.getimportlock(space).release_lock()
+        importing.getimportlock(space).release_lock(silent_after_fork=False)
 
 def reinit_lock(space):
     if space.config.objspace.usemodules.thread:
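
A rough sketch (not part of the diff) of the locking convention these two hunks
set up: the internal release at the end of an import passes
silent_after_fork=True, so a lock left owner-less by a fork() is tolerated,
while the user-facing imp.release_lock() passes False and keeps raising on a
mismatched release. ToyImportLock below is a hypothetical simplification of
ImportRLock; it omits the real mutex and execution-context bookkeeping.

    import threading

    class ToyImportLock:
        # Hypothetical, simplified model of ImportRLock's release semantics;
        # the real class (with an actual lock) lives in
        # pypy/module/imp/importing.py.
        def __init__(self):
            self.lockowner = None      # ident of the thread holding the lock
            self.lockcounter = 0       # re-entrancy count

        def acquire_lock(self):
            self.lockowner = threading.get_ident()
            self.lockcounter += 1

        def release_lock(self, silent_after_fork):
            me = threading.get_ident()
            if self.lockowner != me:
                if self.lockowner is None and silent_after_fork:
                    # fork() happened while the lock was held and we are the
                    # child: tolerate the unmatched release silently.
                    return
                raise RuntimeError("not holding the import lock")
            self.lockcounter -= 1
            if self.lockcounter == 0:
                self.lockowner = None

    # importing.py (internal path):  lock.release_lock(silent_after_fork=True)
    # interp_imp.py (imp module):    lock.release_lock(silent_after_fork=False)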
diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
--- a/rpython/jit/backend/arm/assembler.py
+++ b/rpython/jit/backend/arm/assembler.py
@@ -8,6 +8,7 @@
     JITFRAME_FIXED_SIZE)
 from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder
 from rpython.jit.backend.arm.locations import imm, StackLocation
+from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size
 from rpython.jit.backend.arm.opassembler import ResOpAssembler
 from rpython.jit.backend.arm.regalloc import (Regalloc,
     CoreRegisterManager, check_imm_arg, VFPRegisterManager,
@@ -961,7 +962,7 @@
             return self._load_core_reg(mc, target, base, ofs, cond, helper)
 
     def _load_vfp_reg(self, mc, target, base, ofs, cond=c.AL, helper=r.ip):
-        if check_imm_arg(ofs):
+        if check_imm_arg(ofs, VMEM_imm_size):
             mc.VLDR(target.value, base.value, imm=ofs, cond=cond)
         else:
             mc.gen_load_int(helper.value, ofs, cond=cond)
@@ -982,7 +983,7 @@
             return self._store_core_reg(mc, source, base, ofs, cond, helper)
 
     def _store_vfp_reg(self, mc, source, base, ofs, cond=c.AL, helper=r.ip):
-        if check_imm_arg(ofs):
+        if check_imm_arg(ofs, VMEM_imm_size):
             mc.VSTR(source.value, base.value, imm=ofs, cond=cond)
         else:
             mc.gen_load_int(helper.value, ofs, cond=cond)
diff --git a/rpython/jit/backend/arm/callbuilder.py b/rpython/jit/backend/arm/callbuilder.py
--- a/rpython/jit/backend/arm/callbuilder.py
+++ b/rpython/jit/backend/arm/callbuilder.py
@@ -52,6 +52,8 @@
 
     def _push_stack_args(self, stack_args, on_stack):
         assert on_stack % 8 == 0
+        if on_stack == 0:
+            return
         self._adjust_sp(-on_stack)
         self.current_sp = on_stack
         ofs = 0
@@ -71,7 +73,7 @@
             else:
                 self.mc.gen_load_int(r.ip.value, n)
                 self.mc.ADD_rr(r.sp.value, r.sp.value, r.ip.value)
-        else:
+        elif n < 0:
             n = abs(n)
             if check_imm_arg(n):
                 self.mc.SUB_ri(r.sp.value, r.sp.value, n)
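
For context, a minimal restatement (hypothetical, plain Python) of what
_adjust_sp now does: a positive n releases stack space with ADD, a negative n
reserves it with SUB, and n == 0 emits nothing at all, which is also why
_push_stack_args can return early for an empty argument area. The real method
emits through self.mc and additionally handles values that do not fit an ARM
immediate.

    def adjust_sp(n):
        # Hypothetical outline of _adjust_sp after this change; returns the
        # instructions that would be emitted instead of writing them via mc.
        insns = []
        if n > 0:
            insns.append("ADD sp, sp, #%d" % n)     # release n bytes of stack
        elif n < 0:
            insns.append("SUB sp, sp, #%d" % (-n))  # reserve -n bytes of stack
        # n == 0: nothing, rather than a pointless "SUB sp, sp, #0"
        return insns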
diff --git a/rpython/jit/backend/arm/helper/regalloc.py b/rpython/jit/backend/arm/helper/regalloc.py
--- a/rpython/jit/backend/arm/helper/regalloc.py
+++ b/rpython/jit/backend/arm/helper/regalloc.py
@@ -4,7 +4,10 @@
 from rpython.jit.metainterp.history import ConstInt
 from rpython.rlib.objectmodel import we_are_translated
 
-def check_imm_arg(arg, size=0xFF, allow_zero=True):
+VMEM_imm_size=0x3FC
+default_imm_size=0xFF
+
+def check_imm_arg(arg, size=default_imm_size, allow_zero=True):
     assert not isinstance(arg, ConstInt)
     if not we_are_translated():
         if not isinstance(arg, int):
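
The two constants encode different ARM immediate ranges: the core-register
loads and stores in this backend are checked against 0xFF, while VLDR/VSTR
encode the offset as an 8-bit count of words, so byte offsets up to 0x3FC
(255 * 4) are encodable and must be a multiple of 4. A simplified, hypothetical
restatement of the check (the real helper also rejects ConstInt and, when
untranslated, non-int arguments):

    VMEM_imm_size = 0x3FC    # VLDR/VSTR: 8-bit word offset, i.e. 255 * 4 bytes
    default_imm_size = 0xFF  # range used for the core-register accesses here

    def check_imm_arg(arg, size=default_imm_size, allow_zero=True):
        # Simplified sketch: is `arg` encodable as an immediate offset?
        # (The multiple-of-4 requirement for VLDR/VSTR is asserted separately
        # by the callers.)
        lower_bound = 0 if allow_zero else 1
        return lower_bound <= arg <= size

    # e.g. (hypothetical values)
    # check_imm_arg(0x200)                  -> False, too big for 0xFF
    # check_imm_arg(0x200, VMEM_imm_size)   -> True, fits a VLDR/VSTR offset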
diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
--- a/rpython/jit/backend/arm/opassembler.py
+++ b/rpython/jit/backend/arm/opassembler.py
@@ -15,6 +15,7 @@
                                                 gen_emit_unary_float_op,
                                                 saved_registers)
 from rpython.jit.backend.arm.helper.regalloc import check_imm_arg
+from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size
 from rpython.jit.backend.arm.codebuilder import InstrBuilder, OverwritingBuilder
 from rpython.jit.backend.arm.jump import remap_frame_layout
 from rpython.jit.backend.arm.regalloc import TempBox
@@ -23,6 +24,7 @@
 from rpython.jit.backend.llsupport.gcmap import allocate_gcmap
 from rpython.jit.backend.llsupport.descr import InteriorFieldDescr
 from rpython.jit.backend.llsupport.assembler import GuardToken, BaseAssembler
+from rpython.jit.backend.llsupport.regalloc import get_scale
 from rpython.jit.metainterp.history import (Box, AbstractFailDescr,
                                             INT, FLOAT, REF)
 from rpython.jit.metainterp.history import TargetToken
@@ -559,47 +561,8 @@
     def emit_op_getfield_gc(self, op, arglocs, regalloc, fcond):
         base_loc, ofs, res, size = arglocs
         signed = op.getdescr().is_field_signed()
-        if size.value == 8:
-            assert res.is_vfp_reg()
-            # vldr only supports imm offsets
-            # so if the ofset is too large we add it to the base and use an
-            # offset of 0
-            if ofs.is_reg():
-                self.mc.ADD_rr(r.ip.value, base_loc.value, ofs.value)
-                base_loc = r.ip
-                ofs = imm(0)
-            else:
-                assert ofs.value % 4 == 0
-            self.mc.VLDR(res.value, base_loc.value, ofs.value)
-        elif size.value == 4:
-            if ofs.is_imm():
-                self.mc.LDR_ri(res.value, base_loc.value, ofs.value)
-            else:
-                self.mc.LDR_rr(res.value, base_loc.value, ofs.value)
-        elif size.value == 2:
-            if ofs.is_imm():
-                if signed:
-                    self.mc.LDRSH_ri(res.value, base_loc.value, ofs.value)
-                else:
-                    self.mc.LDRH_ri(res.value, base_loc.value, ofs.value)
-            else:
-                if signed:
-                    self.mc.LDRSH_rr(res.value, base_loc.value, ofs.value)
-                else:
-                    self.mc.LDRH_rr(res.value, base_loc.value, ofs.value)
-        elif size.value == 1:
-            if ofs.is_imm():
-                if signed:
-                    self.mc.LDRSB_ri(res.value, base_loc.value, ofs.value)
-                else:
-                    self.mc.LDRB_ri(res.value, base_loc.value, ofs.value)
-            else:
-                if signed:
-                    self.mc.LDRSB_rr(res.value, base_loc.value, ofs.value)
-                else:
-                    self.mc.LDRB_rr(res.value, base_loc.value, ofs.value)
-        else:
-            assert 0
+        scale = get_scale(size.value)
+        self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond)
         return fcond
 
     emit_op_getfield_raw = emit_op_getfield_gc
@@ -609,39 +572,22 @@
     def emit_op_getinteriorfield_gc(self, op, arglocs, regalloc, fcond):
         (base_loc, index_loc, res_loc,
             ofs_loc, ofs, itemsize, fieldsize) = arglocs
-        self.mc.gen_load_int(r.ip.value, itemsize.value)
-        self.mc.MUL(r.ip.value, index_loc.value, r.ip.value)
+        scale = get_scale(fieldsize.value)
+        tmploc, save = self.get_tmp_reg([base_loc, ofs_loc])
+        assert not save
+        self.mc.gen_load_int(tmploc.value, itemsize.value)
+        self.mc.MUL(tmploc.value, index_loc.value, tmploc.value)
         descr = op.getdescr()
         assert isinstance(descr, InteriorFieldDescr)
         signed = descr.fielddescr.is_field_signed()
         if ofs.value > 0:
             if ofs_loc.is_imm():
-                self.mc.ADD_ri(r.ip.value, r.ip.value, ofs_loc.value)
+                self.mc.ADD_ri(tmploc.value, tmploc.value, ofs_loc.value)
             else:
-                self.mc.ADD_rr(r.ip.value, r.ip.value, ofs_loc.value)
-
-        if fieldsize.value == 8:
-            # vldr only supports imm offsets
-            # so if the ofset is too large we add it to the base and use an
-            # offset of 0
-            assert res_loc.is_vfp_reg()
-            self.mc.ADD_rr(r.ip.value, base_loc.value, r.ip.value)
-            self.mc.VLDR(res_loc.value, r.ip.value, 0)
-        elif fieldsize.value == 4:
-            self.mc.LDR_rr(res_loc.value, base_loc.value, r.ip.value)
-        elif fieldsize.value == 2:
-            if signed:
-                self.mc.LDRSH_rr(res_loc.value, base_loc.value, r.ip.value)
-            else:
-                self.mc.LDRH_rr(res_loc.value, base_loc.value, r.ip.value)
-        elif fieldsize.value == 1:
-            if signed:
-                self.mc.LDRSB_rr(res_loc.value, base_loc.value, r.ip.value)
-            else:
-                self.mc.LDRB_rr(res_loc.value, base_loc.value, r.ip.value)
-        else:
-            assert 0
-
+                self.mc.ADD_rr(tmploc.value, tmploc.value, ofs_loc.value)
+        ofs_loc = tmploc
+        self._load_from_mem(res_loc, base_loc, ofs_loc,
+                                imm(scale), signed, fcond)
         return fcond
 
     def emit_op_setinteriorfield_gc(self, op, arglocs, regalloc, fcond):
@@ -731,33 +677,73 @@
             self.mc.ADD_ri(r.ip.value, ofs_loc.value, imm=ofs.value)
             ofs_loc = r.ip
         #
-        self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed)
+        self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond)
         return fcond
 
     def _load_from_mem(self, res_loc, base_loc, ofs_loc, scale,
                                             signed=False, fcond=c.AL):
         if scale.value == 3:
             assert res_loc.is_vfp_reg()
-            assert ofs_loc.is_reg()
-            self.mc.ADD_rr(r.ip.value, base_loc.value, ofs_loc.value)
-            self.mc.VLDR(res_loc.value, r.ip.value, cond=fcond)
+            # vldr only supports imm offsets
+            # if the offset is in a register we add it to the base and use a
+            # tmp reg
+            if ofs_loc.is_reg():
+                tmploc, save = self.get_tmp_reg([base_loc, ofs_loc])
+                assert not save
+                self.mc.ADD_rr(tmploc.value, base_loc.value, ofs_loc.value)
+                base_loc = tmploc
+                ofs_loc = imm(0)
+            else:
+                assert ofs_loc.is_imm()
+                # if the offset is too large for an imm we add it to the base
+                # and use an offset of 0
+                if not check_imm_arg(ofs_loc.value, VMEM_imm_size):
+                    tmploc, save = self.get_tmp_reg([base_loc, ofs_loc])
+                    assert not save
+                    self.mc.gen_load_int(tmploc.value, ofs_loc.value)
+                    self.mc.ADD_rr(tmploc.value, base_loc.value, tmploc.value)
+                    base_loc = tmploc
+                    ofs_loc = imm(0)
+                else:  # sanity check
+                    assert ofs_loc.value % 4 == 0
+            self.mc.VLDR(res_loc.value, base_loc.value, ofs_loc.value, cond=fcond)
         elif scale.value == 2:
-            self.mc.LDR_rr(res_loc.value, base_loc.value,
-                                 ofs_loc.value, cond=fcond)
+            if ofs_loc.is_imm():
+                self.mc.LDR_ri(res_loc.value, base_loc.value,
+                                ofs_loc.value, cond=fcond)
+            else:
+                self.mc.LDR_rr(res_loc.value, base_loc.value,
+                                ofs_loc.value, cond=fcond)
         elif scale.value == 1:
-            if signed:
-                self.mc.LDRSH_rr(res_loc.value, base_loc.value,
-                                 ofs_loc.value, cond=fcond)
+            if ofs_loc.is_imm():
+                if signed:
+                    self.mc.LDRSH_ri(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
+                else:
+                    self.mc.LDRH_ri(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
             else:
-                self.mc.LDRH_rr(res_loc.value, base_loc.value,
-                                 ofs_loc.value, cond=fcond)
+                if signed:
+                    self.mc.LDRSH_rr(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
+                else:
+                    self.mc.LDRH_rr(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
         elif scale.value == 0:
-            if signed:
-                self.mc.LDRSB_rr(res_loc.value, base_loc.value,
-                                 ofs_loc.value, cond=fcond)
+            if ofs_loc.is_imm():
+                if signed:
+                    self.mc.LDRSB_ri(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
+                else:
+                    self.mc.LDRB_ri(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
             else:
-                self.mc.LDRB_rr(res_loc.value, base_loc.value,
-                                 ofs_loc.value, cond=fcond)
+                if signed:
+                    self.mc.LDRSB_rr(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
+                else:
+                    self.mc.LDRB_rr(res_loc.value, base_loc.value,
+                                        ofs_loc.value, cond=fcond)
         else:
             assert 0
 
@@ -770,7 +756,7 @@
         # no base offset
         assert ofs.value == 0
         signed = op.getdescr().is_item_signed()
-        self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed)
+        self._load_from_mem(res_loc, base_loc, ofs_loc, scale, signed, fcond)
         return fcond
 
     def emit_op_strlen(self, op, arglocs, regalloc, fcond):
@@ -993,7 +979,7 @@
                 assert result_loc.is_vfp_reg()
                 # we always have a register here, since we have to sync them
                 # before call_assembler
-                self.mc.VLDR(result_loc.value, r.r0.value, imm=ofs)
+                self.load_reg(self.mc, result_loc, r.r0, ofs=ofs)
             else:
                 assert result_loc is r.r0
                 ofs = self.cpu.unpack_arraydescr(descr)
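
The net effect of the opassembler.py changes is that getfield_gc,
getinteriorfield_gc and the array loads all funnel into the single
_load_from_mem helper, dispatching on a log2 scale instead of duplicating the
size switch per operation. A hypothetical restatement of the scale mapping
provided by get_scale from llsupport/regalloc.py:

    def get_scale(size):
        # Simplified sketch of the llsupport helper: map an item size in
        # bytes (1, 2, 4 or 8) to the log2 "scale" _load_from_mem switches on.
        scale = 0
        while (1 << scale) < size:
            scale += 1
        assert (1 << scale) == size
        return scale

    # getfield_gc of an 8-byte (float) field then becomes, roughly:
    #     scale = get_scale(size.value)                      # -> 3, VLDR path
    #     self._load_from_mem(res, base_loc, ofs, imm(scale), signed, fcond)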