[pypy-commit] pypy default: reapply dc81116ff4b7

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73256:44c39245f150
Date: 2014-09-01 08:57 +0200
http://bitbucket.org/pypy/pypy/changeset/44c39245f150/

Log: reapply dc81116ff4b7

diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -416,6 +416,8 @@
 trysource = source[start:end]
 if trysource.isparseable():
 return start, end
+if end == start + 100:   # XXX otherwise, it takes forever
+break# XXX
 raise SyntaxError("no valid source range around line %d " % (lineno,))
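
For context, the surrounding code keeps widening the [start:end] candidate slice until it becomes parseable; the two added lines simply cap that search.  A minimal standalone sketch of the idea (simplified, not the real py/_code/source.py loop):

    def find_parseable_range(lines, start, is_parseable, limit=100):
        # widen the candidate range one line at a time, but give up
        # after 'limit' lines so a pathological input cannot make the
        # search run (nearly) forever
        for end in range(start + 1, len(lines) + 1):
            if is_parseable(lines[start:end]):
                return start, end
            if end == start + limit:
                break
        raise SyntaxError("no valid source range around line %d" % start)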
 
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Reapply fix c6f52c21fe7e

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73255:88ad3346a763
Date: 2014-09-01 08:56 +0200
http://bitbucket.org/pypy/pypy/changeset/88ad3346a763/

Log: Reapply fix c6f52c21fe7e

diff --git a/py/_path/local.py b/py/_path/local.py
--- a/py/_path/local.py
+++ b/py/_path/local.py
@@ -750,7 +750,8 @@
 mkdtemp = classmethod(mkdtemp)
 
 def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
-  lock_timeout = 172800):   # two days
+  lock_timeout = 172800,   # two days
+  min_timeout = 300):  # five minutes
 """ return unique directory with a number greater than the current
 maximum one.  The number is assumed to start directly after prefix.
 if keep is true directories with a number less than (maxnum-keep)
@@ -818,6 +819,20 @@
 for path in rootdir.listdir():
 num = parse_num(path)
 if num is not None and num <= (maxnum - keep):
+if min_timeout:
+# NB: doing this is needed to prevent (or reduce
+# a lot the chance of) the following situation:
+# 'keep+1' processes call make_numbered_dir() at
+# the same time, they create dirs, but then the
+# last process notices the first dir doesn't have
+# (yet) a .lock in it and kills it.
+try:
+t1 = path.lstat().mtime
+t2 = lockfile.lstat().mtime
+if abs(t2-t1) < min_timeout:
+continue   # skip directories too recent
+except py.error.Error:
+continue   # failure to get a time, better skip
 lf = path.join('.lock')
 try:
 t1 = lf.lstat().mtime
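
The comment above describes the race this guards against; as a minimal standalone sketch of the new check (assuming py.path.local-style objects with .lstat().mtime; names here are illustrative only):

    def too_recent_to_delete(path, lockfile, min_timeout=300):
        # Skip candidate directories whose mtime is within min_timeout
        # seconds of our own lockfile's mtime: they may belong to a
        # concurrent make_numbered_dir() call that has not yet managed
        # to create its own .lock file, so they must not be removed.
        try:
            t1 = path.lstat().mtime
            t2 = lockfile.lstat().mtime
        except Exception:          # py.error.Error in the real code
            return True            # failure to get a time, better skip
        return abs(t2 - t1) < min_timeout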
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Add two README files in "py" and "_pytest". PLEASE READ THEM

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73257:084074ff7fa1
Date: 2014-09-01 09:06 +0200
http://bitbucket.org/pypy/pypy/changeset/084074ff7fa1/

Log: Add two README files in "py" and "_pytest". PLEASE READ THEM when
updating these!

diff --git a/_pytest/README-BEFORE-UPDATING b/_pytest/README-BEFORE-UPDATING
new file mode 100644
--- /dev/null
+++ b/_pytest/README-BEFORE-UPDATING
@@ -0,0 +1,17 @@
+This is PyPy's code of the pytest lib.  We don't expect to upgrade it
+very often, but once we do:
+
+WARNING!
+
+WE HAVE MADE A FEW TWEAKS HERE!
+
+Please be sure that you don't just copy the newer version from
+upstream without checking the few changes that we did.  This
+can be done like this:
+
+cd 
+hg log . -v | less
+
+then search for all " _pytest/" in that list to know which are the
+relevant checkins.  (Look for the checkins that only edit one
+or two files in this directory.)
diff --git a/py/README-BEFORE-UPDATING b/py/README-BEFORE-UPDATING
new file mode 100644
--- /dev/null
+++ b/py/README-BEFORE-UPDATING
@@ -0,0 +1,17 @@
+This is PyPy's code of the py lib.  We don't expect to upgrade it
+very often, but once we do:
+
+WARNING!
+
+WE HAVE MADE A FEW TWEAKS HERE!
+
+Please be sure that you don't just copy the newer version from
+upstream without checking the few changes that we did.  This
+can be done like this:
+
+cd 
+hg log . -v | less
+
+then search for all " py/" in that list to know which are the
+relevant checkins.  (Look for the checkins that only edit one
+or two files in this directory.)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Update for some (minimal) constant-folding occurring in the list-to-cdata-struct code

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73258:3119573b8a41
Date: 2014-09-01 09:17 +0200
http://bitbucket.org/pypy/pypy/changeset/3119573b8a41/

Log: Update for some (minimal) constant-folding occurring in the list-to-
cdata-struct code

diff --git a/pypy/module/pypyjit/test_pypy_c/test_ffi.py 
b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
--- a/pypy/module/pypyjit/test_pypy_c/test_ffi.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_ffi.py
@@ -340,30 +340,19 @@
 guard_value(p166, ConstPtr(ptr72), descr=...)
 p167 = call(ConstClass(_ll_0_alloc_with_del___), descr=)
 guard_no_exception(descr=...)
-i168 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, 
descr=)
-i169 = int_add(i168, i97)
-i170 = int_sub(i160, i106)
-setfield_gc(p167, i168, descr=)
+i112 = int_sub(i160, -32768)
 setfield_gc(p167, ConstPtr(null), descr=)
-setfield_gc(p167, ConstPtr(ptr89), descr=)
-i171 = uint_gt(i170, i108)
-guard_false(i171, descr=...)
-i172 = int_sub(i160, -32768)
-i173 = int_and(i172, 65535)
-i174 = int_add(i173, -32768)
-setarrayitem_raw(i169, 0, i174, descr=)
-i175 = int_add(i168, i121)
-i176 = int_sub(i160, i130)
-i177 = uint_gt(i176, i132)
-guard_false(i177, descr=...)
-setarrayitem_raw(i175, 0, i174, descr=)
-i178 = int_add(i168, i140)
-i179 = int_sub(i160, i149)
-i180 = uint_gt(i179, i151)
-guard_false(i180, descr=...)
-setarrayitem_raw(i178, 0, i174, descr=)
+setfield_gc(p167, ConstPtr(ptr85), descr=)
+i114 = uint_gt(i112, 65535)
+guard_false(i114, descr=...)
+i115 = int_and(i112, 65535)
+i116 = int_add(i115, -32768)
 --TICK--
-i183 = arraylen_gc(p67, descr=)
-i184 = arraylen_gc(p92, descr=)
+i119 = call(ConstClass(_ll_1_raw_malloc_varsize__Signed), 6, 
descr=)
+raw_store(i119, 0, i116, descr=)
+raw_store(i119, 2, i116, descr=)
+raw_store(i119, 4, i116, descr=)
+setfield_gc(p167, i119, descr=)
+i123 = arraylen_gc(p67, descr=)
 jump(..., descr=...)
 """)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: fix for e9f0c13de06b: output the jit-backend-count loop numbers

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73259:625c201e7f9b
Date: 2014-09-01 09:25 +0200
http://bitbucket.org/pypy/pypy/changeset/625c201e7f9b/

Log: fix for e9f0c13de06b: output the jit-backend-count loop numbers as
unsigned too, to match the newly unsigned Guard0xNNN output.

diff --git a/rpython/jit/backend/llsupport/assembler.py 
b/rpython/jit/backend/llsupport/assembler.py
--- a/rpython/jit/backend/llsupport/assembler.py
+++ b/rpython/jit/backend/llsupport/assembler.py
@@ -294,10 +294,16 @@
 struct = self.loop_run_counters[i]
 if struct.type == 'l':
 prefix = 'TargetToken(%d)' % struct.number
-elif struct.type == 'b':
-prefix = 'bridge ' + str(struct.number)
 else:
-prefix = 'entry ' + str(struct.number)
+num = struct.number
+if num == -1:
+num = '-1'
+else:
+num = str(r_uint(num))
+if struct.type == 'b':
+prefix = 'bridge %s' % num
+else:
+prefix = 'entry %s' % num
 debug_print(prefix + ':' + str(struct.i))
 debug_stop('jit-backend-counts')
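
As a quick illustration of what the r_uint conversion changes in the output (assuming the usual import from rpython.rlib.rarithmetic; the exact value depends on the word size):

    from rpython.rlib.rarithmetic import r_uint

    num = -2   # e.g. a bridge number that is negative when read as a Signed
    print 'bridge ' + str(num)          # old output: 'bridge -2'
    print 'bridge ' + str(r_uint(num))  # new output: the unsigned equivalent,
                                        # matching the Guard0xNNN numbers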
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Fix test for the 'trace-limit-hack' merge

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73260:fc98972622d7
Date: 2014-09-01 09:32 +0200
http://bitbucket.org/pypy/pypy/changeset/fc98972622d7/

Log: Fix test for the 'trace-limit-hack' merge

diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py 
b/pypy/module/pypyjit/test_pypy_c/test_call.py
--- a/pypy/module/pypyjit/test_pypy_c/test_call.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
@@ -17,13 +17,18 @@
 # now we can inline it as call assembler
 i = 0
 j = 0
-while i < 20:
+while i < 25:
 i += 1
 j += rec(100) # ID: call_rec
 return j
 #
-log = self.run(fn, [], threshold=18)
-loop, = log.loops_by_filename(self.filepath)
+# NB. the parameters below are a bit ad-hoc.  After 16 iterations,
+# we trace from the "while" and reach a "trace too long".  Then
+# in the next execution, we trace the "rec" function from start;
+# that's "functrace" below.  Then after one or two extra iterations
+# we try again from "while", and this time we succeed.
+log = self.run(fn, [], threshold=20)
+functrace, loop = log.loops_by_filename(self.filepath)
 assert loop.match_by_id('call_rec', """
 ...
 p53 = call_assembler(..., descr=...)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy arm-longlong: uh, there are synonyms in the ARM instruction set?

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: arm-longlong
Changeset: r73261:d6eaca4efd43
Date: 2014-09-01 09:49 +0200
http://bitbucket.org/pypy/pypy/changeset/d6eaca4efd43/

Log: uh, there are synonyms in the ARM instruction set?

diff --git a/rpython/jit/backend/arm/codebuilder.py 
b/rpython/jit/backend/arm/codebuilder.py
--- a/rpython/jit/backend/arm/codebuilder.py
+++ b/rpython/jit/backend/arm/codebuilder.py
@@ -340,19 +340,8 @@
 MOD = binary_helper_call('int_mod')
 UDIV = binary_helper_call('uint_div')
 
-def FMDRR(self, dm, rd, rn, c=cond.AL):
-self.write32(c << 28
-| 0x0c400b10
-| (dm & 0xF)
-| (rd & 0xF) << 12
-| (rn & 0xF) << 16)
-
-def FMRRD(self, rd, rn, dm, c=cond.AL):
-self.write32(c << 28
-| 0x0c500b10
-| (dm & 0xF)
-| (rd & 0xF) << 12
-| (rn & 0xF) << 16)
+FMDRR = VMOV_cr # uh, there are synonyms?
+FMRRD = VMOV_rc
 
 def _encode_reg_list(self, instr, regs):
 for reg in regs:
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Move the test to a function and use it from two places.

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73262:b97c7d6f7fd8
Date: 2014-09-01 10:07 +0200
http://bitbucket.org/pypy/pypy/changeset/b97c7d6f7fd8/

Log: Move the test to a function and use it from two places.

diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py
--- a/pypy/module/sys/initpath.py
+++ b/pypy/module/sys/initpath.py
@@ -18,6 +18,13 @@
 _WIN32 = sys.platform == 'win32'
 
 
+def _exists_and_is_executable(fn):
+# os.access checks using the user's real uid and gid.
+# Since pypy should not be run setuid/setgid, this
+# should be sufficient.
+return os.path.isfile(fn) and os.access(fn, os.X_OK)
+
+
 def find_executable(executable):
 """
 Return the absolute path of the executable, by looking into PATH and
@@ -34,18 +41,14 @@
 if path:
 for dir in path.split(os.pathsep):
 fn = os.path.join(dir, executable)
-if os.path.isfile(fn):
-# os.access checks using the user's real uid and gid.
-# Since pypy should not be run setuid/setgid, this
-# should be sufficient.
-if os.access(fn, os.X_OK):
-executable = fn
-break
+if _exists_and_is_executable(fn):
+executable = fn
+break
 executable = rpath.rabspath(executable)
 
 # 'sys.executable' should not end up being an non-existing file;
 # just use '' in this case. (CPython issue #7774)
-return executable if os.path.isfile(executable) else ''
+return executable if _exists_and_is_executable(executable) else ''
 
 
 def _readlink_maybe(filename):
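
A tiny usage sketch of the factored-out helper (hypothetical paths, just to illustrate the os.path.isfile + os.access combination):

    import os

    def _exists_and_is_executable(fn):
        # a regular file that the real uid/gid is allowed to execute
        return os.path.isfile(fn) and os.access(fn, os.X_OK)

    print _exists_and_is_executable('/bin/sh')        # typically True on Unix
    print _exists_and_is_executable('/etc/hostname')  # False (not executable,
                                                      # or missing)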
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Add a test for unichar comparison. It was not broken but producing

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73263:65ac482d28d6
Date: 2014-09-01 11:16 +0200
http://bitbucket.org/pypy/pypy/changeset/65ac482d28d6/

Log: Add a test for unichar comparison. It was not broken but producing
very inefficient code. Improve...

diff --git a/rpython/rtyper/rstr.py b/rpython/rtyper/rstr.py
--- a/rpython/rtyper/rstr.py
+++ b/rpython/rtyper/rstr.py
@@ -3,6 +3,7 @@
 from rpython.rtyper import rint
 from rpython.rtyper.error import TyperError
 from rpython.rtyper.lltypesystem.lltype import Signed, Bool, Void, UniChar
+from rpython.rtyper.lltypesystem import lltype
 from rpython.rtyper.rmodel import IteratorRepr, inputconst, Repr
 from rpython.rtyper.rint import IntegerRepr
 from rpython.rtyper.rfloat import FloatRepr
@@ -384,10 +385,10 @@
 unicode_encode_utf_8_impl, 'runicode_encode_utf_8')
 
 def rtype_method_upper(self, hop):
-raise TypeError("Cannot do toupper on unicode string")
+raise TyperError("Cannot do toupper on unicode string")
 
 def rtype_method_lower(self, hop):
-raise TypeError("Cannot do tolower on unicode string")
+raise TyperError("Cannot do tolower on unicode string")
 
 @jit.elidable
 def ll_encode_utf8(self, ll_s):
@@ -711,6 +712,11 @@
  pairtype(AbstractUniCharRepr, AbstractCharRepr)):
 def rtype_eq(_, hop): return _rtype_unchr_compare_template(hop, 'eq')
 def rtype_ne(_, hop): return _rtype_unchr_compare_template(hop, 'ne')
+def rtype_lt(_, hop): return _rtype_unchr_compare_template_ord(hop, 'lt')
+def rtype_le(_, hop): return _rtype_unchr_compare_template_ord(hop, 'le')
+def rtype_gt(_, hop): return _rtype_unchr_compare_template_ord(hop, 'gt')
+def rtype_ge(_, hop): return _rtype_unchr_compare_template_ord(hop, 'ge')
+
 
 #Helper functions for comparisons
 
@@ -719,6 +725,18 @@
 vlist = hop.inputargs(unichar_repr, unichar_repr)
 return hop.genop('unichar_' + func, vlist, resulttype=Bool)
 
+def _rtype_unchr_compare_template_ord(hop, func):
+vlist = hop.inputargs(*hop.args_r)
+vlist2 = []
+for v in vlist:
+if v.concretetype == lltype.Char:
+v = hop.genop('cast_char_to_int', [v], resulttype=lltype.Signed)
+elif v.concretetype == lltype.UniChar:
+v = hop.genop('cast_unichar_to_int', [v], resulttype=lltype.Signed)
+else:
+assert 0, v.concretetype
+vlist2.append(v)
+return hop.genop('int_' + func, vlist2, resulttype=Bool)
 
 #
 # _ Conversions _
diff --git a/rpython/rtyper/test/test_runicode.py 
b/rpython/rtyper/test/test_runicode.py
--- a/rpython/rtyper/test/test_runicode.py
+++ b/rpython/rtyper/test/test_runicode.py
@@ -296,3 +296,13 @@
 
 res = self.interpret(f, [5])
 assert res == 0
+
+def test_unicode_char_comparison(self):
+const = u'abcdef'
+def f(n):
+return const[n] >= u'c'
+
+res = self.interpret(f, [1])
+assert res == False
+res = self.interpret(f, [2])
+assert res == True
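
The operations generated by the new helper amount to comparing the two code points as integers; in plain Python the equivalent is simply (illustrative only):

    def unichar_ge(c1, c2):
        # cast_unichar_to_int / cast_char_to_int followed by int_ge
        return ord(c1) >= ord(c2)

    assert unichar_ge(u'c', u'b')
    assert not unichar_ge(u'a', u'c')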
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] stmgc default: introduce stmcb_obj_supports_cards and hopefully make cards finally work as they should

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: 
Changeset: r1326:83e4c655d31b
Date: 2014-09-01 11:46 +0200
http://bitbucket.org/pypy/stmgc/changeset/83e4c655d31b/

Log: introduce stmcb_obj_supports_cards and hopefully make cards finally
work as they should

diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c
--- a/c7/demo/demo2.c
+++ b/c7/demo/demo2.c
@@ -43,6 +43,10 @@
 n = (struct node_s*)obj;
 visit((object_t **)&n->next);
 }
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_get_card_base_itemsize(struct object_s *obj,
   uintptr_t offset_itemsize[2])
 {
diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c
--- a/c7/demo/demo_largemalloc.c
+++ b/c7/demo/demo_largemalloc.c
@@ -24,7 +24,10 @@
 }
 
 void stmcb_commit_soon() {}
-
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
uintptr_t start, uintptr_t stop) {
 abort();
diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c
--- a/c7/demo/demo_random.c
+++ b/c7/demo/demo_random.c
@@ -80,7 +80,10 @@
 }
 
 void stmcb_commit_soon() {}
-
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
uintptr_t start, uintptr_t stop) {
 abort();
diff --git a/c7/demo/demo_random2.c b/c7/demo/demo_random2.c
--- a/c7/demo/demo_random2.c
+++ b/c7/demo/demo_random2.c
@@ -85,6 +85,10 @@
 
 void stmcb_commit_soon() {}
 
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
uintptr_t start, uintptr_t stop) {
 abort();
diff --git a/c7/demo/demo_simple.c b/c7/demo/demo_simple.c
--- a/c7/demo/demo_simple.c
+++ b/c7/demo/demo_simple.c
@@ -38,7 +38,10 @@
 n = (struct node_s*)obj;
 visit((object_t **)&n->next);
 }
-
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_commit_soon() {}
 
 void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
diff --git a/c7/demo/test_shadowstack.c b/c7/demo/test_shadowstack.c
--- a/c7/demo/test_shadowstack.c
+++ b/c7/demo/test_shadowstack.c
@@ -18,6 +18,10 @@
 void stmcb_trace(struct object_s *obj, void visit(object_t **))
 {
 }
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
 void stmcb_get_card_base_itemsize(struct object_s *obj,
   uintptr_t offset_itemsize[2])
 {
diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -231,8 +231,12 @@
 {
 struct object_s *realobj = (struct object_s *)
 REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
+long supports = stmcb_obj_supports_cards(realobj);
+if (!supports)
+return 0;
+
+/* check also if it makes sense: */
 size_t size = stmcb_size_rounded_up(realobj);
-
 return (size >= _STM_MIN_CARD_OBJ_SIZE);
 }
 
@@ -590,13 +594,16 @@
 assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
 assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj));
 
+uintptr_t offset_itemsize[2];
 struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
 size_t obj_size = stmcb_size_rounded_up(realobj);
 assert(obj_size >= 32);
+stmcb_get_card_base_itemsize(realobj, offset_itemsize);
+size_t real_idx_count = (obj_size - offset_itemsize[0]) / 
offset_itemsize[1];
 
 uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj);
 uintptr_t card_index = 1;
-uintptr_t last_card_index = get_index_to_card_index(obj_size - 1); /* max 
valid index */
+uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); 
/* max valid index */
 long i, myself = STM_SEGMENT->segment_num;
 
 /* simple heuristic to check if probably the whole object is
@@ -617,7 +624,6 @@
 /* Combine multiple marked cards and do a memcpy for them. We don't
try yet to use page_copy() or otherwise take into account privatization
of pages (except _has_private_page_in_range) */
-uintptr_t offset_itemsize[2];
 bool all_cards_were_cleared = true;
 
 uintptr_t start_card_index = -1;
@@ -634,7 +640,6 @@
 /* realobj, get_card_index_to_index(card_index)); */
 if (all_cards_were_cleared) {
 all_cards_were_cleared = false;
-stmcb_get_card_base_itemsize(realobj, offset_itemsize);
 }
 }
 }
diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c
--- a/c7/stm/nursery.c
+++ b/c7/stm/nursery.c
@@ -241,8 +241,12 @@
 #undef STM_SEGMENT
 struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(pseg->pub.segment_base, obj);
 size_t size = stmcb_size_rounded_up(realobj);
+OPT_ASSERT(size >= _STM_MIN_CARD_OBJ_SIZE);
 
-OPT_ASSERT(size >= _STM_MIN_CARD_OBJ_S

[pypy-commit] pypy default: Detect and special-case in the JIT the greater-than kind of comparisons

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73265:8b404466566d
Date: 2014-09-01 12:03 +0200
http://bitbucket.org/pypy/pypy/changeset/8b404466566d/

Log: Detect and special-case in the JIT the greater-than kind of
comparisons between two single characters (or unichars).

diff --git a/rpython/jit/codewriter/effectinfo.py 
b/rpython/jit/codewriter/effectinfo.py
--- a/rpython/jit/codewriter/effectinfo.py
+++ b/rpython/jit/codewriter/effectinfo.py
@@ -36,6 +36,7 @@
 OS_STREQ_NONNULL_CHAR   = 29   # s1 == char  (assert s1!=NULL)
 OS_STREQ_CHECKNULL_CHAR = 30   # s1!=NULL and s1==char
 OS_STREQ_LENGTHOK   = 31   # s1 == s2(assert len(s1)==len(s2))
+OS_STR_CMP  = 32   # "stroruni.cmp"
 #
 OS_UNI_CONCAT   = 42   #
 OS_UNI_SLICE= 43   #
@@ -47,6 +48,7 @@
 OS_UNIEQ_NONNULL_CHAR   = 49   #   (must be the same amount as for
 OS_UNIEQ_CHECKNULL_CHAR = 50   #   STR, in the same order)
 OS_UNIEQ_LENGTHOK   = 51   #
+OS_UNI_CMP  = 52
 _OS_offset_uni  = OS_UNI_CONCAT - OS_STR_CONCAT
 #
 OS_LIBFFI_CALL  = 62
diff --git a/rpython/jit/codewriter/jtransform.py 
b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -1767,6 +1767,7 @@
 dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT,
 "stroruni.slice":  EffectInfo.OS_STR_SLICE,
 "stroruni.equal":  EffectInfo.OS_STR_EQUAL,
+"stroruni.cmp":EffectInfo.OS_STR_CMP,
 "stroruni.copy_string_to_raw": 
EffectInfo.OS_STR_COPY_TO_RAW,
 }
 CHR = lltype.Char
@@ -1774,6 +1775,7 @@
 dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT,
 "stroruni.slice":  EffectInfo.OS_UNI_SLICE,
 "stroruni.equal":  EffectInfo.OS_UNI_EQUAL,
+"stroruni.cmp":EffectInfo.OS_UNI_CMP,
 "stroruni.copy_string_to_raw": 
EffectInfo.OS_UNI_COPY_TO_RAW
 }
 CHR = lltype.UniChar
diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py 
b/rpython/jit/metainterp/optimizeopt/vstring.py
--- a/rpython/jit/metainterp/optimizeopt/vstring.py
+++ b/rpython/jit/metainterp/optimizeopt/vstring.py
@@ -733,6 +733,25 @@
 return True
 return False
 
+def opt_call_stroruni_STR_CMP(self, op, mode):
+v1 = self.getvalue(op.getarg(1))
+v2 = self.getvalue(op.getarg(2))
+l1box = v1.getstrlen(None, mode, None)
+l2box = v2.getstrlen(None, mode, None)
+if (l1box is not None and l2box is not None and
+isinstance(l1box, ConstInt) and
+isinstance(l2box, ConstInt) and
+l1box.value == l2box.value == 1):
+# comparing two single chars
+vchar1 = self.strgetitem(v1, optimizer.CVAL_ZERO, mode)
+vchar2 = self.strgetitem(v2, optimizer.CVAL_ZERO, mode)
+seo = self.optimizer.send_extra_operation
+seo(ResOperation(rop.INT_SUB, [vchar1.force_box(self),
+   vchar2.force_box(self)],
+ op.result))
+return True
+return False
+
 def opt_call_SHRINK_ARRAY(self, op):
 v1 = self.getvalue(op.getarg(1))
 v2 = self.getvalue(op.getarg(2))
diff --git a/rpython/jit/metainterp/test/test_string.py 
b/rpython/jit/metainterp/test/test_string.py
--- a/rpython/jit/metainterp/test/test_string.py
+++ b/rpython/jit/metainterp/test/test_string.py
@@ -846,6 +846,27 @@
 'jump': 1, 'guard_true': 2, 'int_ge': 2, 'int_add': 2, 'int_sub': 2
 })
 
+def test_compare_single_char_for_ordering(self):
+jitdriver = JitDriver(reds=['result', 'n'], greens=[])
+_str = self._str
+constant1 = _str("abcdefghij")
+
+def cmpstr(x, y):
+return x > _str(y)
+
+def f(n):
+cmpstr(_str("abc"), "def")  # force x and y to be annot as strings
+result = 0
+while n >= 0:
+jitdriver.jit_merge_point(n=n, result=result)
+c = constant1[n]
+result += cmpstr(c, "c")
+n -= 1
+return result
+
+res = self.meta_interp(f, [9])
+assert res == f(9)
+self.check_resops(newstr=0, newunicode=0, call=0)
 
 
 class TestLLtype(StringTests, LLJitMixin):
diff --git a/rpython/rtyper/lltypesystem/rstr.py 
b/rpython/rtyper/lltypesystem/rstr.py
--- a/rpython/rtyper/lltypesystem/rstr.py
+++ b/rpython/rtyper/lltypesystem/rstr.py
@@ -531,6 +531,7 @@
 return diff
 i += 1
 return len1 - len2
+ll_strcmp.oopspec = 'stroruni.cmp(s1, s2)'
 
 @jit.elidable
 def ll_streq(s1, s2):
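
What the new opt_call_stroruni_STR_CMP case produces for two strings that are both known to be exactly one character long, sketched in plain Python (the optimizer emits an INT_SUB of the two character codes instead of calling ll_strcmp; names here are illustrative):

    def strcmp_single_char(s1, s2):
        # the optimized trace computes the difference of the two
        # character codes; the caller then only tests its sign, so
        # e.g. s1 > s2 becomes (ord(s1[0]) - ord(s2[0])) > 0
        return ord(s1[0]) - ord(s2[0])

    assert strcmp_single_char("d", "c") > 0
    assert strcmp_single_char("c", "c") == 0
    assert strcmp_single_char("b", "c") < 0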
___
pypy

[pypy-commit] pypy default: Remove this special case. See comments.

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73264:fcbba01341eb
Date: 2014-09-01 11:28 +0200
http://bitbucket.org/pypy/pypy/changeset/fcbba01341eb/

Log: Remove this special case. See comments.

diff --git a/pypy/module/_pypyjson/interp_encoder.py 
b/pypy/module/_pypyjson/interp_encoder.py
--- a/pypy/module/_pypyjson/interp_encoder.py
+++ b/pypy/module/_pypyjson/interp_encoder.py
@@ -37,16 +37,14 @@
 sb = StringBuilder(len(u))
 sb.append_slice(s, 0, first)
 else:
+# We used to check if 'u' contains only safe characters, and return
+# 'w_string' directly.  But this requires an extra pass over all
+# characters, and the expected use case of this function, from
+# json.encoder, will anyway re-encode a unicode result back to
+# a string (with the ascii encoding).  So we may as well directly
+# turn it into a string from here, and avoid the extra pass over
+# all characters here.
 u = space.unicode_w(w_string)
-for i in range(len(u)):
-c = u[i]
-if c >= u' ' and c <= u'~' and c != u'"' and c != u'\\':
-pass
-else:
-break
-else:
-# the input is a unicode with only non-special ascii chars
-return w_string
 sb = StringBuilder(len(u))
 first = 0
 
diff --git a/pypy/module/_pypyjson/test/test__pypyjson.py 
b/pypy/module/_pypyjson/test/test__pypyjson.py
--- a/pypy/module/_pypyjson/test/test__pypyjson.py
+++ b/pypy/module/_pypyjson/test/test__pypyjson.py
@@ -192,14 +192,14 @@
 
 def test_raw_encode_basestring_ascii(self):
 import _pypyjson
-def check(s, expected_type=str):
+def check(s):
 s = _pypyjson.raw_encode_basestring_ascii(s)
-assert type(s) is expected_type
+assert type(s) is str
 return s
 assert check("") == ""
-assert check(u"", expected_type=unicode) == u""
+assert check(u"") == ""
 assert check("abc ") == "abc "
-assert check(u"abc ", expected_type=unicode) == u"abc "
+assert check(u"abc ") == "abc "
 raises(UnicodeDecodeError, check, "\xc0")
 assert check("\xc2\x84") == "\\u0084"
 assert check("\xf0\x92\x8d\x85") == "\\ud808\\udf45"
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: clarify comment

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73266:dffa8ea95d54
Date: 2014-09-01 12:06 +0200
http://bitbucket.org/pypy/pypy/changeset/dffa8ea95d54/

Log: clarify comment

diff --git a/pypy/module/_pypyjson/interp_encoder.py 
b/pypy/module/_pypyjson/interp_encoder.py
--- a/pypy/module/_pypyjson/interp_encoder.py
+++ b/pypy/module/_pypyjson/interp_encoder.py
@@ -41,9 +41,9 @@
 # 'w_string' directly.  But this requires an extra pass over all
 # characters, and the expected use case of this function, from
 # json.encoder, will anyway re-encode a unicode result back to
-# a string (with the ascii encoding).  So we may as well directly
-# turn it into a string from here, and avoid the extra pass over
-# all characters here.
+# a string (with the ascii encoding).  This requires two passes
+# over the characters.  So we may as well directly turn it into a
+# string here --- only one pass.
 u = space.unicode_w(w_string)
 sb = StringBuilder(len(u))
 first = 0
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy stmgc-c7: add pypy_stmcb_obj_supports_cards

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: stmgc-c7
Changeset: r73268:db94b14e4f34
Date: 2014-09-01 12:43 +0200
http://bitbucket.org/pypy/pypy/changeset/db94b14e4f34/

Log: add pypy_stmcb_obj_supports_cards

diff --git a/rpython/memory/gctransform/stmframework.py 
b/rpython/memory/gctransform/stmframework.py
--- a/rpython/memory/gctransform/stmframework.py
+++ b/rpython/memory/gctransform/stmframework.py
@@ -33,6 +33,14 @@
  llannotation.SomePtr(GCClass.VISIT_FPTR)],
   annmodel.s_None))
 #
+def pypy_stmcb_obj_supports_cards(obj):
+typeid = gc.get_type_id(obj)
+return gc.is_varsize(typeid)
+pypy_stmcb_obj_supports_cards.c_name = "pypy_stmcb_obj_supports_cards"
+self.autoregister_ptrs.append(
+getfn(pypy_stmcb_obj_supports_cards, [llannotation.SomeAddress()],
+  annmodel.SomeInteger()))
+#
 def pypy_stmcb_trace_cards(obj, visit_fn, start, stop):
 typeid = gc.get_type_id(obj)
 if not gc.has_gcptr_in_varsize(typeid):
diff --git a/rpython/translator/stm/src_stm/stmgcintf.c 
b/rpython/translator/stm/src_stm/stmgcintf.c
--- a/rpython/translator/stm/src_stm/stmgcintf.c
+++ b/rpython/translator/stm/src_stm/stmgcintf.c
@@ -13,6 +13,7 @@
 extern void pypy_stmcb_get_card_base_itemsize(void*, uintptr_t[]);
 extern void pypy_stmcb_trace(void*, void(*)(void*));
 extern void pypy_stmcb_trace_cards(void*, void(*)(void*), uintptr_t, 
uintptr_t);
+extern Signed pypy_stmcb_obj_supports_cards(void*);
 
 inline ssize_t stmcb_size_rounded_up(struct object_s *obj) {
 ssize_t result = pypy_stmcb_size_rounded_up(obj);
@@ -30,6 +31,10 @@
 pypy_stmcb_trace(obj, (void(*)(void*))visit);
 }
 
+inline long stmcb_obj_supports_cards(struct object_s *obj) {
+return pypy_stmcb_obj_supports_cards(obj);
+}
+
 inline void stmcb_trace_cards(struct object_s *obj, void visit(object_t **),
   uintptr_t start, uintptr_t stop) {
 pypy_stmcb_trace_cards(obj, (void(*)(void*))visit, start, stop);
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy stmgc-c7: import stmgc

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: stmgc-c7
Changeset: r73267:960172cff05f
Date: 2014-09-01 12:43 +0200
http://bitbucket.org/pypy/pypy/changeset/960172cff05f/

Log: import stmgc

diff --git a/rpython/translator/stm/src_stm/revision 
b/rpython/translator/stm/src_stm/revision
--- a/rpython/translator/stm/src_stm/revision
+++ b/rpython/translator/stm/src_stm/revision
@@ -1,1 +1,1 @@
-dbe9b14b252f
+83e4c655d31b
diff --git a/rpython/translator/stm/src_stm/stm/core.c 
b/rpython/translator/stm/src_stm/stm/core.c
--- a/rpython/translator/stm/src_stm/stm/core.c
+++ b/rpython/translator/stm/src_stm/stm/core.c
@@ -232,8 +232,12 @@
 {
 struct object_s *realobj = (struct object_s *)
 REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
+long supports = stmcb_obj_supports_cards(realobj);
+if (!supports)
+return 0;
+
+/* check also if it makes sense: */
 size_t size = stmcb_size_rounded_up(realobj);
-
 return (size >= _STM_MIN_CARD_OBJ_SIZE);
 }
 
@@ -591,13 +595,16 @@
 assert(!(obj->stm_flags & GCFLAG_CARDS_SET));
 assert(!IS_OVERFLOW_OBJ(STM_PSEGMENT, obj));
 
+uintptr_t offset_itemsize[2];
 struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
 size_t obj_size = stmcb_size_rounded_up(realobj);
 assert(obj_size >= 32);
+stmcb_get_card_base_itemsize(realobj, offset_itemsize);
+size_t real_idx_count = (obj_size - offset_itemsize[0]) / 
offset_itemsize[1];
 
 uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj);
 uintptr_t card_index = 1;
-uintptr_t last_card_index = get_index_to_card_index(obj_size - 1); /* max 
valid index */
+uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); 
/* max valid index */
 long i, myself = STM_SEGMENT->segment_num;
 
 /* simple heuristic to check if probably the whole object is
@@ -618,7 +625,6 @@
 /* Combine multiple marked cards and do a memcpy for them. We don't
try yet to use page_copy() or otherwise take into account privatization
of pages (except _has_private_page_in_range) */
-uintptr_t offset_itemsize[2];
 bool all_cards_were_cleared = true;
 
 uintptr_t start_card_index = -1;
@@ -635,7 +641,6 @@
 /* realobj, get_card_index_to_index(card_index)); */
 if (all_cards_were_cleared) {
 all_cards_were_cleared = false;
-stmcb_get_card_base_itemsize(realobj, offset_itemsize);
 }
 }
 }
diff --git a/rpython/translator/stm/src_stm/stm/nursery.c 
b/rpython/translator/stm/src_stm/stm/nursery.c
--- a/rpython/translator/stm/src_stm/stm/nursery.c
+++ b/rpython/translator/stm/src_stm/stm/nursery.c
@@ -242,8 +242,12 @@
 #undef STM_SEGMENT
 struct object_s *realobj = (struct object_s 
*)REAL_ADDRESS(pseg->pub.segment_base, obj);
 size_t size = stmcb_size_rounded_up(realobj);
+OPT_ASSERT(size >= _STM_MIN_CARD_OBJ_SIZE);
 
-OPT_ASSERT(size >= _STM_MIN_CARD_OBJ_SIZE);
+uintptr_t offset_itemsize[2];
+stmcb_get_card_base_itemsize(realobj, offset_itemsize);
+size = (size - offset_itemsize[0]) / offset_itemsize[1];
+
 assert(IMPLY(mark_value == CARD_CLEAR, !mark_all)); /* not necessary */
 assert(IMPLY(mark_all, mark_value == CARD_MARKED_OLD)); /* set *all* to 
OLD */
 assert(IMPLY(IS_OVERFLOW_OBJ(pseg, realobj),
diff --git a/rpython/translator/stm/src_stm/stmgc.h 
b/rpython/translator/stm/src_stm/stmgc.h
--- a/rpython/translator/stm/src_stm/stmgc.h
+++ b/rpython/translator/stm/src_stm/stmgc.h
@@ -260,6 +260,9 @@
one item */
 extern void stmcb_get_card_base_itemsize(struct object_s *,
  uintptr_t offset_itemsize[2]);
+/* returns whether this object supports cards. we will only call
+   stmcb_get_card_base_itemsize on objs that do so. */
+extern long stmcb_obj_supports_cards(struct object_s *);
 extern void stmcb_commit_soon(void);
 
 
@@ -368,7 +371,7 @@
 int stm_is_inevitable(void);
 #else
 static inline int stm_is_inevitable(void) {
-return !rewind_jmp_armed(&STM_SEGMENT->running_thread->rjthread); 
+return !rewind_jmp_armed(&STM_SEGMENT->running_thread->rjthread);
 }
 #endif
 static inline void stm_become_inevitable(stm_thread_local_t *tl,
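
The core.c change above switches last_card_index from being derived from the raw byte size to being derived from the item count; a small arithmetic sketch of the new real_idx_count formula (numbers made up for the example):

    # e.g. an array-like object: 16-byte fixed part, 8-byte items,
    # 4096 bytes in total
    obj_size = 4096
    offset_itemsize = [16, 8]    # [base offset, item size], as filled in
                                 # by stmcb_get_card_base_itemsize()
    real_idx_count = (obj_size - offset_itemsize[0]) // offset_itemsize[1]
    print real_idx_count         # 510 -> cards now index items, not bytes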
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy gc-incminimark-pinning: add first simple test for a weakref pointing to a pinned object. Fails right now.

2014-09-01 Thread groggi
Author: Gregor Wegberg 
Branch: gc-incminimark-pinning
Changeset: r73269:de940c201b84
Date: 2014-09-01 15:14 +0200
http://bitbucket.org/pypy/pypy/changeset/de940c201b84/

Log: add first simple test for a weakref pointing to a pinned object.
Fails right now.

diff --git a/rpython/memory/test/test_incminimark_gc.py 
b/rpython/memory/test/test_incminimark_gc.py
--- a/rpython/memory/test/test_incminimark_gc.py
+++ b/rpython/memory/test/test_incminimark_gc.py
@@ -36,3 +36,27 @@
 return ref() is b
 res = self.interpret(f, [])
 assert res == True
+
+def test_weakref_to_pinned(self):
+import weakref
+from rpython.rlib import rgc
+class A(object):
+pass
+def g():
+a = A()
+assert rgc.pin(a)
+a.x = 100
+wr = weakref.ref(a)
+llop.gc__collect(lltype.Void)
+assert wr() is not None
+assert a.x == 100
+return wr
+def f():
+ref = g()
+llop.gc__collect(lltype.Void, 1)
+b = ref()
+assert b is not None
+b.x = 101
+return ref() is b
+res = self.interpret(f, [])
+assert res == True
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] stmgc gc-small-uniform: Somehow merge default

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: gc-small-uniform
Changeset: r1327:3aa3696e8add
Date: 2014-09-01 14:15 +0200
http://bitbucket.org/pypy/stmgc/changeset/3aa3696e8add/

Log: Somehow merge default

diff too long, truncating to 2000 out of 5104 lines

diff --git a/c7/TODO b/c7/TODO
--- a/c7/TODO
+++ b/c7/TODO
@@ -1,8 +1,6 @@
 
 - use small uniform gcpages
 
-- write barrier for big arrays
-
 - finalizers
 
 - the highest_overflow_number can overflow after 2**30 non-collect-time
@@ -16,3 +14,16 @@
   the unused pages away --- or maybe use consecutive addresses from the
   lowest ones from segment N, instead of the page corresponding to the page
   number in segment 0 (possibly a bit messy)
+
+- possibly messy too, but think about not using N+1 segments but only N
+
+- use a call/cc-style variant of setjmp/longjmp to avoid inevitable
+  transactions when we need to return
+
+- kill "atomic" and use regular lock elision
+
+- increase the memory limit, currently 2.5GB; this requires, apparently,
+  more fighting against LLVM bugs
+
+- avoid __builtin_frame_address(0) in precisely the performance-critical
+  functions like the interpreter main loop
diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c
--- a/c7/demo/demo2.c
+++ b/c7/demo/demo2.c
@@ -43,7 +43,20 @@
 n = (struct node_s*)obj;
 visit((object_t **)&n->next);
 }
-
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
+void stmcb_get_card_base_itemsize(struct object_s *obj,
+  uintptr_t offset_itemsize[2])
+{
+abort();
+}
+void stmcb_trace_cards(struct object_s *obj, void visit(object_t **),
+   uintptr_t start, uintptr_t stop)
+{
+abort();
+}
 void stmcb_commit_soon() {}
 
 static void expand_marker(char *base, uintptr_t odd_number,
@@ -62,9 +75,8 @@
 {
 nodeptr_t r_n;
 long prev, sum;
-stm_jmpbuf_t here;
 
-STM_START_TRANSACTION(&stm_thread_local, here);
+stm_start_transaction(&stm_thread_local);
 
 stm_read((objptr_t)global_chained_list);
 r_n = global_chained_list;
@@ -92,11 +104,9 @@
 
 nodeptr_t swap_nodes(nodeptr_t initial)
 {
-stm_jmpbuf_t here;
-
 assert(initial != NULL);
 
-STM_START_TRANSACTION(&stm_thread_local, here);
+stm_start_transaction(&stm_thread_local);
 
 if (stm_thread_local.longest_marker_state != 0) {
 fprintf(stderr, "[%p] marker %d for %.6f seconds:\n",
@@ -193,7 +203,7 @@
 
 stm_commit_transaction();
 
-stm_start_inevitable_transaction(&stm_thread_local);
+stm_start_transaction(&stm_thread_local);
 STM_POP_ROOT(stm_thread_local, global_chained_list);   /* update value */
 assert(global_chained_list->value == -1);
 STM_PUSH_ROOT(stm_thread_local, global_chained_list);  /* remains forever 
in the shadow stack */
@@ -202,6 +212,11 @@
 printf("setup ok\n");
 }
 
+void teardown_list(void)
+{
+STM_POP_ROOT_RET(stm_thread_local);
+}
+
 
 static sem_t done;
 
@@ -215,7 +230,9 @@
 void *demo2(void *arg)
 {
 int status;
+rewind_jmp_buf rjbuf;
 stm_register_thread_local(&stm_thread_local);
+stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
 char *org = (char *)stm_thread_local.shadowstack;
 
 STM_PUSH_ROOT(stm_thread_local, global_chained_list);  /* remains forever 
in the shadow stack */
@@ -235,6 +252,7 @@
 STM_POP_ROOT(stm_thread_local, global_chained_list);
 OPT_ASSERT(org == (char *)stm_thread_local.shadowstack);
 
+stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
 unregister_thread_local();
 status = sem_post(&done); assert(status == 0);
 return NULL;
@@ -271,11 +289,13 @@
 int main(void)
 {
 int status, i;
+rewind_jmp_buf rjbuf;
 
 status = sem_init(&done, 0, 0); assert(status == 0);
 
 stm_setup();
 stm_register_thread_local(&stm_thread_local);
+stm_rewind_jmp_enterframe(&stm_thread_local, &rjbuf);
 stmcb_expand_marker = expand_marker;
 
 
@@ -292,9 +312,11 @@
 
 final_check();
 
+teardown_list();
 
+stm_rewind_jmp_leaveframe(&stm_thread_local, &rjbuf);
 unregister_thread_local();
-stm_teardown();
+//stm_teardown();
 
 return 0;
 }
diff --git a/c7/demo/demo_largemalloc.c b/c7/demo/demo_largemalloc.c
--- a/c7/demo/demo_largemalloc.c
+++ b/c7/demo/demo_largemalloc.c
@@ -24,6 +24,18 @@
 }
 
 void stmcb_commit_soon() {}
+long stmcb_obj_supports_cards(struct object_s *obj)
+{
+return 0;
+}
+void stmcb_trace_cards(struct object_s *obj, void cb(object_t **),
+   uintptr_t start, uintptr_t stop) {
+abort();
+}
+void stmcb_get_card_base_itemsize(struct object_s *obj,
+  uintptr_t offset_itemsize[2]) {
+abort();
+}
 
 //
 
@@ -67,7 +79,7 @@
 int i;
 arena_data = malloc(ARENA_SIZE);
 assert(arena_data != NULL);
-_stm_mutex_pages_lock();
+//_stm_mutex_pages_lock();
 for (i = 0; i < 25; i++)
 timing(i);
 return 0;

[pypy-commit] stmgc gc-small-uniform: merging mostly complete

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: gc-small-uniform
Changeset: r1329:fe2595f7a3cc
Date: 2014-09-01 15:21 +0200
http://bitbucket.org/pypy/stmgc/changeset/fe2595f7a3cc/

Log: merging mostly complete

diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -459,7 +459,6 @@
 
 static void copy_object_to_shared(object_t *obj, int source_segment_num)
 {
-abort();
 /* Only used by major GC.  XXX There is a lot of code duplication
with synchronize_object_now() but I don't completely see how to
improve...
@@ -471,13 +470,16 @@
 uintptr_t first_page = start / 4096UL;
 struct object_s *realobj = (struct object_s *)
 REAL_ADDRESS(segment_base, obj);
+ssize_t obj_size = stmcb_size_rounded_up(realobj);
+assert(obj_size >= 16);
+
 
 if (is_small_uniform(obj)) {
-abort();//XXX WRITE THE FAST CASE
+char *src = REAL_ADDRESS(segment_base, start);
+char *dst = REAL_ADDRESS(stm_object_pages, start);
+memcpy(dst, src, obj_size);
 }
 else {
-ssize_t obj_size = stmcb_size_rounded_up(realobj);
-assert(obj_size >= 16);
 uintptr_t end = start + obj_size;
 uintptr_t last_page = (end - 1) / 4096UL;
 
@@ -532,10 +534,14 @@
 
 char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, frag);
 char *dst = REAL_ADDRESS(stm_object_pages, frag);
-if (is_private_page(STM_SEGMENT->segment_num, page))
-memcpy(dst, src, frag_size);
-else
+if (is_private_page(STM_SEGMENT->segment_num, page)) {
+if (frag_size == 4096)
+pagecopy(dst, src);
+else
+memcpy(dst, src, frag_size);
+} else {
 EVENTUALLY(memcmp(dst, src, frag_size) == 0);  /* same page */
+}
 
 /* Then enqueue this object (or fragemnt of object) */
 if (STM_PSEGMENT->sq_len == SYNC_QUEUE_SIZE)
@@ -582,7 +588,7 @@
 uintptr_t first_card_index = get_write_lock_idx((uintptr_t)obj);
 uintptr_t card_index = 1;
 uintptr_t last_card_index = get_index_to_card_index(real_idx_count - 1); 
/* max valid index */
-long i, myself = STM_SEGMENT->segment_num;
+long myself = STM_SEGMENT->segment_num;
 
 /* simple heuristic to check if probably the whole object is
marked anyway so we should do page-wise synchronize */
@@ -674,14 +680,6 @@
 return;
 }
 
-#ifndef NDEBUG
-char *src = REAL_ADDRESS(stm_object_pages, (uintptr_t)obj);
-char *dst;
-for (i = 1; i <= NB_SEGMENTS; i++) {
-dst = REAL_ADDRESS(get_segment_base(i), (uintptr_t)obj);
-assert(memcmp(dst, src, obj_size) == 0);
-}
-#endif
 }
 
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] stmgc gc-small-uniform: more merging

2014-09-01 Thread Raemi
Author: Remi Meier 
Branch: gc-small-uniform
Changeset: r1328:ceac696718e6
Date: 2014-09-01 14:51 +0200
http://bitbucket.org/pypy/stmgc/changeset/ceac696718e6/

Log: more merging

diff --git a/c7/stm/core.c b/c7/stm/core.c
--- a/c7/stm/core.c
+++ b/c7/stm/core.c
@@ -277,7 +277,7 @@
  */
 assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
 assert(obj->stm_flags & GCFLAG_CARDS_SET);
-assert(!(obj->stm_flags & GCFLAG_SMALL_UNIFORM)); /* not supported/tested 
*/
+assert(!is_small_uniform(obj)); /* not supported/tested */
 
 #ifndef NDEBUG
 struct object_s *realobj = (struct object_s *)
@@ -513,6 +513,18 @@
 }
 }
 
+static inline bool _has_private_page_in_range(
+long seg_num, uintptr_t start, uintptr_t size)
+{
+uintptr_t first_page = start / 4096UL;
+uintptr_t last_page = (start + size) / 4096UL;
+for (; first_page <= last_page; first_page++)
+if (is_private_page(seg_num, first_page))
+return true;
+return false;
+}
+
+
 static inline void _synchronize_fragment(stm_char *frag, ssize_t frag_size)
 {
 /* First copy the object into the shared page, if needed */
@@ -533,30 +545,8 @@
 ++STM_PSEGMENT->sq_len;
 }
 
-static void synchronize_object_enqueue(object_t *obj)
+static void _page_wise_synchronize_object_now(object_t *obj, ssize_t obj_size)
 {
-/* Copy around the version of 'obj' that lives in our own segment.
-   It is first copied into the shared pages, and then into other
-   segments' own private pages.  (The second part might be done
-   later; call synchronize_objects_flush() to flush this queue.)
-
-   Must be called with the privatization lock acquired.
-*/
-assert(!_is_young(obj));
-assert(obj->stm_flags & GCFLAG_WRITE_BARRIER);
-ssize_t obj_size = stmcb_size_rounded_up(
-(struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj));
-OPT_ASSERT(obj_size >= 16);
-assert(STM_PSEGMENT->privatization_lock == 1);
-
-if (LIKELY(is_small_uniform(obj))) {
-_synchronize_fragment((stm_char *)obj, obj_size);
-return;
-}
-
-/* else, a more complicated case for large objects, to copy
-   around data only within the needed pages
-*/
 uintptr_t start = (uintptr_t)obj;
 uintptr_t end = start + obj_size;
 
@@ -574,140 +564,18 @@
 _synchronize_fragment((stm_char *)start, copy_size);
 
 start = copy_up_to;
-
 } while (start != end);
 }
 
-static void synchronize_objects_flush(void)
+static void _card_wise_synchronize_object_now(object_t *obj, ssize_t obj_size)
 {
-
-/* Do a full memory barrier.  We must make sure that other
-   CPUs see the changes we did to the shared page ("S", in
-   synchronize_object_enqueue()) before we check the other segments
-   with is_private_page() (below).  Otherwise, we risk the
-   following: this CPU writes "S" but the writes are not visible yet;
-   then it checks is_private_page() and gets false, and does nothing
-   more; just afterwards another CPU sets its own private_page bit
-   and copies the page; but it risks doing so before seeing the "S"
-   writes.
-*/
-long j = STM_PSEGMENT->sq_len;
-if (j == 0)
-return;
-STM_PSEGMENT->sq_len = 0;
-
-__sync_synchronize();
-
-long i, myself = STM_SEGMENT->segment_num;
-do {
---j;
-stm_char *frag = STM_PSEGMENT->sq_fragments[j];
-uintptr_t page = ((uintptr_t)frag) / 4096UL;
-if (!any_other_private_page(myself, page))
-continue;
-
-ssize_t frag_size = STM_PSEGMENT->sq_fragsizes[j];
-
-for (i = 1; i <= NB_SEGMENTS; i++) {
-if (i == myself)
-continue;
-
-char *src = REAL_ADDRESS(stm_object_pages, frag);
-char *dst = REAL_ADDRESS(get_segment_base(i), frag);
-if (is_private_page(i, page))
-memcpy(dst, src, frag_size);
-else
-EVENTUALLY(memcmp(dst, src, frag_size) == 0);  /* same page */
-}
-} while (j > 0);
-}
-
-static void _page_wise_synchronize_object_now_default(object_t *obj)
-{
-uintptr_t start = (uintptr_t)obj;
-uintptr_t first_page = start / 4096UL;
-
-char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj);
-ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj);
-assert(obj_size >= 16);
-uintptr_t end = start + obj_size;
-uintptr_t last_page = (end - 1) / 4096UL;
-long i, myself = STM_SEGMENT->segment_num;
-
-for (; first_page <= last_page; first_page++) {
-
-uintptr_t copy_size;
-if (first_page == last_page) {
-/* this is the final fragment */
-copy_size = end - start;
-}
-else {
-/* this is a non-final fragment, going up to the
-   page's end */
-copy_size = 4096 - (start & 4095);
-}
-/* double-check that the result fits in one page */

[pypy-commit] pypy default: hg merge arm-longlong

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73271:8ae0ea93f7fe
Date: 2014-09-01 17:05 +0200
http://bitbucket.org/pypy/pypy/changeset/8ae0ea93f7fe/

Log: hg merge arm-longlong

Reintroduce longlong support in the ARM backend. Tested with the
hard-float variant of the calling convention; can't test the soft-
float variant for now. I'll test myself at a later date or just
handle bug reports --- if any: I think that for soft-float, we
simply need to consider longlongs as equivalent to 64-bit floats (as
we do now).

diff --git a/rpython/jit/backend/arm/callbuilder.py 
b/rpython/jit/backend/arm/callbuilder.py
--- a/rpython/jit/backend/arm/callbuilder.py
+++ b/rpython/jit/backend/arm/callbuilder.py
@@ -80,15 +80,6 @@
 self.mc.gen_load_int(r.ip.value, n)
 self.mc.SUB_rr(r.sp.value, r.sp.value, r.ip.value)
 
-def _must_remap_fnloc(self):
-fnloc = self.fnloc
-if fnloc.is_stack():
-return True
-if self.is_call_release_gil:
-if fnloc is r.r5 or fnloc is r.r6 or fnloc is r.r7:
-return True
-return False
-
 def call_releasegil_addr_and_move_real_arguments(self, fastgil):
 assert self.is_call_release_gil
 assert not self.asm._is_asmgcc()
@@ -121,7 +112,7 @@
 self.mc.STREX(r.r3.value, r.ip.value, r.r6.value, c=c.EQ)
  # try to claim the lock
 self.mc.CMP_ri(r.r3.value, 0, cond=c.EQ) # did this succeed?
-self.mc.DMB(c=c.EQ)
+self.mc.DMB()
 # the success of the lock acquisition is defined by
 # 'EQ is true', or equivalently by 'r3 == 0'.
 #
@@ -268,7 +259,7 @@
 # or on the stack, which we can not access later
 # If this happens to be the case we remap the register to r4 and use r4
 # to call the function
-if self.fnloc in r.argument_regs or self._must_remap_fnloc():
+if not self.fnloc.is_imm():
 non_float_locs.append(self.fnloc)
 non_float_regs.append(r.r4)
 self.fnloc = r.r4
@@ -285,29 +276,23 @@
 
 def get_next_vfp(self, tp):
 assert tp in 'fS'
-if self.next_arg_vfp == -1:
-return None
-if tp == 'S':
+if tp == 'f':
+# 64bit double
+i = max(self.next_arg_vfp, (self.next_arg_svfp + 1) >> 1)
+if i >= len(r.vfp_argument_regs):
+self.next_arg_svfp = 1000# stop that sequence too
+return None
+self.next_arg_vfp = i + 1
+return r.vfp_argument_regs[i]
+else:
+# 32bit float
 i = self.next_arg_svfp
-next_vfp = (i >> 1) + 1
-if not (i + 1) & 1: # i is even
-self.next_arg_vfp = max(self.next_arg_vfp, next_vfp)
-self.next_arg_svfp = self.next_arg_vfp << 1
-else:
-self.next_arg_svfp += 1
-self.next_arg_vfp = next_vfp
-lst = r.svfp_argument_regs
-else: # 64bit double
-i = self.next_arg_vfp
-self.next_arg_vfp += 1
-if self.next_arg_svfp >> 1 == i:
-self.next_arg_svfp = self.next_arg_vfp << 1
-lst = r.vfp_argument_regs
-try:
-return lst[i]
-except IndexError:
-self.next_arg_vfp = self.next_arg_svfp = -1
-return None
+if not (i & 1): # if i is even
+i = max(i, self.next_arg_vfp << 1)
+if i >= len(r.svfp_argument_regs):
+return None
+self.next_arg_svfp = i + 1
+return r.svfp_argument_regs[i]
 
 def prepare_arguments(self):
 non_float_locs = []
@@ -316,34 +301,64 @@
 float_regs = []
 stack_args = []
 singlefloats = None
+longlong_mask = 0
 
 arglocs = self.arglocs
 argtypes = self.argtypes
 
-count = 0  # stack alignment counter
+r_register_count = 0
 on_stack = 0
+
 for i in range(len(arglocs)):
 argtype = INT
 if i < len(argtypes) and argtypes[i] == 'S':
 argtype = argtypes[i]
 arg = arglocs[i]
+
 if arg.is_float():
-argtype = FLOAT
-reg = self.get_next_vfp(argtype)
-if reg:
-assert len(float_regs) < len(r.vfp_argument_regs)
-float_locs.append(arg)
-assert reg not in float_regs
-float_regs.append(reg)
-else:  # float argument that needs to go on the stack
-if count % 2 != 0:
-stack_args.append(None)
-count = 0
-on_stack += 1
-stack_args.append(arg)
-on_stack += 

[pypy-commit] pypy arm-longlong: Close branch, ready for merge.

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: arm-longlong
Changeset: r73270:e49b397a6028
Date: 2014-09-01 17:02 +0200
http://bitbucket.org/pypy/pypy/changeset/e49b397a6028/

Log: Close branch, ready for merge.

Can't test on soft-float ARM for now. We'll see later.

___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: add a remark about SoftFloatCallBuilder

2014-09-01 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r73272:f27cde19678a
Date: 2014-09-01 17:19 +0200
http://bitbucket.org/pypy/pypy/changeset/f27cde19678a/

Log: add a remark about SoftFloatCallBuilder

diff --git a/rpython/jit/backend/arm/callbuilder.py 
b/rpython/jit/backend/arm/callbuilder.py
--- a/rpython/jit/backend/arm/callbuilder.py
+++ b/rpython/jit/backend/arm/callbuilder.py
@@ -173,6 +173,13 @@
 
 
 class SoftFloatCallBuilder(ARMCallbuilder):
+# XXX Maybe we could kill this class and unify the remaining two
+# XXX classes, by carefully checking if all methods here are doing
+# XXX the exact same thing as the methods from HardFloatCallBuilder,
+# XXX but simply forcing all BoxFloat arguments to be longlongs
+# XXX (i.e. ignoring 'f' in favour of 'L'), and the same with
+# XXX single-float arguments (ignoring 'S' in favour of 'i');
+# XXX and the same for the return value.
 
 def get_result_locs(self):
 if self.resloc is None:
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Backout 44c3924 : no need to patch dead code

2014-09-01 Thread rlamy
Author: Ronan Lamy 
Branch: 
Changeset: r73273:1cea12b7ec07
Date: 2014-09-01 18:20 +0100
http://bitbucket.org/pypy/pypy/changeset/1cea12b7ec07/

Log: Backout 44c3924 : no need to patch dead code

diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -416,8 +416,6 @@
 trysource = source[start:end]
 if trysource.isparseable():
 return start, end
-if end == start + 100:   # XXX otherwise, it takes forever
-break# XXX
 raise SyntaxError("no valid source range around line %d " % (lineno,))
 
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy gc_no_cleanup_nursery: a simple hack to avoid writing "uninitialized" to places

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73274:ab09d9e107f6
Date: 2014-09-01 12:21 -0600
http://bitbucket.org/pypy/pypy/changeset/ab09d9e107f6/

Log: a simple hack to avoid writing "uninitialized" to places

diff --git a/rpython/memory/gctransform/framework.py 
b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -1264,6 +1264,7 @@
 
 
 def gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None):
+
 if previous_steps is None:
 previous_steps = []
 assert (isinstance(TYPE, lltype.Struct) or isinstance(TYPE, lltype.Array))
diff --git a/rpython/rtyper/lltypesystem/llmemory.py 
b/rpython/rtyper/lltypesystem/llmemory.py
--- a/rpython/rtyper/lltypesystem/llmemory.py
+++ b/rpython/rtyper/lltypesystem/llmemory.py
@@ -1037,7 +1037,8 @@
 else:
 # this is a hack XXX de-hack this
 llvalue = source._obj.getitem(i, uninitialized_ok=True)
-dest._obj.setitem(i, llvalue)
+if not isinstance(llvalue, lltype._uninitialized):
+dest._obj.setitem(i, llvalue)
 elif isinstance(T, lltype.Struct):
 for name in T._names:
 FIELDTYPE = getattr(T, name)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy gc_no_cleanup_nursery: Refactor some stuff so test_transformed_gc passes (although it's definitely not

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73275:0cfec6969ec9
Date: 2014-09-01 12:52 -0600
http://bitbucket.org/pypy/pypy/changeset/0cfec6969ec9/

Log: Refactor some stuff so test_transformed_gc passes (although it's
definitely not exhaustive as shown by missing cases)

diff --git a/rpython/memory/gc/test/test_direct.py 
b/rpython/memory/gc/test/test_direct.py
--- a/rpython/memory/gc/test/test_direct.py
+++ b/rpython/memory/gc/test/test_direct.py
@@ -725,21 +725,22 @@
 assert arr_of_ptr_struct[i].next == lltype.nullptr(S)
 
 #fail for now
-def test_malloc_array_of_ptr_arr(self):
+def xxx_test_malloc_array_of_ptr_arr(self):
 ARR_OF_PTR_ARR = 
lltype.GcArray(lltype.Ptr(lltype.GcArray(lltype.Ptr(S
-arr_of_ptr_arr = lltype.malloc(ARR_OF_PTR_ARR, 10)
+arr_of_ptr_arr = self.malloc(ARR_OF_PTR_ARR, 10)
 self.stackroots.append(arr_of_ptr_arr)
 for i in range(10):
 assert arr_of_ptr_arr[i] == 
lltype.nullptr(lltype.GcArray(lltype.Ptr(S)))
 for i in range(10):
-arr_of_ptr_arr[i] = self.malloc(lltype.GcArray(lltype.Ptr(S)), i)
-self.stackroots.append(arr_of_ptr_arr[i])
-debug_print(arr_of_ptr_arr[i])
+self.writearray(arr_of_ptr_arr, i,
+self.malloc(lltype.GcArray(lltype.Ptr(S)), i))
+#self.stackroots.append(arr_of_ptr_arr[i])
+#debug_print(arr_of_ptr_arr[i])
 for elem in arr_of_ptr_arr[i]:
-self.stackroots.append(elem)
+#self.stackroots.append(elem)
 assert elem == lltype.nullptr(S)
 elem = self.malloc(S)
 assert elem.prev == lltype.nullptr(S)
 assert elem.next == lltype.nullptr(S)
 
-
\ No newline at end of file
+
diff --git a/rpython/memory/gctransform/framework.py 
b/rpython/memory/gctransform/framework.py
--- a/rpython/memory/gctransform/framework.py
+++ b/rpython/memory/gctransform/framework.py
@@ -883,7 +883,7 @@
 if not self.malloc_zero_filled:
 v_ob = hop.spaceop.args[0]
 TYPE = v_ob.concretetype.TO
-gen_zero_gc_pointers(TYPE, v_ob, hop.llops)
+self.gen_zero_gc_pointers(TYPE, v_ob, hop.llops)
 
 def gct_gc_writebarrier_before_copy(self, hop):
 op = hop.spaceop
@@ -1204,6 +1204,74 @@
 def pop_roots(self, hop, livevars):
 raise NotImplementedError
 
+def gen_zero_gc_pointers(self, TYPE, v, llops, previous_steps=None):
+if isinstance(TYPE, lltype.Struct):
+for name in TYPE._names:
+FIELD = getattr(TYPE, name)
+if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc():
+c_name = rmodel.inputconst(lltype.Void, name)
+c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO))
+llops.genop('bare_setfield', [v, c_name, c_null])
+elif (isinstance(FIELD, lltype.Array) and
+  isinstance(FIELD.OF, lltype.Ptr) and 
FIELD.OF._needsgc()):
+xxx
+return
+elif isinstance(TYPE, lltype.Array):
+ITEM = TYPE.OF
+if isinstance(ITEM, lltype.Ptr) and ITEM._needsgc():
+v_size = llops.genop('getarraysize', [v],
+ resulttype=lltype.Signed)
+c_size = rmodel.inputconst(lltype.Signed, 
llmemory.sizeof(ITEM))
+v_totalsize = llops.genop('int_mul', [v_size, c_size],
+  resulttype=lltype.Signed)
+v_a = llops.genop('cast_ptr_to_adr', [v],
+  resulttype=llmemory.Address)
+c_fixedofs = rmodel.inputconst(lltype.Signed,
+  llmemory.itemoffsetof(TYPE))
+v_adr = llops.genop('adr_add', [v_a, c_fixedofs],
+resulttype=llmemory.Address)
+llops.genop('raw_memclear', [v_adr, v_totalsize])
+elif isinstance(TYPE, lltype.Struct):
+xxx
+return
+else:
+raise TypeError(TYPE)  
+
+if previous_steps is None:
+previous_steps = []
+assert (isinstance(TYPE, lltype.Struct) or isinstance(TYPE, 
lltype.Array))
+if isinstance(TYPE, lltype.Struct):
+for name in TYPE._names:
+c_name = rmodel.inputconst(lltype.Void, name)
+FIELD = getattr(TYPE, name)
+#handle ptr field in GcStruct
+if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc():
+c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO))
+if not previous_steps:
+llops.genop('bare_setfield', [v, c_name, c_null])
+else:
+  

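The array branch of gen_zero_gc_pointers above clears the whole payload with a
single raw_memclear call whose byte count is the array length times the item
size.  A hedged, plain-Python illustration of just that size computation
(bytes_to_clear is an illustrative name; struct.calcsize("P") stands in for
llmemory.sizeof(ITEM)):

    import struct

    def bytes_to_clear(n_items, item_size):
        # total payload size, in bytes, of an array of n_items GC pointers
        return n_items * item_size

    POINTER_SIZE = struct.calcsize("P")   # native pointer size on this platform
    assert bytes_to_clear(10, POINTER_SIZE) == 10 * POINTER_SIZE
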
[pypy-commit] pypy gc_no_cleanup_nursery: merge default

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73276:6dd6a6c171b2
Date: 2014-09-01 13:14 -0600
http://bitbucket.org/pypy/pypy/changeset/6dd6a6c171b2/

Log:merge default

diff too long, truncating to 2000 out of 63292 lines

diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -35,280 +35,290 @@
 the beginning of each file) the files in the 'pypy' directory are each
 copyrighted by one or more of the following people and organizations:
 
-Armin Rigo
-Maciej Fijalkowski
-Carl Friedrich Bolz
-Antonio Cuni
-Amaury Forgeot d'Arc
-Samuele Pedroni
-Alex Gaynor
-Michael Hudson
-David Schneider
-Matti Picus
-Brian Kearns
-Philip Jenvey
-Holger Krekel
-Christian Tismer
-Hakan Ardo
-Benjamin Peterson
-Manuel Jacob
-Anders Chrigstrom
-Eric van Riet Paap
-Wim Lavrijsen
-Ronan Lamy
-Richard Emslie
-Alexander Schremmer
-Dan Villiom Podlaski Christiansen
-Lukas Diekmann
-Sven Hager
-Anders Lehmann
-Aurelien Campeas
-Niklaus Haldimann
-Camillo Bruni
-Laura Creighton
-Toon Verwaest
-Remi Meier
-Leonardo Santagada
-Seo Sanghyeon
-Romain Guillebert
-Justin Peel
-Ronny Pfannschmidt
-David Edelsohn
-Anders Hammarquist
-Jakub Gustak
-Guido Wesdorp
-Lawrence Oluyede
-Bartosz Skowron
-Daniel Roberts
-Niko Matsakis
-Adrien Di Mascio
-Alexander Hesse
-Ludovic Aubry
-Jacob Hallen
-Jason Creighton
-Alex Martelli
-Michal Bendowski
-Jan de Mooij
-stian
-Michael Foord
-Stephan Diehl
-Stefan Schwarzer
-Valentino Volonghi
-Tomek Meka
-Patrick Maupin
-Bob Ippolito
-Bruno Gola
-Jean-Paul Calderone
-Timo Paulssen
-Squeaky
-Alexandre Fayolle
-Simon Burton
-Marius Gedminas
-John Witulski
-Konstantin Lopuhin
-Greg Price
-Dario Bertini
-Mark Pearse
-Simon Cross
-Andreas Stührk
-Jean-Philippe St. Pierre
-Guido van Rossum
-Pavel Vinogradov
-Paweł Piotr Przeradowski
-Paul deGrandis
-Ilya Osadchiy
-Tobias Oberstein
-Adrian Kuhn
-Boris Feigin
-Stefano Rivera
-tav
-Taavi Burns
-Georg Brandl
-Bert Freudenberg
-Stian Andreassen
-Laurence Tratt
-Wanja Saatkamp
-Ivan Sichmann Freitas
-Gerald Klix
-Mike Blume
-Oscar Nierstrasz
-Stefan H. Muller
-Jeremy Thurgood
-Gregor Wegberg
-Rami Chowdhury
-Tobias Pape
-Edd Barrett
-David Malcolm
-Eugene Oden
-Henry Mason
-Preston Timmons
-Jeff Terrace
-David Ripton
-Dusty Phillips
-Lukas Renggli
-Guenter Jantzen
-Ned Batchelder
-Amit Regmi
-Ben Young
-Nicolas Chauvat
-Andrew Durdin
-Andrew Chambers
-Michael Schneider
-Nicholas Riley
-Jason Chu
-Igor Trindade Oliveira
-Rocco Moretti
-Gintautas Miliauskas
-Michael Twomey
-Lucian Branescu Mihaila
-Tim Felgentreff
-Tyler Wade
-Gabriel Lavoie
-Olivier Dormond
-Jared Grubb
-Karl Bartel
-Brian Dorsey
-Victor Stinner
-Andrews Medina
-Stuart Williams
-Jasper Schulz
-Christian Hudon
-Toby Watson
-Antoine Pitrou
-Aaron Iles
-Michael Cheng
-Justas Sadzevicius
-Mikael Schönenberg
-Gasper Zejn
-Neil Shepperd
-Elmo Mäntynen
-Jonathan David Riehl
-Stanislaw Halik
-Anders Qvist
-Chirag Jadwani
-Beatrice During
-Alex Perry
-Vincent Legoll
-Alan McIntyre
-Alexander Sedov
-Corbin Simpson
-Christopher Pope
-wenzhuman
-Christian Tismer 
-Marc Abramowitz
-Dan Stromberg
-Stefano Parmesan
-Alexis Daboville
-Jens-Uwe Mager
-Carl Meyer
-Karl Ramm
-Pieter Zieschang
-Gabriel
-Lukas Vacek
-Andrew Dalke
-Sylvain Thenault
-Nathan Taylor
-Vladimir Kryachko
-Jacek Generowicz
-Alejandro J. Cura
-Jacob Oscarson
-Travis Francis Athougies
-Ryan Gonzalez
-Kristjan Valur Jonsson
-Sebastian Pawluś
-Neil Blakey-Milner
-anatoly techtonik
-Lutz Paelike
-Lucio Torre
-Lars Wassermann
-Henrik Vendelbo
-Dan Buch
-Miguel de Val Borro
-Artur Lisiecki
-Sergey Kishchenko
-Ignas Mikalajunas
-Christoph Gerum
-Martin Blais
-Lene Wagner
-Tomo Cocoa
-roberto@goyle
-Yury V. Zaytsev
-Anna Katrina Dominguez
-William Leslie
-Bobby Impollonia
-t...@eistee.fritz.box
-Andrew Thompson
-Ben Darnell
-Roberto De Ioris
-Juan Francisco Cantero Hurtado
-Godefroid Chappelle
-Joshua Gilbert
-Dan Colish
-Christopher Armstrong
-Michael Hudson-Doyle
-Anders Sigfridsson
-Yasir Suhail
-rafalgalczyn...@gmail.com
-Floris Bruynooghe
-Laurens Van Houtven
-Akira Li
-Gustavo Niemeyer
-Stephan Busemann
-Rafał Gałczyński
-Yusei Tahara
-Christia

[pypy-commit] pypy gc_no_cleanup_nursery: merge default

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73277:a9dc0d3a9f93
Date: 2014-09-01 13:15 -0600
http://bitbucket.org/pypy/pypy/changeset/a9dc0d3a9f93/

Log:merge default

diff --git a/_pytest/README-BEFORE-UPDATING b/_pytest/README-BEFORE-UPDATING
new file mode 100644
--- /dev/null
+++ b/_pytest/README-BEFORE-UPDATING
@@ -0,0 +1,17 @@
+This is PyPy's code of the pytest lib.  We don't expect to upgrade it
+very often, but once we do:
+
+WARNING!
+
+WE HAVE MADE A FEW TWEAKS HERE!
+
+Please be sure that you don't just copy the newer version from
+upstream without checking the few changes that we did.  This
+can be done like this:
+
+cd 
+hg log . -v | less
+
+then search for all " _pytest/" in that list to know which are the
+relevant checkins.  (Look for the checkins that only edit one
+or two files in this directory.)
diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py
--- a/_pytest/resultlog.py
+++ b/_pytest/resultlog.py
@@ -53,16 +53,23 @@
 self.config = config
 self.logfile = logfile # preferably line buffered
 
-def write_log_entry(self, testpath, lettercode, longrepr):
+def write_log_entry(self, testpath, lettercode, longrepr, sections=None):
 py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
 for line in longrepr.splitlines():
 py.builtin.print_(" %s" % line, file=self.logfile)
+if sections is not None:
+for title, content in sections:
+py.builtin.print_(" -- %s --" % (title,),
+  file=self.logfile)
+for line in content.splitlines():
+py.builtin.print_(" %s" % line, file=self.logfile)
 
 def log_outcome(self, report, lettercode, longrepr):
 testpath = getattr(report, 'nodeid', None)
 if testpath is None:
 testpath = report.fspath
-self.write_log_entry(testpath, lettercode, longrepr)
+self.write_log_entry(testpath, lettercode, longrepr,
+ getattr(report, 'sections', None))
 
 def pytest_runtest_logreport(self, report):
 if report.when != "call" and report.passed:
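
To make the effect of the new `sections` argument concrete, here is a
stand-alone sketch, not the pytest code itself, of the layout it writes: each
section gets a " -- title --" header after the longrepr lines, with its content
indented by one space (format_entry is an illustrative helper name):

    def format_entry(testpath, lettercode, longrepr, sections=None):
        lines = ["%s %s" % (lettercode, testpath)]
        lines += [" %s" % line for line in longrepr.splitlines()]
        for title, content in (sections or []):
            lines.append(" -- %s --" % (title,))
            lines += [" %s" % line for line in content.splitlines()]
        return "\n".join(lines)

    print(format_entry("test_foo.py::test_bar", "F", "assert 1 == 2",
                       sections=[("captured stdout", "hello\nworld")]))
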
diff --git a/py/README-BEFORE-UPDATING b/py/README-BEFORE-UPDATING
new file mode 100644
--- /dev/null
+++ b/py/README-BEFORE-UPDATING
@@ -0,0 +1,17 @@
+This is PyPy's code of the py lib.  We don't expect to upgrade it
+very often, but once we do:
+
+WARNING!
+
+WE HAVE MADE A FEW TWEAKS HERE!
+
+Please be sure that you don't just copy the newer version from
+upstream without checking the few changes that we did.  This
+can be done like this:
+
+cd 
+hg log . -v | less
+
+then search for all " py/" in that list to know which are the
+relevant checkins.  (Look for the checkins that only edit one
+or two files in this directory.)
diff --git a/py/_path/local.py b/py/_path/local.py
--- a/py/_path/local.py
+++ b/py/_path/local.py
@@ -750,7 +750,8 @@
 mkdtemp = classmethod(mkdtemp)
 
 def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
-  lock_timeout = 172800):   # two days
+  lock_timeout = 172800,   # two days
+  min_timeout = 300):  # five minutes
 """ return unique directory with a number greater than the current
 maximum one.  The number is assumed to start directly after prefix.
 if keep is true directories with a number less than (maxnum-keep)
@@ -818,6 +819,20 @@
 for path in rootdir.listdir():
 num = parse_num(path)
 if num is not None and num <= (maxnum - keep):
+if min_timeout:
+# NB: doing this is needed to prevent (or reduce
+# a lot the chance of) the following situation:
+# 'keep+1' processes call make_numbered_dir() at
+# the same time, they create dirs, but then the
+# last process notices the first dir doesn't have
+# (yet) a .lock in it and kills it.
+try:
+t1 = path.lstat().mtime
+t2 = lockfile.lstat().mtime
+if abs(t2-t1) < min_timeout:
+continue   # skip directories too recent
+except py.error.Error:
+continue   # failure to get a time, better skip
 lf = path.join('.lock')
 try:
 t1 = lf.lstat().mtime
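
The NB comment above describes the race this guards against; the guard itself
reduces to a single timestamp comparison.  A hedged sketch, with may_remove as
an illustrative name for the condition the real code expresses with `continue`:

    def may_remove(dir_mtime, lockfile_mtime, min_timeout=300):
        # a numbered dir is only a removal candidate once its mtime and the
        # lock file's mtime differ by at least min_timeout seconds
        return abs(lockfile_mtime - dir_mtime) >= min_timeout

    assert not may_remove(1000.0, 1100.0)   # only 100s apart: too recent, skip
    assert may_remove(1000.0, 1400.0)       # 400s apart: old enough to consider
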
diff --git a/pypy/module/_pypyjson/interp_encoder.py b/pypy/module/_pypyjson/interp_encoder.py
--- a/pypy/module/_pypyjson/interp_encoder.py
+++ b/pypy/module/_pypyjson/interp_encoder.py
@@ -37,16 +37,14 @@
 sb = StringBuilder(len(u))
 sb.append_slice(s, 0, first)
 else:
+# We us

[pypy-commit] pypy gc_no_cleanup_nursery: Handle the uninitialized value better while translating (it can happen)

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73278:5c7aa94e6c89
Date: 2014-09-01 15:04 -0600
http://bitbucket.org/pypy/pypy/changeset/5c7aa94e6c89/

Log:Handle the uninitialized value better while translating (it can
happen)

diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -1331,7 +1331,7 @@
 run = self.runner('malloc_array_of_gcptr')
 res = run([])
 assert not res
-'''
+
 def define_malloc_struct_of_gcptr(cls):
 S1 = lltype.GcStruct('S', ('x', lltype.Signed))
 S = lltype.GcStruct('S',
@@ -1347,25 +1347,7 @@
 run = self.runner("malloc_struct_of_gcptr")
 res = run([])
 assert res
-'''
-'''
-def define_malloc_struct_of_gcptr(cls):
-S = lltype.GcForwardReference()
-S.become(lltype.GcStruct('S',
- ('x', lltype.Signed),
- ('prev', lltype.Ptr(S)),
- ('next', lltype.Ptr(S
-s0 = lltype.malloc(S,zero = False)
-def f():
-return s0.next == lltype.nullptr(S)
-return f
 
-def test_malloc_struct_of_gcptr(self):
-run = self.runner("malloc_struct_of_gcptr")
-pdb.set_trace()
-res = run([])
-assert res
-'''
 # 
 # tagged pointers
 
diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py
--- a/rpython/translator/c/database.py
+++ b/rpython/translator/c/database.py
@@ -286,6 +286,8 @@
 for value in newdependencies:
 #if isinstance(value, _uninitialized):
 #continue
+if isinstance(value, lltype._uninitialized):
+continue
 if isinstance(typeOf(value), ContainerType):
 node = self.getcontainernode(value)
 if parent and node._funccodegen_owner is not None:
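
The added isinstance check simply drops placeholder values before the container
handling below it.  A self-contained illustration, using a hypothetical
_uninitialized stand-in class instead of lltype._uninitialized and an
illustrative helper name:

    class _uninitialized(object):
        pass

    def interesting_dependencies(newdependencies):
        # skip markers for uninitialized storage, keep everything else
        return [v for v in newdependencies if not isinstance(v, _uninitialized)]

    assert interesting_dependencies([1, _uninitialized(), "x"]) == [1, "x"]
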
diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py
--- a/rpython/translator/c/test/test_newgc.py
+++ b/rpython/translator/c/test/test_newgc.py
@@ -1381,20 +1381,29 @@
 for length in range(3, 76, 5)])
 
 def define_nursery_hash_base(cls):
+from rpython.rlib.debug import debug_print
+
 class A:
 pass
 def fn():
 objects = []
 hashes = []
 for i in range(200):
+debug_print("starting nursery collection", i)
 rgc.collect(0) # nursery-only collection, if possible
+debug_print("finishing nursery collection", i)
 obj = A()
 objects.append(obj)
 hashes.append(compute_identity_hash(obj))
 unique = {}
+debug_print("objects", len(objects))
 for i in range(len(objects)):
+debug_print(i)
 assert compute_identity_hash(objects[i]) == hashes[i]
+debug_print("storing in dict")
 unique[hashes[i]] = None
+debug_print("done")
+debug_print("finished")
 return len(unique)
 return fn
 


[pypy-commit] pypy gc_no_cleanup_nursery: fix some tests

2014-09-01 Thread fijal
Author: Maciej Fijalkowski 
Branch: gc_no_cleanup_nursery
Changeset: r73279:b3beb2273ee2
Date: 2014-09-01 15:10 -0600
http://bitbucket.org/pypy/pypy/changeset/b3beb2273ee2/

Log:fix some tests

diff --git a/rpython/memory/test/test_transformed_gc.py b/rpython/memory/test/test_transformed_gc.py
--- a/rpython/memory/test/test_transformed_gc.py
+++ b/rpython/memory/test/test_transformed_gc.py
@@ -1319,7 +1319,7 @@
 S = lltype.GcStruct('S', ('x', lltype.Signed))
 A = lltype.GcArray(lltype.Ptr(S))
 def f():
-lst = lltype.malloc(A, 5, zero= False)
+lst = lltype.malloc(A, 5)
 return (lst[0] == lltype.nullptr(S) 
 and lst[1] == lltype.nullptr(S)
 and lst[2] == lltype.nullptr(S)
@@ -1330,7 +1330,7 @@
 def test_malloc_array_of_gcptr(self):
 run = self.runner('malloc_array_of_gcptr')
 res = run([])
-assert not res
+assert res
 
 def define_malloc_struct_of_gcptr(cls):
 S1 = lltype.GcStruct('S', ('x', lltype.Signed))
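
On this branch (which stops relying on a pre-cleared nursery), the point of the
adjusted assertion is that a freshly malloc'ed GcArray of GC pointers must
still read back as all nulls.  A hedged Python analogue, with None standing in
for lltype.nullptr(S) and malloc_array_of_gcptr as an illustrative helper:

    def malloc_array_of_gcptr(n):
        return [None] * n        # stand-in for lltype.malloc(A, n)

    lst = malloc_array_of_gcptr(5)
    assert all(item is None for item in lst)    # hence `assert res` above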