[pypy-commit] pypy jit-short_from_state: allow always pure operations in short_preamble

2011-06-29 Thread hakanardo
Author: Hakan Ardo ha...@debian.org
Branch: jit-short_from_state
Changeset: r45170:fa033a59e246
Date: 2011-06-29 08:01 +0200
http://bitbucket.org/pypy/pypy/changeset/fa033a59e246/

Log: allow always pure operations in short_preamble

diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py 
b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -391,9 +391,9 @@
 new.values[box] = value.get_cloned(new, valuemap)
 
 new.pure_operations = args_dict()
-#for key, op in self.pure_operations.items():
-#if op.result in short_boxes:
-#new.pure_operations[key] = op
+for key, op in self.pure_operations.items():
+if op.result in short_boxes:
+new.pure_operations[key] = op
 new.producer = self.producer
 assert self.posponedop is None
 new.quasi_immutable_deps = self.quasi_immutable_deps
@@ -419,8 +419,9 @@
 return new
 
 def produce_potential_short_preamble_ops(self, potential_ops):
-#for op in self.emitted_pure_operations:
-#potential_ops[op.result] = op
+for op in self.emitted_pure_operations:
+if op.is_always_pure():
+potential_ops[op.result] = op
 for opt in self.optimizations:
 opt.produce_potential_short_preamble_ops(potential_ops)
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy jit-short_from_state: Disable all types of operations in the short preamble. This makes a lot of tests fail due to worse optimization but produces a working pypy. Every loop will g

2011-06-29 Thread hakanardo
Author: Hakan Ardo ha...@debian.org
Branch: jit-short_from_state
Changeset: r45169:59df9c031c41
Date: 2011-06-29 07:29 +0200
http://bitbucket.org/pypy/pypy/changeset/59df9c031c41/

Log: Disable all types of operations in the short preamble. This makes a
lot of tests fail due to worse optimization but produces a working
pypy. Every loop will get an empty short preamble. That allows
virtuals to stay virtual across bridges.

diff --git a/pypy/jit/metainterp/optimizeopt/heap.py 
b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -146,6 +146,7 @@
 def reconstruct_for_next_iteration(self,  short_boxes, surviving_boxes,
optimizer, valuemap):
 new = OptHeap()
+return new
 
 for descr, d in self.cached_fields.items():
 new.cached_fields[descr] = d.get_cloned(optimizer, valuemap, 
short_boxes)
@@ -173,7 +174,8 @@
 
 return new
 
-def produce_potential_short_preamble_ops(self, potential_ops):
+def produce_potential_short_preamble_ops(self, potential_ops):
+return
 for descr, d in self.cached_fields.items():
 d.produce_potential_short_preamble_ops(self.optimizer,
potential_ops, descr)
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py 
b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -391,9 +391,9 @@
 new.values[box] = value.get_cloned(new, valuemap)
 
 new.pure_operations = args_dict()
-for key, op in self.pure_operations.items():
-if op.result in short_boxes:
-new.pure_operations[key] = op
+#for key, op in self.pure_operations.items():
+#if op.result in short_boxes:
+#new.pure_operations[key] = op
 new.producer = self.producer
 assert self.posponedop is None
 new.quasi_immutable_deps = self.quasi_immutable_deps
@@ -419,8 +419,8 @@
 return new
 
 def produce_potential_short_preamble_ops(self, potential_ops):
-for op in self.emitted_pure_operations:
-potential_ops[op.result] = op
+#for op in self.emitted_pure_operations:
+#potential_ops[op.result] = op
 for opt in self.optimizations:
 opt.produce_potential_short_preamble_ops(potential_ops)
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy jit-short_from_state: failing test

2011-06-29 Thread hakanardo
Author: Hakan Ardo ha...@debian.org
Branch: jit-short_from_state
Changeset: r45171:431d551a6ef3
Date: 2011-06-29 08:27 +0200
http://bitbucket.org/pypy/pypy/changeset/431d551a6ef3/

Log:failing test

diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py 
b/pypy/jit/metainterp/optimizeopt/virtualstate.py
--- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
@@ -231,7 +231,11 @@
 bad[self] = True
 bad[other] = True
 return False
-return self.intbound.contains_bound(other.intbound)
+if not self.intbound.contains_bound(other.intbound):
+bad[self] = True
+bad[other] = True
+return False
+return True
 
 def _generate_guards(self, other, box, cpu, extra_guards):
 if not isinstance(other, NotVirtualStateInfo):
diff --git a/pypy/jit/metainterp/test/test_virtual.py 
b/pypy/jit/metainterp/test/test_virtual.py
--- a/pypy/jit/metainterp/test/test_virtual.py
+++ b/pypy/jit/metainterp/test/test_virtual.py
@@ -898,6 +898,29 @@
 res = self.meta_interp(f, [], repeat=7)
 assert res == f()
 
+def test_virtual_attribute_pure_function(self):
+mydriver = JitDriver(reds = ['i', 'sa', 'n', 'node'], greens = [])
+class A(object):
+def __init__(self, v1, v2):
+self.v1 = v1
+self.v2 = v2
+def f(n):
+i = sa = 0
+node = A(1, 2)
+while i < n:
+mydriver.jit_merge_point(i=i, sa=sa, n=n, node=node)
+sa += node.v1 + node.v2 + 2*node.v1
+if i < n/2:
+node = A(n, 2*n)
+else:
+node = A(n, 3*n)
+i += 1
+return sa
+
+res = self.meta_interp(f, [16])
+assert res == f(16)
+
+
 # 
 # Run 1: all the tests instantiate a real RPython class
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: (lac, arigo)

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: 
Changeset: r45172:1bb155fd266f
Date: 2011-06-28 13:41 +0200
http://bitbucket.org/pypy/pypy/changeset/1bb155fd266f/

Log:(lac, arigo)

Found out that even large young arrays would (likely) benefit from
card marking. So enable card marking even for them, carefully.

diff --git a/pypy/rpython/memory/gc/minimark.py 
b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -75,9 +75,14 @@
 
 first_gcflag = 1 << (LONG_BIT//2)
 
-# The following flag is never set on young objects.  It is initially set
+# The following flag is usually not set on young objects.  It is initially set
 # on all prebuilt and old objects, and gets cleared by the write_barrier()
-# when we write in them a pointer to a young object.
+# when we write in them a pointer to a young object.  If the object is a
+# large array (young or old), then GCFLAG_HAS_CARDS is set; in this case,
+# GCFLAG_NO_YOUNG_PTRS is also generally set (a bit counter-intuitively).
+# However, if card-marking lost track and is now useless, then
+# GCFLAG_NO_YOUNG_PTRS is cleared: there might be young pointers anywhere
+# in the array.
 GCFLAG_NO_YOUNG_PTRS = first_gcflag << 0
 
 # The following flag is set on some prebuilt objects.  The flag is set
@@ -256,7 +261,8 @@
 # that it is possible for an object to be listed both in here
 # and in 'old_objects_pointing_to_young', in which case we
 # should just clear the cards and trace it fully, as usual.
-self.old_objects_with_cards_set = self.AddressStack()
+# Note also that young array objects may be added to this list.
+self.objects_with_cards_set = self.AddressStack()
 #
 # A list of all prebuilt GC objects that contain pointers to the heap
 self.prebuilt_root_objects = self.AddressStack()
@@ -643,7 +649,7 @@
 # Reserve N extra words containing card bits before the object.
 extra_words = self.card_marking_words_for_length(length)
 cardheadersize = WORD * extra_words
-extra_flags = GCFLAG_HAS_CARDS
+extra_flags = GCFLAG_HAS_CARDS | GCFLAG_NO_YOUNG_PTRS
 # note that if 'can_make_young', then card marking will only
 # be used later, after (and if) the object becomes old
 #
@@ -980,12 +986,13 @@
 # 'addr_array' is the address of the object in which we write,
 # which must have an array part;  'index' is the index of the
 # item that is (or contains) the pointer that we write.
-if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
-ll_assert(self.debug_is_old_object(addr_array),
-  "young array with GCFLAG_NO_YOUNG_PTRS")
 objhdr = self.header(addr_array)
 if objhdr.tid & GCFLAG_HAS_CARDS == 0:
 #
+if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
+ll_assert(self.debug_is_old_object(addr_array),
+  "young array with GCFLAG_NO_YOUNG_PTRS")
+#
 # no cards, use default logic.  Mostly copied from above.
 self.old_objects_pointing_to_young.append(addr_array)
 objhdr = self.header(addr_array)
@@ -1016,7 +1023,7 @@
 addr_byte.char[0] = chr(byte | bitmask)
 #
 if objhdr.tid & GCFLAG_CARDS_SET == 0:
-self.old_objects_with_cards_set.append(addr_array)
+self.objects_with_cards_set.append(addr_array)
 objhdr.tid |= GCFLAG_CARDS_SET
 
 remember_young_pointer_from_array2._dont_inline_ = True
@@ -1026,9 +1033,6 @@
 
 # xxx trying it out for the JIT: a 3-arguments version of the above
 def remember_young_pointer_from_array3(addr_array, index, newvalue):
-if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
-ll_assert(self.debug_is_old_object(addr_array),
-  "young array with GCFLAG_NO_YOUNG_PTRS")
 objhdr = self.header(addr_array)
 #
 # a single check for the common case of neither GCFLAG_HAS_CARDS
@@ -1066,12 +1070,16 @@
 addr_byte.char[0] = chr(byte | bitmask)
 #
 if objhdr.tid & GCFLAG_CARDS_SET == 0:
-self.old_objects_with_cards_set.append(addr_array)
+self.objects_with_cards_set.append(addr_array)
 objhdr.tid |= GCFLAG_CARDS_SET
 return
 #
 # Logic for the no-cards case, put here to minimize the number
 # of checks done at the start of the function
+if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
+ll_assert(self.debug_is_old_object(addr_array),
+  young 

[pypy-commit] pypy default: (antocuni, lac, arigo)

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: 
Changeset: r45173:324a8265e420
Date: 2011-06-28 18:42 +0200
http://bitbucket.org/pypy/pypy/changeset/324a8265e420/

Log:(antocuni, lac, arigo)

Carefully change the world to fix corner-case bugs introduced by the
previous checkin.

A better version of writebarrier_before_copy() for list resizes,
copying the card marks over to the new array.

diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py
--- a/pypy/rlib/rgc.py
+++ b/pypy/rlib/rgc.py
@@ -272,7 +272,9 @@
 if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
 # perform a write barrier that copies necessary flags from
 # source to dest
-if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
+if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest,
+source_start, dest_start,
+length):
 # if the write barrier is not supported, copy by hand
 for i in range(length):
 dest[i + dest_start] = source[i + source_start]
diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py
--- a/pypy/rpython/llinterp.py
+++ b/pypy/rpython/llinterp.py
@@ -737,9 +737,12 @@
 def op_zero_gc_pointers_inside(self, obj):
 raise NotImplementedError(zero_gc_pointers_inside)
 
-def op_gc_writebarrier_before_copy(self, source, dest):
+def op_gc_writebarrier_before_copy(self, source, dest,
+   source_start, dest_start, length):
 if hasattr(self.heap, 'writebarrier_before_copy'):
-return self.heap.writebarrier_before_copy(source, dest)
+return self.heap.writebarrier_before_copy(source, dest,
+  source_start, dest_start,
+  length)
 else:
 return True
 
diff --git a/pypy/rpython/lltypesystem/opimpl.py 
b/pypy/rpython/lltypesystem/opimpl.py
--- a/pypy/rpython/lltypesystem/opimpl.py
+++ b/pypy/rpython/lltypesystem/opimpl.py
@@ -473,12 +473,16 @@
 checkadr(addr2)
 return addr1 - addr2
 
-def op_gc_writebarrier_before_copy(source, dest):
+def op_gc_writebarrier_before_copy(source, dest,
+   source_start, dest_start, length):
 A = lltype.typeOf(source)
 assert A == lltype.typeOf(dest)
 assert isinstance(A.TO, lltype.GcArray)
 assert isinstance(A.TO.OF, lltype.Ptr)
 assert A.TO.OF.TO._gckind == 'gc'
+assert type(source_start) is int
+assert type(dest_start) is int
+assert type(length) is int
 return True
 
 def op_getfield(p, name):
diff --git a/pypy/rpython/memory/gc/generation.py 
b/pypy/rpython/memory/gc/generation.py
--- a/pypy/rpython/memory/gc/generation.py
+++ b/pypy/rpython/memory/gc/generation.py
@@ -517,7 +517,8 @@
 objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
 self.last_generation_root_objects.append(addr_struct)
 
-def writebarrier_before_copy(self, source_addr, dest_addr):
+def writebarrier_before_copy(self, source_addr, dest_addr,
+ source_start, dest_start, length):
  This has the same effect as calling writebarrier over
 each element in dest copied from source, except it might reset
 one of the following flags a bit too eagerly, which means we'll have
diff --git a/pypy/rpython/memory/gc/minimark.py 
b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -75,15 +75,16 @@
 
 first_gcflag = 1 << (LONG_BIT//2)
 
-# The following flag is usually not set on young objects.  It is initially set
-# on all prebuilt and old objects, and gets cleared by the write_barrier()
-# when we write in them a pointer to a young object.  If the object is a
-# large array (young or old), then GCFLAG_HAS_CARDS is set; in this case,
-# GCFLAG_NO_YOUNG_PTRS is also generally set (a bit counter-intuitively).
-# However, if card-marking lost track and is now useless, then
-# GCFLAG_NO_YOUNG_PTRS is cleared: there might be young pointers anywhere
-# in the array.
-GCFLAG_NO_YOUNG_PTRS = first_gcflag << 0
+# The following flag is set on objects if we need to do something to
+# track the young pointers that it might contain.  The flag is not set
+# on young objects (unless they are large arrays, see below), and we
+# simply assume that any young object can point to any other young object.
+# For old and prebuilt objects, the flag is usually set, and is cleared
+# when we write a young pointer to it.  For large arrays with
+# GCFLAG_HAS_CARDS, we rely on card marking to track where the
+# young pointers are; the flag GCFLAG_TRACK_YOUNG_PTRS is set in this
+# case too, to speed up the write barrier.
+GCFLAG_TRACK_YOUNG_PTRS = first_gcflag << 0
 
 # The following flag is set on some prebuilt objects.  The flag is 

[pypy-commit] pypy default: merge heads

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: 
Changeset: r45174:c19ba84541c5
Date: 2011-06-29 09:40 +0200
http://bitbucket.org/pypy/pypy/changeset/c19ba84541c5/

Log:merge heads

diff --git a/pypy/annotation/bookkeeper.py b/pypy/annotation/bookkeeper.py
--- a/pypy/annotation/bookkeeper.py
+++ b/pypy/annotation/bookkeeper.py
@@ -299,12 +299,13 @@
 listdef.generalize_range_step(flags['range_step'])
 return SomeList(listdef)
 
-def getdictdef(self, is_r_dict=False):
+def getdictdef(self, is_r_dict=False, force_non_null=False):
 """Get the DictDef associated with the current position."""
 try:
 dictdef = self.dictdefs[self.position_key]
 except KeyError:
-dictdef = DictDef(self, is_r_dict=is_r_dict)
+dictdef = DictDef(self, is_r_dict=is_r_dict,
+  force_non_null=force_non_null)
 self.dictdefs[self.position_key] = dictdef
 return dictdef
 
diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py
--- a/pypy/annotation/builtin.py
+++ b/pypy/annotation/builtin.py
@@ -311,8 +311,14 @@
 def robjmodel_we_are_translated():
 return immutablevalue(True)
 
-def robjmodel_r_dict(s_eqfn, s_hashfn):
-dictdef = getbookkeeper().getdictdef(is_r_dict=True)
+def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None):
+if s_force_non_null is None:
+force_non_null = False
+else:
+assert s_force_non_null.is_constant()
+force_non_null = s_force_non_null.const
+dictdef = getbookkeeper().getdictdef(is_r_dict=True,
+ force_non_null=force_non_null)
 dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn)
 return SomeDict(dictdef)
 
diff --git a/pypy/annotation/dictdef.py b/pypy/annotation/dictdef.py
--- a/pypy/annotation/dictdef.py
+++ b/pypy/annotation/dictdef.py
@@ -85,12 +85,14 @@
 
 def __init__(self, bookkeeper, s_key = s_ImpossibleValue,
  s_value = s_ImpossibleValue,
-   is_r_dict = False):
+   is_r_dict = False,
+   force_non_null = False):
 self.dictkey = DictKey(bookkeeper, s_key, is_r_dict)
 self.dictkey.itemof[self] = True
 self.dictvalue = DictValue(bookkeeper, s_value)
 self.dictvalue.itemof[self] = True
 self.bookkeeper = bookkeeper
+self.force_non_null = force_non_null
 
 def read_key(self, position_key=None):
 if position_key is None:
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -562,7 +562,8 @@
 elif callable.api_func.restype is not lltype.Void:
 retval = rffi.cast(callable.api_func.restype, result)
 except Exception, e:
-print 'Fatal error in cpyext, calling', callable.__name__
+print 'Fatal error in cpyext, CPython compatibility layer, 
calling', callable.__name__
+print 'Either report a bug or consider not using this particular 
extension'
 if not we_are_translated():
 import traceback
 traceback.print_exc()
diff --git a/pypy/objspace/std/dictmultiobject.py 
b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -1,13 +1,14 @@
 import py, sys
 from pypy.objspace.std.model import registerimplementation, W_Object
 from pypy.objspace.std.register_all import register_all
+from pypy.objspace.std.settype import set_typedef as settypedef
 from pypy.interpreter import gateway
 from pypy.interpreter.argument import Signature
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.module.__builtin__.__init__ import BUILTIN_TO_INDEX, 
OPTIMIZED_BUILTINS
 
 from pypy.rlib.objectmodel import r_dict, we_are_translated
-from pypy.objspace.std.settype import set_typedef as settypedef
+from pypy.rlib.debug import mark_dict_non_null
 
 def _is_str(space, w_key):
 return space.is_w(space.type(w_key), space.w_str)
@@ -59,7 +60,8 @@
 
 def initialize_as_rdict(self):
 assert self.r_dict_content is None
-self.r_dict_content = r_dict(self.space.eq_w, self.space.hash_w)
+self.r_dict_content = r_dict(self.space.eq_w, self.space.hash_w,
+ force_non_null=True)
 return self.r_dict_content
 
 
@@ -308,6 +310,7 @@
 def __init__(self, space):
 self.space = space
 self.content = {}
+mark_dict_non_null(self.content)
 
 def impl_setitem(self, w_key, w_value):
 space = self.space
@@ -317,6 +320,7 @@
 self._as_rdict().impl_fallback_setitem(w_key, w_value)
 
 def impl_setitem_str(self, key, w_value):
+assert key is not None
 self.content[key] = w_value
 
 def impl_setdefault(self, w_key, 

[pypy-commit] pypy default: a helper function

2011-06-29 Thread fijal
Author: Maciej Fijalkowski fij...@gmail.com
Branch: 
Changeset: r45175:1795fbfe7614
Date: 2011-06-29 11:26 +0200
http://bitbucket.org/pypy/pypy/changeset/1795fbfe7614/

Log:a helper function

diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -121,6 +121,9 @@
 def getcode(self):
 return self.code
 
+def has_valid_code(self):
+return self.code is not None
+
 def getopcode(self):
 return self.code.map[self.bytecode_no]
 
@@ -220,6 +223,12 @@
 return self._lineset
 lineset = property(getlineset)
 
+def has_valid_code(self):
+for chunk in self.chunks:
+if not chunk.has_valid_code():
+return False
+return True
+
 def _compute_linerange(self):
 self._lineset = set()
 minline = sys.maxint
diff --git a/pypy/tool/jitlogparser/test/test_parser.py 
b/pypy/tool/jitlogparser/test/test_parser.py
--- a/pypy/tool/jitlogparser/test/test_parser.py
+++ b/pypy/tool/jitlogparser/test/test_parser.py
@@ -168,7 +168,7 @@
 []
 int_add(0, 1)
 ''')
-loops = LoopStorage().reconnect_loops([main, bridge])
+LoopStorage().reconnect_loops([main, bridge])
 assert adjust_bridges(main, {})[1].name == 'guard_true'
 assert adjust_bridges(main, {'loop-13': True})[1].name == 'int_add'
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] jitviewer default: should work better now

2011-06-29 Thread fijal
Author: Maciej Fijalkowski fij...@gmail.com
Branch: 
Changeset: r135:f2ada8b3735d
Date: 2011-06-29 11:27 +0200
http://bitbucket.org/pypy/jitviewer/changeset/f2ada8b3735d/

Log:should work better now

diff --git a/_jitviewer/parser.py b/_jitviewer/parser.py
--- a/_jitviewer/parser.py
+++ b/_jitviewer/parser.py
@@ -118,6 +118,8 @@
 def html_repr(self):
 if self.filename is not None:
 code = self.getcode()
+if code is None:
+return self.bytecode_name
 opcode = self.code.map[self.bytecode_no]
 return '%s %s' % (self.bytecode_name, opcode.argstr)
 else:
diff --git a/bin/jitviewer.py b/bin/jitviewer.py
--- a/bin/jitviewer.py
+++ b/bin/jitviewer.py
@@ -127,13 +127,14 @@
 callstack.append((','.join(path_so_far), '%s in %s at %d' % (loop.name,
 loop.filename, loop.startlineno)))
 
-startline, endline = loop.linerange
-if loop.filename is not None:
+if not loop.has_valid_code() or loop.filename is None:
+startline = 0
+source = CodeReprNoFile(loop)
+else:
+startline, endline = loop.linerange
 code = self.storage.load_code(loop.filename)[(loop.startlineno,
   loop.name)]
 source = CodeRepr(inspect.getsource(code), code, loop)
-else:
-source = CodeReprNoFile(loop)
 d = {'html': flask.render_template('loop.html',
source=source,
current_loop=no,
@@ -168,8 +169,8 @@
 class CheckingLoopStorage(LoopStorage):
 def disassemble_code(self, fname, startlineno, name):
 result = super(CheckingLoopStorage, self).disassemble_code(fname, 
startlineno, name)
-if result is None and fname is not None:
-raise CannotFindFile(fname)
+#if result is None and fname is not None:
+#raise CannotFindFile(fname)
 return result
 
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Test and fix on 64-bit.

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: 
Changeset: r45176:76b06820d08b
Date: 2011-06-29 11:47 +0200
http://bitbucket.org/pypy/pypy/changeset/76b06820d08b/

Log:Test and fix on 64-bit.

diff --git a/pypy/jit/backend/x86/assembler.py 
b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -895,7 +895,7 @@
 
 def regalloc_push(self, loc):
 if isinstance(loc, RegLoc) and loc.is_xmm:
-self.mc.SUB_ri(esp.value, 2*WORD)
+self.mc.SUB_ri(esp.value, 8)   # = size of doubles
 self.mc.MOVSD_sx(0, loc.value)
 elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8:
 # XXX evil trick
@@ -907,7 +907,7 @@
 def regalloc_pop(self, loc):
 if isinstance(loc, RegLoc) and loc.is_xmm:
 self.mc.MOVSD_xs(loc.value, 0)
-self.mc.ADD_ri(esp.value, 2*WORD)
+self.mc.ADD_ri(esp.value, 8)   # = size of doubles
 elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8:
 # XXX evil trick
 self.mc.POP_b(get_ebp_ofs(loc.position + 1))
diff --git a/pypy/jit/backend/x86/test/test_assembler.py 
b/pypy/jit/backend/x86/test/test_assembler.py
--- a/pypy/jit/backend/x86/test/test_assembler.py
+++ b/pypy/jit/backend/x86/test/test_assembler.py
@@ -1,13 +1,15 @@
 from pypy.jit.backend.x86.regloc import *
 from pypy.jit.backend.x86.assembler import Assembler386
 from pypy.jit.backend.x86.regalloc import X86FrameManager, get_ebp_ofs
-from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, INT, REF, 
FLOAT
+from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstFloat
+from pypy.jit.metainterp.history import INT, REF, FLOAT
 from pypy.rlib.rarithmetic import intmask
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64
 from pypy.jit.backend.detect_cpu import getcpuclass 
 from pypy.jit.backend.x86.regalloc import X86RegisterManager, 
X86_64_RegisterManager, X86XMMRegisterManager, X86_64_XMMRegisterManager
 from pypy.jit.codewriter import longlong
+import ctypes
 
 ACTUAL_CPU = getcpuclass()
 
@@ -238,3 +240,103 @@
 assert assembler.fail_boxes_int.getitem(i) == expected_ints[i]
 assert assembler.fail_boxes_ptr.getitem(i) == expected_ptrs[i]
 assert assembler.fail_boxes_float.getitem(i) == expected_floats[i]
+
+# 
+
+class TestRegallocPushPop(object):
+
+def do_test(self, callback):
+from pypy.jit.backend.x86.regalloc import X86FrameManager
+from pypy.jit.backend.x86.regalloc import X86XMMRegisterManager
+class FakeToken:
+class compiled_loop_token:
+asmmemmgr_blocks = None
+cpu = ACTUAL_CPU(None, None)
+cpu.setup()
+looptoken = FakeToken()
+asm = cpu.assembler
+asm.setup_once()
+asm.setup(looptoken)
+self.fm = X86FrameManager()
+self.xrm = X86XMMRegisterManager(None, frame_manager=self.fm,
+ assembler=asm)
+callback(asm)
+asm.mc.RET()
+rawstart = asm.materialize_loop(looptoken)
+#
+F = ctypes.CFUNCTYPE(ctypes.c_long)
+fn = ctypes.cast(rawstart, F)
+res = fn()
+return res
+
+def test_simple(self):
+def callback(asm):
+asm.mov(imm(42), edx)
+asm.regalloc_push(edx)
+asm.regalloc_pop(eax)
+res = self.do_test(callback)
+assert res == 42
+
+def test_push_stack(self):
+def callback(asm):
+loc = self.fm.frame_pos(5, INT)
+asm.mc.SUB_ri(esp.value, 64)
+asm.mov(imm(42), loc)
+asm.regalloc_push(loc)
+asm.regalloc_pop(eax)
+asm.mc.ADD_ri(esp.value, 64)
+res = self.do_test(callback)
+assert res == 42
+
+def test_pop_stack(self):
+def callback(asm):
+loc = self.fm.frame_pos(5, INT)
+asm.mc.SUB_ri(esp.value, 64)
+asm.mov(imm(42), edx)
+asm.regalloc_push(edx)
+asm.regalloc_pop(loc)
+asm.mov(loc, eax)
+asm.mc.ADD_ri(esp.value, 64)
+res = self.do_test(callback)
+assert res == 42
+
+def test_simple_xmm(self):
+def callback(asm):
+c = ConstFloat(longlong.getfloatstorage(-42.5))
+loc = self.xrm.convert_to_imm(c)
+asm.mov(loc, xmm5)
+asm.regalloc_push(xmm5)
+asm.regalloc_pop(xmm0)
+asm.mc.CVTTSD2SI(eax, xmm0)
+res = self.do_test(callback)
+assert res == -42
+
+def test_push_stack_xmm(self):
+def callback(asm):
+c = ConstFloat(longlong.getfloatstorage(-42.5))
+loc = self.xrm.convert_to_imm(c)
+loc2 = self.fm.frame_pos(4, FLOAT)
+  

[pypy-commit] pypy dict-strategies: forget to import py

2011-06-29 Thread l . diekmann
Author: Lukas Diekmann lukas.diekm...@uni-duesseldorf.de
Branch: dict-strategies
Changeset: r45178:68c48983e7bd
Date: 2011-06-29 13:10 +0200
http://bitbucket.org/pypy/pypy/changeset/68c48983e7bd/

Log:forget to import py

diff --git a/pypy/objspace/std/test/test_dictmultiobject.py 
b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -737,6 +737,7 @@
 
 class AppTestModuleDict(object):
 def setup_class(cls):
+import py
 cls.space = gettestobjspace(**{"objspace.std.withcelldict": True})
 if option.runappdirect:
 py.test.skip("__repr__ doesn't work on appdirect")
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy dict-strategies: with dict-strategies object identity is not preserved

2011-06-29 Thread l . diekmann
Author: Lukas Diekmann lukas.diekm...@uni-duesseldorf.de
Branch: dict-strategies
Changeset: r45180:b2211183df54
Date: 2011-06-29 13:22 +0200
http://bitbucket.org/pypy/pypy/changeset/b2211183df54/

Log:with dict-strategies object identity is not preserved

diff --git a/lib-python/modified-2.7/test/test_weakref.py 
b/lib-python/modified-2.7/test/test_weakref.py
--- a/lib-python/modified-2.7/test/test_weakref.py
+++ b/lib-python/modified-2.7/test/test_weakref.py
@@ -993,7 +993,7 @@
 self.assertTrue(len(weakdict) == 2)
 k, v = weakdict.popitem()
 self.assertTrue(len(weakdict) == 1)
-if k is key1:
+if k == key1:
 self.assertTrue(v is value1)
 else:
 self.assertTrue(v is value2)
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy dict-strategies: also promote self (strategy)

2011-06-29 Thread l . diekmann
Author: Lukas Diekmann lukas.diekm...@uni-duesseldorf.de
Branch: dict-strategies
Changeset: r45182:8b4e4c207846
Date: 2011-06-29 15:48 +0200
http://bitbucket.org/pypy/pypy/changeset/8b4e4c207846/

Log:also promote self (strategy)

diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
--- a/pypy/objspace/std/celldict.py
+++ b/pypy/objspace/std/celldict.py
@@ -37,6 +37,7 @@
 # when we are jitting, we always go through the pure function
 # below, to ensure that we have no residual dict lookup
 w_dict = jit.hint(w_dict, promote=True)
+self = jit.hint(self, promote=True)
 return self._getcell_makenew(w_dict, key)
 return self.unerase(w_dict.dstorage).get(key, None)
 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] extradoc extradoc: I think this XXX is ok now that we have a related work section

2011-06-29 Thread cfbolz
Author: Carl Friedrich Bolz cfb...@gmx.de
Branch: extradoc
Changeset: r3805:65d4c2b94f8d
Date: 2011-06-29 15:46 +0200
http://bitbucket.org/pypy/extradoc/changeset/65d4c2b94f8d/

Log:I think this XXX is ok now that we have a related work section

diff --git a/talk/iwtc11/paper.tex b/talk/iwtc11/paper.tex
--- a/talk/iwtc11/paper.tex
+++ b/talk/iwtc11/paper.tex
@@ -462,8 +462,6 @@
 \label{fig:overview}
 \end{figure}
 
-%XXX find reference of prior work on this
-
 Loop peeling is achieved by appending a copy of the traced iteration at
 the end of itself. See Figure~\ref{fig:overview} for an illustration.
 The first part (called \emph{preamble}) finishes with a jump to the second 
part
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] extradoc extradoc: Draft for the blog post Global Interpreter Lock

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: extradoc
Changeset: r3806:a651359a1763
Date: 2011-06-29 17:22 +0200
http://bitbucket.org/pypy/extradoc/changeset/a651359a1763/

Log:Draft for the blog post Global Interpreter Lock

diff --git a/blog/draft/gil.rst b/blog/draft/gil.rst
new file mode 100644
--- /dev/null
+++ b/blog/draft/gil.rst
@@ -0,0 +1,113 @@
+Global Interpreter Lock, or how to kill it
+==
+
+People that listened to my lightning talk at EuroPython know that
+(suddenly) we have a plan to remove the Global Interpreter Lock --- the
+infamous GIL, the thing in CPython that prevents multiple threads from
+actually running in your Python code in parallel.
+
+That's not actually new, because Jython has been doing it all along (and
+I think IronPython too).  Jython works by very carefully adding locks to
+all the mutable built-in types, and by relying on the underlying Java
+platform to be efficient about them (so that the result is faster than,
+say, very carefully adding similar locks in CPython).  By very
+carefully, I mean really really carefully; for example,
+'dict1.update(dict2)' needs to lock both dict1 and dict2, but if you do
+it naively, then a parallel 'dict2.update(dict1)' might cause a
+deadlock.
+
+We are considering a quite different approach, based on `Software
+Transactional Memory`_.  This is a recent development in computer
+science, and it gives a nicer solution than locking.  Here is a short
+introduction to it.
+
+Say you want to atomically pop an item from 'list1' and append it to
+'list2'::
+
+def f(list1, list2):
+x = list1.pop()
+list2.append(x)
+
+This is not safe in multithreaded cases (even with the GIL).  Say that
+you call ``f(l1, l2)`` in thread 1 and ``f(l2, l1)`` in thread 2.  What
+you want is that it has no effect at all (x is moved from one list to
+the other, then back).  But what can occur is that instead the top of
+the two lists are swapped, depending on timing issues.
+
+One way to fix it is with a global lock::
+
+def f(list1, list2):
+global_lock.acquire()
+x = list1.pop()
+list2.append(x)
+global_lock.release()
+
+A finer way to fix it is with locks that come with the lists::
+
+def f(list1, list2):
+acquire_all_locks(list1.lock, list2.lock)
+x = list1.pop()
+list2.append(x)
+release_all_locks(list1.lock, list2.lock)
+
+The second solution is a model for Jython's, while the first is a model
+for CPython's.  Indeed, in CPython's interpreter, we acquire the GIL,
+then we do one bytecode (or actually a number of them, like 100), then
+we release the GIL; and then we proceed to the next bunch of 100.
+
+Software Transactional Memory (STM) gives a third solution::
+
+def f(list1, list2):
+while True:
+t = transaction()
+x = list1.pop(t)
+list2.append(t, x)
+if t.commit():
+break
+
+In this solution, we make a ``transaction`` object and use it in all
+reads and writes we do to the lists.  There are actually several
+different models, but let's focus on one of them.  During a transaction,
+we don't actually change the global memory at all.  Instead, we use the
+thread-local ``transaction`` object.  We store in it which objects we
+read from, which objects we write to, and what values we write.  It is
+only when the transaction reaches its end that we attempt to commit
+it.  Committing might fail if other commits have occurred in between,
+creating inconsistencies; in that case, the transaction aborts and
+must restart from the beginning.
+
+In the same way as the previous two solutions are models for CPython and
+Jython, the STM solution looks like it could be a model for PyPy in the
+future.  In such a PyPy, the interpreter would start a transaction, do
+one or several bytecodes, and then end the transaction; and repeat.
+This is very similar to what is going on in CPython with the GIL.  In
+particular, it means that it gives programmers all the same guarantees
+as the GIL does.  The *only* difference is that it can actually run
+multiple threads in parallel, as long as their code are not interfering
+with each other.
+
+Why not apply that idea to CPython?  Because we would need to change
+everything everywhere.  In the example above, you may have noted that I
+no longer call 'list1.pop()', but 'list1.pop(t)'; this is a way to tell
+that the implementation of all the methods needs to be changed, in order
+to do their work transactionally.  This means that instead of really
+changing the global memory in which the list is stored, it must instead
+record the change in the ``transaction`` object.  If our interpreter is
+written in C, like CPython, then we need to write it explicitly
+everywhere.  If it is written instead in a higher-level language, like
+PyPy, then we can add this behavior as translation rules, and apply it
+automatically wherever it is 

[pypy-commit] pypy default: (arigo, antocuni, rguillbert): add some debug info

2011-06-29 Thread antocuni
Author: Antonio Cuni anto.c...@gmail.com
Branch: 
Changeset: r45184:0ce7a1037276
Date: 2011-06-29 18:18 +0200
http://bitbucket.org/pypy/pypy/changeset/0ce7a1037276/

Log:(arigo, antocuni, rguillbert): add some debug info

diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -119,6 +119,7 @@
 old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop,
jitdriver_sd.warmstate.enable_opts)
 except InvalidLoop:
+debug_print(compile_new_loop: got an InvalidLoop)
 return None
 if old_loop_token is not None:
 metainterp.staticdata.log(reusing old loop)
@@ -633,6 +634,7 @@
 new_loop, state.enable_opts,
 inline_short_preamble, retraced)
 except InvalidLoop:
+debug_print(compile_new_bridge: got an InvalidLoop)
 # XXX I am fairly convinced that optimize_bridge cannot actually raise
 # InvalidLoop
 return None
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] extradoc extradoc: Updates.

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: extradoc
Changeset: r3809:47ee683cd1f1
Date: 2011-06-29 18:40 +0200
http://bitbucket.org/pypy/extradoc/changeset/47ee683cd1f1/

Log:Updates.

diff --git a/blog/draft/gil.rst b/blog/draft/gil.rst
--- a/blog/draft/gil.rst
+++ b/blog/draft/gil.rst
@@ -1,8 +1,8 @@
 Global Interpreter Lock, or how to kill it
 ==
 
-People that listened to my lightning talk at EuroPython know that
-(suddenly) we have a plan to remove the Global Interpreter Lock --- the
+People that listened to my (Armin Rigo) lightning talk at EuroPython know that
+suddenly, we have a plan to remove the Global Interpreter Lock --- the
 infamous GIL, the thing in CPython that prevents multiple threads from
 actually running in your Python code in parallel.
 
@@ -84,11 +84,11 @@
 particular, it means that it gives programmers all the same guarantees
 as the GIL does.  The *only* difference is that it can actually run
 multiple threads in parallel, as long as their code does not interfere
-with each other.  
-
-XXX how much slower would it make things for the person whose code
-isn't suitable to try to run it?  All of us?  Is this an option you
-could enable?
+with each other.  (In particular, if you need not just the GIL but actual
+locks in your existing multi-threaded program, then this will not
+magically remove the need for them.  You might get an additional built-in
+module that exposes STM to your Python programs, if you prefer it over
+locks, but that's another question.)
 
 Why not apply that idea to CPython?  Because we would need to change
 everything everywhere.  In the example above, you may have noted that I
@@ -100,7 +100,12 @@
 written in C, as CPython is, then we need to write it explicitly
 everywhere.  If it is written instead in a higher-level language, as
 PyPy is, then we can add this behavior as a set of translation rules, and 
-apply them automatically wherever it is necessary.
+apply them automatically wherever it is necessary.  Moreover, it can be
+a translation-time option: you can either get the current pypy with a
+GIL, or a version with STM, which would be slower due to the extra
+bookkeeping.  (How much slower?  I have no clue, but as a wild guess,
+maybe between 2 and 5 times slower.  That is fine if you have enough
+cores, as long as it scales nicely :-)
 
 A final note: as STM research is very recent (it started around 2003),
 there are a number of variants around, and it's not clear yet which one
@@ -109,20 +114,19 @@
 Transactional Memory seems to be one possible state-of-the-art; it also
 seems to be good enough for all cases.
 
-So, when will it be done?  No clue so far.  It is still at the idea
+So, when will it be done?  I cannot say yet.  It is still at the idea
 stage, but I *think* that it can work.  How long would it take us to
 write it?  Again no clue, but we are looking at many months rather
-than many days.  This is the sort of thing that I (Armin Rigo) would
+than many days.  This is the sort of thing that I would
 like to be able to work on full time after the `Eurostars funding`_
 runs out on September 1.  We are currently looking at ways to use
 `crowdfunding`_ to raise money so that I can do exactly that.  Expect
 a blog post about that very soon.  But this looks like a perfect
 candidate for crowdfunding -- there are at least thousands of you who
-would be willing to pay 10s of Euros to Kill the Gil.  Now we only
+would be willing to pay 10s of Euros to Kill the GIL.  Now we only
 have to make this happen.
 
 
 .. _`Software Transactional Memory`: 
http://en.wikipedia.org/wiki/Software_transactional_memory
-.. _`this paper`: 
 .. _`Eurostars funding`: 
http://morepypy.blogspot.com/2010/12/oh-and-btw-pypy-gets-funding-through.html
-.. _`crowdfunding`:http://en.wikipedia.org/wiki/Crowd_funding
\ No newline at end of file
+.. _`crowdfunding`:http://en.wikipedia.org/wiki/Crowd_funding
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy jit-short_from_state: enable getfield_gc in short preamble

2011-06-29 Thread hakanardo
Author: Hakan Ardo ha...@debian.org
Branch: jit-short_from_state
Changeset: r45186:65d00e745124
Date: 2011-06-29 18:43 +0200
http://bitbucket.org/pypy/pypy/changeset/65d00e745124/

Log:enable getfield_gc in short preamble

diff --git a/pypy/jit/metainterp/optimizeopt/heap.py 
b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -98,7 +98,7 @@
 cf = CachedField()
 for structvalue, fieldvalue in self._cached_fields.iteritems():
 op = self._cached_fields_getfield_op.get(structvalue, None)
-if op and op.result in short_boxes:
+if op and op.result in short_boxes and short_boxes[op.result] is 
op:
 structvalue2 = structvalue.get_cloned(optimizer, valuemap)
 fieldvalue2  = fieldvalue .get_cloned(optimizer, valuemap)
 cf._cached_fields[structvalue2] = fieldvalue2
@@ -146,10 +146,10 @@
 def reconstruct_for_next_iteration(self,  short_boxes, surviving_boxes,
optimizer, valuemap):
 new = OptHeap()
-return new
 
 for descr, d in self.cached_fields.items():
 new.cached_fields[descr] = d.get_cloned(optimizer, valuemap, 
short_boxes)
+return new
 
 new.cached_arrayitems = {}
 for descr, d in self.cached_arrayitems.items():
@@ -175,10 +175,10 @@
 return new
 
 def produce_potential_short_preamble_ops(self, potential_ops):
-return
 for descr, d in self.cached_fields.items():
 d.produce_potential_short_preamble_ops(self.optimizer,
potential_ops, descr)
+return
 
 for descr, d in self.cached_arrayitems.items():
 for value, cache in d.items():
diff --git a/pypy/jit/metainterp/test/test_virtual.py 
b/pypy/jit/metainterp/test/test_virtual.py
--- a/pypy/jit/metainterp/test/test_virtual.py
+++ b/pypy/jit/metainterp/test/test_virtual.py
@@ -919,6 +919,30 @@
 
 res = self.meta_interp(f, [16])
 assert res == f(16)
+
+def test_virtual_loop_invariant_getitem(self):
+mydriver = JitDriver(reds = ['i', 'sa', 'n', 'node1', 'node2'], greens 
= [])
+class A(object):
+def __init__(self, v1, v2):
+self.v1 = v1
+self.v2 = v2
+def f(n):
+i = sa = 0
+node1 = A(1, 2)
+node2 = A(n, n)
+while i  n:
+mydriver.jit_merge_point(i=i, sa=sa, n=n, node1=node1, 
node2=node2)
+sa += node1.v1 + node2.v1 + node2.v2
+if i  n/2:
+node1 = A(node2.v1, 2)
+else:
+node1 = A(i, 2)
+i += 1
+return sa
+
+res = self.meta_interp(f, [16])
+assert res == f(16)
+self.check_loops(getfield_gc=2)
 
 
 # 
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] extradoc extradoc: Typo.

2011-06-29 Thread arigo
Author: Armin Rigo ar...@tunes.org
Branch: extradoc
Changeset: r3810:7aada621cb17
Date: 2011-06-29 18:56 +0200
http://bitbucket.org/pypy/extradoc/changeset/7aada621cb17/

Log:Typo.

diff --git a/blog/draft/gil.rst b/blog/draft/gil.rst
--- a/blog/draft/gil.rst
+++ b/blog/draft/gil.rst
@@ -129,4 +129,4 @@
 
 .. _`Software Transactional Memory`: 
http://en.wikipedia.org/wiki/Software_transactional_memory
 .. _`Eurostars funding`: 
http://morepypy.blogspot.com/2010/12/oh-and-btw-pypy-gets-funding-through.html
-.. _`crowdfunding`:http://en.wikipedia.org/wiki/Crowd_funding
+.. _`crowdfunding`: http://en.wikipedia.org/wiki/Crowd_funding
___
pypy-commit mailing list
pypy-commit@python.org
http://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy reflex-support: allow default arguments and test cleanup

2011-06-29 Thread wlav
Author: Wim Lavrijsen wlavrij...@lbl.gov
Branch: reflex-support
Changeset: r45190:362db21b87c0
Date: 2011-06-28 13:06 -0700
http://bitbucket.org/pypy/pypy/changeset/362db21b87c0/

Log:allow default arguments and test cleanup

diff --git a/pypy/module/cppyy/capi.py b/pypy/module/cppyy/capi.py
--- a/pypy/module/cppyy/capi.py
+++ b/pypy/module/cppyy/capi.py
@@ -132,6 +132,10 @@
 cppyy_method_num_args,
 [C_TYPEHANDLE, rffi.INT], rffi.INT,
 compilation_info=eci)
+c_method_req_args = rffi.llexternal(
+cppyy_method_req_args,
+[C_TYPEHANDLE, rffi.INT], rffi.INT,
+compilation_info=eci)
 c_method_arg_type = rffi.llexternal(
 cppyy_method_arg_type,
 [C_TYPEHANDLE, rffi.INT, rffi.INT], rffi.CCHARP,
diff --git a/pypy/module/cppyy/include/reflexcwrapper.h 
b/pypy/module/cppyy/include/reflexcwrapper.h
--- a/pypy/module/cppyy/include/reflexcwrapper.h
+++ b/pypy/module/cppyy/include/reflexcwrapper.h
@@ -42,6 +42,7 @@
 char* cppyy_method_name(cppyy_typehandle_t handle, int method_index);
 char* cppyy_method_result_type(cppyy_typehandle_t handle, int 
method_index);
 int cppyy_method_num_args(cppyy_typehandle_t handle, int method_index);
+int cppyy_method_req_args(cppyy_typehandle_t handle, int method_index);
 char* cppyy_method_arg_type(cppyy_typehandle_t handle, int method_index, 
int index);
 
 /* method properties */
diff --git a/pypy/module/cppyy/interp_cppyy.py 
b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -87,11 +87,12 @@
 _immutable_ = True
 _immutable_fields_ = [arg_types[*], arg_converters[*]]
 
-def __init__(self, cpptype, method_index, result_type, arg_types):
+def __init__(self, cpptype, method_index, result_type, arg_types, 
args_required):
 self.cpptype = cpptype
 self.space = cpptype.space
 self.method_index = method_index
 self.arg_types = arg_types
+self.args_required = args_required
 self.executor = executor.get_executor(self.space, result_type)
 self.arg_converters = None
 methgetter = get_methptr_getter(self.cpptype.handle,
@@ -113,13 +114,13 @@
 try:
 return self.executor.execute(self.space, self, cppthis, 
len(args_w), args)
 finally:
-self.free_arguments(args)
+self.free_arguments(args, len(args_w))
 
 @jit.unroll_safe
 def do_fast_call(self, cppthis, args_w):
 space = self.space
 # XXX factor out
-if len(args_w) != len(self.arg_types):
+if len(self.arg_types)  len(args_w) or len(args_w)  
self.args_required:
 raise OperationError(space.w_TypeError, space.wrap(wrong number 
of args))
 if self.arg_converters is None:
 self._build_converters()
@@ -161,7 +162,7 @@
 @jit.unroll_safe
 def prepare_arguments(self, args_w):
 space = self.space
-if len(args_w) != len(self.arg_types):
+if len(self.arg_types)  len(args_w) or len(args_w)  
self.args_required:
 raise OperationError(space.w_TypeError, space.wrap(wrong number 
of args))
 if self.arg_converters is None:
 self._build_converters()
@@ -182,8 +183,8 @@
 return args
 
 @jit.unroll_safe
-def free_arguments(self, args):
-for i in range(len(self.arg_types)):
+def free_arguments(self, args, nargs):
+for i in range(nargs):
 conv = self.arg_converters[i]
 conv.free_argument(args[i])
 lltype.free(args, flavor='raw')
@@ -209,7 +210,7 @@
 return self.executor.execute(self.space, self, NULL_VOIDP,
  len(args_w), args)
 finally:
-self.free_arguments(args)
+self.free_arguments(args, len(args_w))
  
 
 class CPPConstructor(CPPMethod):
@@ -393,10 +394,12 @@
 def _make_cppfunction(self, method_index):
 result_type = 
capi.charp2str_free(capi.c_method_result_type(self.handle, method_index))
 num_args = capi.c_method_num_args(self.handle, method_index)
+args_required = capi.c_method_req_args(self.handle, method_index)
 argtypes = []
 for i in range(num_args):
 argtype = capi.charp2str_free(capi.c_method_arg_type(self.handle, 
method_index, i))
-return CPPFunction(self, method_index, result_type, argtypes)
+argtypes.append(argtype)
+return CPPFunction(self, method_index, result_type, argtypes, 
args_required)
 
 def _find_data_members(self):
 num_data_members = capi.c_num_data_members(self.handle)
@@ -425,6 +428,7 @@
 def _make_cppfunction(self, method_index):
 result_type = 
capi.charp2str_free(capi.c_method_result_type(self.handle, method_index))
 num_args = capi.c_method_num_args(self.handle, method_index)
+args_required = capi.c_method_req_args(self.handle, method_index)
 argtypes = []
  

[pypy-commit] pypy reflex-support: - return by value of objects

2011-06-29 Thread wlav
Author: Wim Lavrijsen wlavrij...@lbl.gov
Branch: reflex-support
Changeset: r45192:30fa4b9dc482
Date: 2011-06-29 16:20 -0700
http://bitbucket.org/pypy/pypy/changeset/30fa4b9dc482/

Log:- return by value of objects
 - lazy-lookup of functions in namespaces in case of multiple dicts
 - fix to make global functions callable
 - pythonization of std::vector (iterator protocol)

diff --git a/pypy/module/cppyy/capi.py b/pypy/module/cppyy/capi.py
--- a/pypy/module/cppyy/capi.py
+++ b/pypy/module/cppyy/capi.py
@@ -84,6 +84,10 @@
 cppyy_call_v,
 [C_TYPEHANDLE, rffi.INT, C_OBJECT, rffi.INT, rffi.VOIDPP], lltype.Void,
 compilation_info=eci)
+c_call_o = rffi.llexternal(
+cppyy_call_o,
+[C_TYPEHANDLE, rffi.INT, C_OBJECT, rffi.INT, rffi.VOIDPP, C_TYPEHANDLE], 
rffi.LONG,
+compilation_info=eci)
 c_call_b = rffi.llexternal(
 cppyy_call_b,
 [C_TYPEHANDLE, rffi.INT, C_OBJECT, rffi.INT, rffi.VOIDPP], rffi.INT,
diff --git a/pypy/module/cppyy/executor.py b/pypy/module/cppyy/executor.py
--- a/pypy/module/cppyy/executor.py
+++ b/pypy/module/cppyy/executor.py
@@ -151,6 +151,17 @@
 ptr_result = rffi.cast(rffi.VOIDP, long_result)
 return interp_cppyy.W_CPPInstance(space, self.cpptype, ptr_result)
 
+class InstanceExecutor(InstancePtrExecutor):
+_immutable_ = True
+
+def execute(self, space, func, cppthis, num_args, args):
+from pypy.module.cppyy import interp_cppyy
+long_result = capi.c_call_o(
+func.cpptype.handle, func.method_index, cppthis, num_args, args, 
self.cpptype.handle)
+ptr_result = rffi.cast(rffi.VOIDP, long_result)
+# TODO: take ownership of result ...
+return interp_cppyy.W_CPPInstance(space, self.cpptype, ptr_result)
+
 
 def get_executor(space, name):
 # Matching of 'name' to an executor factory goes through up to four levels:
@@ -188,11 +199,14 @@
 
 #   3) types/classes, either by ref/ptr or by value
 cpptype = interp_cppyy.type_byname(space, clean_name)
-if cpptype and (compound == * or compound == ):
+if cpptype:
 # type check for the benefit of the annotator
 from pypy.module.cppyy.interp_cppyy import W_CPPType
 cpptype = space.interp_w(W_CPPType, cpptype, can_be_None=False)
-return InstancePtrExecutor(space, clean_name, cpptype)
+if (compound == * or compound == ):
+return InstancePtrExecutor(space, clean_name, cpptype)
+elif compound == :
+return InstanceExecutor(space, clean_name, cpptype)
 
 # 4) additional special cases
 # ... none for now
diff --git a/pypy/module/cppyy/include/reflexcwrapper.h 
b/pypy/module/cppyy/include/reflexcwrapper.h
--- a/pypy/module/cppyy/include/reflexcwrapper.h
+++ b/pypy/module/cppyy/include/reflexcwrapper.h
@@ -19,6 +19,7 @@
 
 /* method/function dispatching */
 void   cppyy_call_v(cppyy_typehandle_t handle, int method_index, 
cppyy_object_t self, int numargs, void* args[]);
+long   cppyy_call_o(cppyy_typehandle_t handle, int method_index, 
cppyy_object_t self, int numargs, void* args[], cppyy_typehandle_t rettype);
 intcppyy_call_b(cppyy_typehandle_t handle, int method_index, 
cppyy_object_t self, int numargs, void* args[]);
 char   cppyy_call_c(cppyy_typehandle_t handle, int method_index, 
cppyy_object_t self, int numargs, void* args[]);
 short  cppyy_call_h(cppyy_typehandle_t handle, int method_index, 
cppyy_object_t self, int numargs, void* args[]);
diff --git a/pypy/module/cppyy/interp_cppyy.py 
b/pypy/module/cppyy/interp_cppyy.py
--- a/pypy/module/cppyy/interp_cppyy.py
+++ b/pypy/module/cppyy/interp_cppyy.py
@@ -339,11 +339,12 @@
 for i in range(num_methods):
 method_name = capi.charp2str_free(capi.c_method_name(self.handle, 
i))
 pymethod_name = helper.map_operator_name(
-method_name, capi.c_method_num_args(self.handle, i),
-capi.charp2str_free(capi.c_method_result_type(self.handle, i)))
-cppfunction = self._make_cppfunction(i)
-overload = args_temp.setdefault(pymethod_name, [])
-overload.append(cppfunction)
+method_name, capi.c_method_num_args(self.handle, i),
+capi.charp2str_free(capi.c_method_result_type(self.handle, 
i)))
+if not self.methods.has_key(pymethod_name):
+cppfunction = self._make_cppfunction(i)
+overload = args_temp.setdefault(pymethod_name, [])
+overload.append(cppfunction)
 for name, functions in args_temp.iteritems():
 overload = W_CPPOverload(self.space, name, functions[:])
 self.methods[name] = overload
@@ -405,21 +406,28 @@
 num_data_members = capi.c_num_data_members(self.handle)
 for i in range(num_data_members):
 data_member_name = 
capi.charp2str_free(capi.c_data_member_name(self.handle, i))
-type_name =