Author: Armin Rigo <ar...@tunes.org>
Branch: errno-again
Changeset: r75432:d1497aa9524d
Date: 2015-01-19 11:59 +0100
http://bitbucket.org/pypy/pypy/changeset/d1497aa9524d/

Log:    hg merge default

diff --git a/lib-python/2.7/collections.py b/lib-python/2.7/collections.py
--- a/lib-python/2.7/collections.py
+++ b/lib-python/2.7/collections.py
@@ -17,6 +17,10 @@
 except ImportError:
     assert '__pypy__' not in _sys.builtin_module_names
     newdict = lambda _ : {}
+try:
+    from __pypy__ import reversed_dict
+except ImportError:
+    reversed_dict = lambda d: reversed(d.keys())
 
 try:
     from thread import get_ident as _get_ident
@@ -29,142 +33,35 @@
 
################################################################################
 
 class OrderedDict(dict):
-    'Dictionary that remembers insertion order'
-    # An inherited dict maps keys to values.
-    # The inherited dict provides __getitem__, __len__, __contains__, and get.
-    # The remaining methods are order-aware.
-    # Big-O running times for all methods are the same as regular dictionaries.
+    '''Dictionary that remembers insertion order.
 
-    # The internal self.__map dict maps keys to links in a doubly linked list.
-    # The circular doubly linked list starts and ends with a sentinel element.
-    # The sentinel element never gets deleted (this simplifies the algorithm).
-    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+    In PyPy all dicts are ordered anyway.  This is mostly useful as a
+    placeholder to mean "this dict must be ordered even on CPython".
 
-    def __init__(self, *args, **kwds):
-        '''Initialize an ordered dictionary.  The signature is the same as
-        regular dictionaries, but keyword arguments are not recommended because
-        their insertion order is arbitrary.
-
-        '''
-        if len(args) > 1:
-            raise TypeError('expected at most 1 arguments, got %d' % len(args))
-        try:
-            self.__root
-        except AttributeError:
-            self.__root = root = []                     # sentinel node
-            root[:] = [root, root, None]
-            self.__map = {}
-        self.__update(*args, **kwds)
-
-    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
-        'od.__setitem__(i, y) <==> od[i]=y'
-        # Setting a new item creates a new link at the end of the linked list,
-        # and the inherited dictionary is updated with the new key/value pair.
-        if key not in self:
-            root = self.__root
-            last = root[0]
-            last[1] = root[0] = self.__map[key] = [last, root, key]
-        return dict_setitem(self, key, value)
-
-    def __delitem__(self, key, dict_delitem=dict.__delitem__):
-        'od.__delitem__(y) <==> del od[y]'
-        # Deleting an existing item uses self.__map to find the link which gets
-        # removed by updating the links in the predecessor and successor nodes.
-        dict_delitem(self, key)
-        link_prev, link_next, _ = self.__map.pop(key)
-        link_prev[1] = link_next                        # update 
link_prev[NEXT]
-        link_next[0] = link_prev                        # update 
link_next[PREV]
-
-    def __iter__(self):
-        'od.__iter__() <==> iter(od)'
-        # Traverse the linked list in order.
-        root = self.__root
-        curr = root[1]                                  # start at the first 
node
-        while curr is not root:
-            yield curr[2]                               # yield the curr[KEY]
-            curr = curr[1]                              # move to next node
+    Known difference: iterating over an OrderedDict which is being
+    concurrently modified raises RuntimeError in PyPy.  In CPython
+    instead we get some behavior that appears reasonable in some
+    cases but is nonsensical in other cases.  This is officially
+    forbidden by the CPython docs, so we forbid it explicitly for now.
+    '''
 
     def __reversed__(self):
-        'od.__reversed__() <==> reversed(od)'
-        # Traverse the linked list in reverse order.
-        root = self.__root
-        curr = root[0]                                  # start at the last 
node
-        while curr is not root:
-            yield curr[2]                               # yield the curr[KEY]
-            curr = curr[0]                              # move to previous node
-
-    def clear(self):
-        'od.clear() -> None.  Remove all items from od.'
-        root = self.__root
-        root[:] = [root, root, None]
-        self.__map.clear()
-        dict.clear(self)
-
-    # -- the following methods do not depend on the internal structure --
-
-    def keys(self):
-        'od.keys() -> list of keys in od'
-        return list(self)
-
-    def values(self):
-        'od.values() -> list of values in od'
-        return [self[key] for key in self]
-
-    def items(self):
-        'od.items() -> list of (key, value) pairs in od'
-        return [(key, self[key]) for key in self]
-
-    def iterkeys(self):
-        'od.iterkeys() -> an iterator over the keys in od'
-        return iter(self)
-
-    def itervalues(self):
-        'od.itervalues -> an iterator over the values in od'
-        for k in self:
-            yield self[k]
-
-    def iteritems(self):
-        'od.iteritems -> an iterator over the (key, value) pairs in od'
-        for k in self:
-            yield (k, self[k])
-
-    update = MutableMapping.update
-
-    __update = update # let subclasses override update without breaking 
__init__
-
-    __marker = object()
-
-    def pop(self, key, default=__marker):
-        '''od.pop(k[,d]) -> v, remove specified key and return the 
corresponding
-        value.  If key is not found, d is returned if given, otherwise KeyError
-        is raised.
-
-        '''
-        if key in self:
-            result = self[key]
-            del self[key]
-            return result
-        if default is self.__marker:
-            raise KeyError(key)
-        return default
-
-    def setdefault(self, key, default=None):
-        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
-        if key in self:
-            return self[key]
-        self[key] = default
-        return default
+        return reversed_dict(self)
 
     def popitem(self, last=True):
         '''od.popitem() -> (k, v), return and remove a (key, value) pair.
         Pairs are returned in LIFO order if last is true or FIFO order if 
false.
 
         '''
-        if not self:
-            raise KeyError('dictionary is empty')
-        key = next(reversed(self) if last else iter(self))
-        value = self.pop(key)
-        return key, value
+        if last:
+            return dict.popitem(self)
+        else:
+            it = dict.__iter__(self)
+            try:
+                k = it.next()
+            except StopIteration:
+                raise KeyError('dictionary is empty')
+            return (k, self.pop(k))
 
     def __repr__(self, _repr_running={}):
         'od.__repr__() <==> repr(od)'
@@ -183,8 +80,6 @@
         'Return state information for pickling'
         items = [[k, self[k]] for k in self]
         inst_dict = vars(self).copy()
-        for k in vars(OrderedDict()):
-            inst_dict.pop(k, None)
         if inst_dict:
             return (self.__class__, (items,), inst_dict)
         return self.__class__, (items,)
@@ -193,17 +88,6 @@
         'od.copy() -> a shallow copy of od'
         return self.__class__(self)
 
-    @classmethod
-    def fromkeys(cls, iterable, value=None):
-        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
-        If not specified, the value defaults to None.
-
-        '''
-        self = cls()
-        for key in iterable:
-            self[key] = value
-        return self
-
     def __eq__(self, other):
         '''od.__eq__(y) <==> od==y.  Comparison to another OD is 
order-sensitive
         while comparison to a regular mapping is order-insensitive.
diff --git a/lib-python/2.7/test/test_collections.py 
b/lib-python/2.7/test/test_collections.py
--- a/lib-python/2.7/test/test_collections.py
+++ b/lib-python/2.7/test/test_collections.py
@@ -578,7 +578,12 @@
             def __repr__(self):
                 return "MySet(%s)" % repr(list(self))
         s = MySet([5,43,2,1])
-        self.assertEqual(s.pop(), 1)
+        # changed from CPython 2.7: it was "s.pop() == 1" but I see
+        # nothing that guarantees a particular order here.  In the
+        # 'all_ordered_dicts' branch of PyPy (or with OrderedDict
+        # instead of sets), it consistently returns 5, but this test
+        # should not rely on this or any other order.
+        self.assert_(s.pop() in [5,43,2,1])
 
     def test_issue8750(self):
         empty = WithSet()
@@ -1010,8 +1015,9 @@
                                           c=3, e=5).items()), pairs)           
     # mixed input
 
         # make sure no positional args conflict with possible kwdargs
-        
self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
-                         ['self'])
+        if '__init__' in OrderedDict.__dict__:   # absent in PyPy
+            
self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
+                             ['self'])
 
         # Make sure that direct calls to __init__ do not clear previous 
contents
         d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -47,6 +47,11 @@
 
 Install build-time dependencies
 -------------------------------
+(**Note**: for some hints on how to translate the Python interpreter under
+Windows, see the `windows document`_)
+
+.. _`windows document`: windows.html
+
 
 To build PyPy on Unix using the C translation backend, you need at least a C
 compiler and ``make`` installed. Further, some optional modules have additional
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -78,6 +78,7 @@
 
 Then you need to execute::
 
+    <path-to-visual>\vc\vcvars.bat
     editbin /largeaddressaware translator.exe
 
 where ``translator.exe`` is the pypy.exe or cpython.exe you will use to
@@ -96,7 +97,7 @@
 
 
 Abridged method (for -Ojit builds using Visual Studio 2008)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------------
 
 Download the versions of all the external packages from
 https://bitbucket.org/pypy/pypy/downloads/local_2.4.zip
@@ -110,7 +111,13 @@
     set INCLUDE=<base_dir>\include;<base_dir>\tcltk\include;%INCLUDE%
     set LIB=<base_dir>\lib;<base_dir>\tcltk\lib;%LIB%
 
-Now you should be good to go. Read on for more information.
+Now you should be good to go. If you choose this method, you do not need
+to download/build anything else. 
+
+Nonabridged method (building from scratch)
+------------------------------------------
+
+If you want to, you can rebuild everything from scratch by continuing.
 
 
 The Boehm garbage collector
diff --git a/pypy/interpreter/astcompiler/optimize.py 
b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -254,11 +254,15 @@
         return rep
 
     def visit_Name(self, name):
-        # Turn loading None into a constant lookup.  Eventaully, we can do this
-        # for True and False, too.
+        # Turn loading None into a constant lookup.  We cannot do this
+        # for True and False, because rebinding them is allowed (2.7).
         if name.id == "None":
-            assert name.ctx == ast.Load
-            return ast.Const(self.space.w_None, name.lineno, name.col_offset)
+            # The compiler refuses to parse "None = ...", but "del None"
+            # is allowed (if pointless).  Check anyway: custom asts that
+            # correspond to "None = ..." can be made by hand.
+            if name.ctx == ast.Load:
+                return ast.Const(self.space.w_None, name.lineno,
+                                 name.col_offset)
         return name
 
     def visit_Tuple(self, tup):
diff --git a/pypy/interpreter/test/test_compiler.py 
b/pypy/interpreter/test/test_compiler.py
--- a/pypy/interpreter/test/test_compiler.py
+++ b/pypy/interpreter/test/test_compiler.py
@@ -654,6 +654,18 @@
         assert ex.match(space, space.w_SyntaxError)
         assert 'hello_world' in space.str_w(space.str(ex.get_w_value(space)))
 
+    def test_del_None(self):
+        snippet = '''if 1:
+            try:
+                del None
+            except NameError:
+                pass
+        '''
+        code = self.compiler.compile(snippet, '<tmp>', 'exec', 0)
+        space = self.space
+        w_d = space.newdict()
+        space.exec_(code, w_d, w_d)
+
 
 class TestPythonAstCompiler_25_grammar(BaseTestCompiler):
     def setup_method(self, method):
diff --git a/pypy/interpreter/test/test_targetpypy.py 
b/pypy/interpreter/test/test_targetpypy.py
--- a/pypy/interpreter/test/test_targetpypy.py
+++ b/pypy/interpreter/test/test_targetpypy.py
@@ -27,6 +27,6 @@
     pypy_setup_home = d['pypy_setup_home']
     lls = rffi.str2charp(__file__)
     res = pypy_setup_home(lls, rffi.cast(rffi.INT, 1))
-    assert lltype.typeOf(res) == rffi.LONG
+    assert lltype.typeOf(res) == rffi.INT
     assert rffi.cast(lltype.Signed, res) == 0
     lltype.free(lls, flavor='raw')
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -78,6 +78,7 @@
         'newlist_hint'              : 'interp_magic.newlist_hint',
         'add_memory_pressure'       : 'interp_magic.add_memory_pressure',
         'newdict'                   : 'interp_dict.newdict',
+        'reversed_dict'             : 'interp_dict.reversed_dict',
         'strategy'                  : 'interp_magic.strategy',  # dict,set,list
         'set_debug'                 : 'interp_magic.set_debug',
         'locals_to_fast'            : 'interp_magic.locals_to_fast',
diff --git a/pypy/module/__pypy__/interp_dict.py 
b/pypy/module/__pypy__/interp_dict.py
--- a/pypy/module/__pypy__/interp_dict.py
+++ b/pypy/module/__pypy__/interp_dict.py
@@ -30,3 +30,17 @@
         return space.newdict(strdict=True)
     else:
         raise oefmt(space.w_TypeError, "unknown type of dict %s", type)
+
+def reversed_dict(space, w_obj):
+    """Enumerate the keys in a dictionary object in reversed order.
+
+    This is a __pypy__ function instead of being simply done by calling
+    reversed(), for CPython compatibility: dictionaries are only ordered
+    on PyPy.  You should use the collections.OrderedDict class for cases
+    where ordering is important.  That class implements __reversed__ by
+    calling __pypy__.reversed_dict().
+    """
+    from pypy.objspace.std.dictmultiobject import W_DictMultiObject
+    if not isinstance(w_obj, W_DictMultiObject):
+        raise OperationError(space.w_TypeError, space.w_None)
+    return w_obj.nondescr_reversed_dict(space)
diff --git a/pypy/module/_multibytecodec/test/test_translation.py 
b/pypy/module/_multibytecodec/test/test_translation.py
--- a/pypy/module/_multibytecodec/test/test_translation.py
+++ b/pypy/module/_multibytecodec/test/test_translation.py
@@ -1,8 +1,11 @@
 from pypy.module._multibytecodec import c_codecs
 from rpython.translator.c.test import test_standalone
+from rpython.config.translationoption import get_combined_translation_config
 
 
 class TestTranslation(test_standalone.StandaloneTests):
+    config = get_combined_translation_config(translating=True)
+    config.translation.gc = 'boehm'
 
     def test_translation(self):
         #
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py 
b/pypy/module/pypyjit/test_pypy_c/test_containers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_containers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -43,9 +43,9 @@
         # can't change ;)
         assert loop.match_by_id("getitem", """
             ...
-            i26 = call(ConstClass(ll_dict_lookup), p18, p6, i25, descr=...)
+            i26 = call(ConstClass(ll_call_lookup_function), p18, p6, i25, 0, 
descr=...)
             ...
-            p33 = getinteriorfield_gc(p31, i26, descr=<InteriorFieldDescr 
<FieldP dictentry.value .*>>)
+            p33 = getinteriorfield_gc(p31, i26, descr=<InteriorFieldDescr 
<FieldP odictentry.value .*>>)
             ...
         """)
 
@@ -68,25 +68,29 @@
             guard_no_exception(descr=...)
             i12 = call(ConstClass(ll_strhash), p10, descr=<Calli . r EF=0>)
             p13 = new(descr=...)
-            p15 = new_array_clear(8, descr=<ArrayX .*>)
-            setfield_gc(p13, p15, descr=<FieldP dicttable.entries .*>)
-            i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 
descr=<Calli . rri EF=4 OS=4>)
+            p15 = new_array_clear(8, descr=<ArrayU 1>)
             {{{
-            setfield_gc(p13, 16, descr=<FieldS dicttable.resize_counter .*>)
-            setfield_gc(p13, 0, descr=<FieldS dicttable.num_items .+>)
+            setfield_gc(p13, 0, descr=<FieldS dicttable.num_ever_used_items 
.+>)
+            setfield_gc(p13, p15, descr=<FieldP dicttable.indexes .+>)
+            setfield_gc(p13, ConstPtr(0), descr=<FieldP dicttable.entries .+>)
+            }}}
+            i17 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 
1, descr=<Calli . rrii EF=4 OS=4>)
+            {{{
+            setfield_gc(p13, 0, descr=<FieldS dicttable.lookup_function_no .+>)
+            setfield_gc(p13, 0, descr=<FieldS dicttable.num_live_items .+>)
+            setfield_gc(p13, 16, descr=<FieldS dicttable.resize_counter .+>)
             }}}
             guard_no_exception(descr=...)
             p20 = new_with_vtable(ConstClass(W_IntObject))
             call(ConstClass(_ll_dict_setitem_lookup_done_trampoline), p13, 
p10, p20, i12, i17, descr=<Callv 0 rrrii EF=4>)
             setfield_gc(p20, i5, descr=<FieldS .*W_IntObject.inst_intval .*>)
             guard_no_exception(descr=...)
-            i23 = call(ConstClass(ll_dict_lookup_trampoline), p13, p10, i12, 
descr=<Calli . rri EF=4 OS=4>)
+            i23 = call(ConstClass(ll_call_lookup_function), p13, p10, i12, 0, 
descr=<Calli . rrii EF=4 OS=4>)
             guard_no_exception(descr=...)
-            i26 = int_and(i23, #)
-            i27 = int_is_true(i26)
+            i27 = int_lt(i23, 0)
             guard_false(i27, descr=...)
             p28 = getfield_gc(p13, descr=<FieldP dicttable.entries .*>)
-            p29 = getinteriorfield_gc(p28, i23, descr=<InteriorFieldDescr 
<FieldP dictentry.value .*>>)
+            p29 = getinteriorfield_gc(p28, i23, descr=<InteriorFieldDescr 
<FieldP odictentry.value .*>>)
             guard_nonnull_class(p29, ConstClass(W_IntObject), descr=...)
             i31 = getfield_gc_pure(p29, descr=<FieldS 
.*W_IntObject.inst_intval .*>)
             i32 = int_sub_ovf(i31, i5)
diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py 
b/pypy/module/pypyjit/test_pypy_c/test_instance.py
--- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
@@ -151,15 +151,13 @@
         assert loop.match_by_id('loadattr1',
         '''
         guard_not_invalidated(descr=...)
-        i19 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...)
+        i19 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...)
         guard_no_exception(descr=...)
-        i21 = int_and(i19, _)
-        i22 = int_is_true(i21)
+        i22 = int_lt(i19, 0)
         guard_true(i22, descr=...)
-        i26 = call(ConstClass(ll_dict_lookup), _, _, _, descr=...)
+        i26 = call(ConstClass(ll_call_lookup_function), _, _, _, 0, descr=...)
         guard_no_exception(descr=...)
-        i28 = int_and(i26, _)
-        i29 = int_is_true(i28)
+        i29 = int_lt(i26, 0)
         guard_true(i29, descr=...)
         ''')
         assert loop.match_by_id('loadattr2', "")   # completely folded away
diff --git a/pypy/objspace/std/dictmultiobject.py 
b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -258,6 +258,17 @@
         """D.itervalues() -> an iterator over the values of D"""
         return W_DictMultiIterValuesObject(space, self.itervalues())
 
+    def nondescr_reversed_dict(self, space):
+        """Not exposed directly to app-level, but via __pypy__.reversed_dict().
+        """
+        if self.strategy.has_iterreversed:
+            it = self.strategy.iterreversed(self)
+            return W_DictMultiIterKeysObject(space, it)
+        else:
+            # fall-back
+            w_keys = self.w_keys()
+            return space.call_method(w_keys, '__reversed__')
+
     def descr_viewitems(self, space):
         """D.viewitems() -> a set-like object providing a view on D's items"""
         return W_DictViewItemsObject(space, self)
@@ -503,6 +514,9 @@
     def getiteritems(self, w_dict):
         raise NotImplementedError
 
+    has_iterreversed = False
+    # no 'getiterreversed': no default implementation available
+
     def rev_update1_dict_dict(self, w_dict, w_updatedict):
         iteritems = self.iteritems(w_dict)
         while True:
@@ -623,6 +637,9 @@
     def getiteritems(self, w_dict):
         return iter([])
 
+    def getiterreversed(self, w_dict):
+        return iter([])
+
 
 # Iterator Implementation base classes
 
@@ -747,6 +764,17 @@
                 else:
                     return None, None
 
+    class IterClassReversed(BaseKeyIterator):
+        def __init__(self, space, strategy, impl):
+            self.iterator = strategy.getiterreversed(impl)
+            BaseIteratorImplementation.__init__(self, space, strategy, impl)
+
+        def next_key_entry(self):
+            for key in self.iterator:
+                return wrapkey(self.space, key)
+            else:
+                return None
+
     def iterkeys(self, w_dict):
         return IterClassKeys(self.space, self, w_dict)
 
@@ -756,6 +784,12 @@
     def iteritems(self, w_dict):
         return IterClassItems(self.space, self, w_dict)
 
+    if hasattr(dictimpl, 'getiterreversed'):
+        def iterreversed(self, w_dict):
+            return IterClassReversed(self.space, self, w_dict)
+        dictimpl.iterreversed = iterreversed
+        dictimpl.has_iterreversed = True
+
     @jit.look_inside_iff(lambda self, w_dict, w_updatedict:
                          w_dict_unrolling_heuristic(w_dict))
     def rev_update1_dict_dict(self, w_dict, w_updatedict):
@@ -932,6 +966,9 @@
     def getiteritems(self, w_dict):
         return self.unerase(w_dict.dstorage).iteritems()
 
+    def getiterreversed(self, w_dict):
+        return objectmodel.reversed_dict(self.unerase(w_dict.dstorage))
+
     def prepare_update(self, w_dict, num_extra):
         objectmodel.prepare_dict_update(self.unerase(w_dict.dstorage),
                                         num_extra)
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py 
b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -254,6 +254,21 @@
             values.append(k)
         assert values == d.values()
 
+    def test_reversed_dict(self):
+        import __pypy__
+        for d in [{}, {1: 2, 3: 4, 5: 6}, {"a": 5, "b": 2, "c": 6}]:
+            assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1]
+        raises(TypeError, __pypy__.reversed_dict, 42)
+
+    def test_reversed_dict_runtimeerror(self):
+        import __pypy__
+        d = {1: 2, 3: 4, 5: 6}
+        it = __pypy__.reversed_dict(d)
+        key = it.next()
+        assert key in [1, 3, 5]
+        del d[key]
+        raises(RuntimeError, it.next)
+
     def test_keys(self):
         d = {1: 2, 3: 4}
         kys = d.keys()
diff --git a/pypy/objspace/std/test/test_mapdict.py 
b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -700,6 +700,15 @@
         del a.x
         raises(AttributeError, "a.x")
 
+    def test_reversed_dict(self):
+        import __pypy__
+        class X(object):
+            pass
+        x = X(); x.a = 10; x.b = 20; x.c = 30
+        d = x.__dict__
+        assert list(__pypy__.reversed_dict(d)) == d.keys()[::-1]
+
+
 class AppTestWithMapDictAndCounters(object):
     spaceconfig = {"objspace.std.withmapdict": True,
                    "objspace.std.withmethodcachecounter": True}
diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py
--- a/rpython/annotator/model.py
+++ b/rpython/annotator/model.py
@@ -392,6 +392,8 @@
         assert isinstance(dct2, SomeOrderedDict), "OrderedDict.update(dict) 
not allowed"
         dct1.dictdef.union(dct2.dictdef)
 
+SomeDict = SomeOrderedDict      # all dicts are ordered!
+
 
 class SomeIterator(SomeObject):
     "Stands for an iterator returning objects from a given container."
diff --git a/rpython/annotator/test/test_annrpython.py 
b/rpython/annotator/test/test_annrpython.py
--- a/rpython/annotator/test/test_annrpython.py
+++ b/rpython/annotator/test/test_annrpython.py
@@ -1877,7 +1877,7 @@
                 return None
         a = self.RPythonAnnotator()
         s = a.build_types(f, [int])
-        assert s.knowntype == dict
+        assert s.knowntype == annmodel.SomeOrderedDict.knowntype
 
     def test_const_list_and_none(self):
         def g(l=None):
diff --git a/rpython/jit/backend/arm/test/test_regalloc_mov.py 
b/rpython/jit/backend/arm/test/test_regalloc_mov.py
--- a/rpython/jit/backend/arm/test/test_regalloc_mov.py
+++ b/rpython/jit/backend/arm/test/test_regalloc_mov.py
@@ -503,7 +503,6 @@
     def test_unsupported(self):
         py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
imm(1))')
         py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
imm_float(1))')
-        py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
r(1))')
         py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
vfp(1))')
         py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
stack(1))')
         py.test.raises(AssertionError, 'self.asm.regalloc_mov(raw_stack(0), 
stack_float(1))')
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py 
b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -2,11 +2,12 @@
 
 from rpython.jit.codewriter.effectinfo import EffectInfo
 from rpython.jit.metainterp.optimizeopt.util import args_dict
-from rpython.jit.metainterp.history import Const
+from rpython.jit.metainterp.history import Const, ConstInt
 from rpython.jit.metainterp.jitexc import JitException
 from rpython.jit.metainterp.optimizeopt.optimizer import Optimization,\
      MODE_ARRAY, LEVEL_KNOWNCLASS, REMOVED
 from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
+from rpython.jit.metainterp.optimizeopt.intutils import IntBound
 from rpython.jit.metainterp.optimize import InvalidLoop
 from rpython.jit.metainterp.resoperation import rop, ResOperation
 from rpython.rlib.objectmodel import we_are_translated
@@ -325,6 +326,29 @@
         self.emit_operation(op)
 
     def _optimize_CALL_DICT_LOOKUP(self, op):
+        # Cache consecutive lookup() calls on the same dict and key,
+        # depending on the 'flag_store' argument passed:
+        # FLAG_LOOKUP: always cache and use the cached result.
+        # FLAG_STORE:  don't cache (it might return -1, which would be
+        #                incorrect for future lookups); but if found in
+        #                the cache and the cached value was already checked
+        #                non-negative, then we can reuse it.
+        # FLAG_DELETE: never cache, never use the cached result (because
+        #                if there is a cached result, the FLAG_DELETE call
+        #                is needed for its side-effect of removing it).
+        #                In theory we could cache a -1 for the case where
+        #                the delete is immediately followed by a lookup,
+        #                but too obscure.
+        #
+        from rpython.rtyper.lltypesystem.rordereddict import FLAG_LOOKUP
+        from rpython.rtyper.lltypesystem.rordereddict import FLAG_STORE
+        flag_value = self.getvalue(op.getarg(4))
+        if not flag_value.is_constant():
+            return False
+        flag = flag_value.get_constant_int()
+        if flag != FLAG_LOOKUP and flag != FLAG_STORE:
+            return False
+        #
         descrs = op.getdescr().get_extra_info().extradescrs
         assert descrs        # translation hint
         descr1 = descrs[0]
@@ -333,13 +357,20 @@
         except KeyError:
             d = self.cached_dict_reads[descr1] = args_dict()
             self.corresponding_array_descrs[descrs[1]] = descr1
-        args = self.optimizer.make_args_key(op)
+        #
+        key = [self.optimizer.get_box_replacement(op.getarg(1)),   # dict
+               self.optimizer.get_box_replacement(op.getarg(2))]   # key
+               # other args can be ignored here (hash, store_flag)
         try:
-            res_v = d[args]
+            res_v = d[key]
         except KeyError:
-            d[args] = self.getvalue(op.result)
+            if flag == FLAG_LOOKUP:
+                d[key] = self.getvalue(op.result)
             return False
         else:
+            if flag != FLAG_LOOKUP:
+                if not res_v.getintbound().known_ge(IntBound(0, 0)):
+                    return False
             self.make_equal_to(op.result, res_v)
             self.last_emitted_operation = REMOVED
             return True
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py 
b/rpython/jit/metainterp/optimizeopt/optimizer.py
--- a/rpython/jit/metainterp/optimizeopt/optimizer.py
+++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
@@ -148,6 +148,12 @@
         if self.getlevel() < LEVEL_NONNULL:
             self.setlevel(LEVEL_NONNULL)
 
+    def get_constant_int(self):
+        assert self.is_constant()
+        box = self.box
+        assert isinstance(box, ConstInt)
+        return box.getint()
+
     def is_virtual(self):
         # Don't check this with 'isinstance(_, VirtualValue)'!
         # Even if it is a VirtualValue, the 'box' can be non-None,
diff --git a/rpython/jit/metainterp/test/test_dict.py 
b/rpython/jit/metainterp/test/test_dict.py
--- a/rpython/jit/metainterp/test/test_dict.py
+++ b/rpython/jit/metainterp/test/test_dict.py
@@ -181,15 +181,21 @@
                 n = d[y]
             return d[Wrapper(str(n + 1))]
 
+        # XXX <arigo> unsure I see the point of this test: the repeated
+        # dict lookup is *not* elided so far, and the test happens to
+        # check this...  with rdict.py, it's a write followed by a read,
+        # where the dict cache is thrown away after the first lookup
+        # (correctly: we don't want the two lookups to return the exact
+        # same result!).  With rordereddict.py, FLAG_STORE lookups are
+        # not cached anyway.
         res = self.meta_interp(f, [100], listops=True)
         assert res == f(50)
         self.check_resops({'new_array_clear': 2, 'getfield_gc': 2,
-                           'guard_true': 2, 'jump': 1,
+                           'guard_true': 4, 'jump': 1,
                            'new_with_vtable': 2, 'getinteriorfield_gc': 2,
-                           'setfield_gc': 8, 'int_gt': 2, 'int_sub': 2,
-                           'call': 10, 'int_and': 2,
-                           'guard_no_exception': 8, 'new': 2,
-                           'guard_false': 2, 'int_is_true': 2})
+                           'setfield_gc': 14, 'int_gt': 2, 'int_sub': 2,
+                           'call': 10, 'int_ge': 2,
+                           'guard_no_exception': 8, 'new': 2})
 
     def test_unrolling_of_dict_iter(self):
         driver = JitDriver(greens = [], reds = ['n'])
@@ -223,7 +229,7 @@
             return s
 
         self.meta_interp(f, [10])
-        # XXX should be one getinteriorfield_gc
+        # XXX should be one getinteriorfield_gc.  At least it's one call.
         self.check_simple_loop(call=1, getinteriorfield_gc=2,
                                guard_no_exception=1)
 
@@ -244,7 +250,7 @@
             return s
 
         self.meta_interp(f, [10])
-        # XXX should be one getinteriorfield_gc
+        # XXX should be one getinteriorfield_gc.  At least it's one call.
         self.check_simple_loop(call=1, getinteriorfield_gc=2,
                                guard_no_exception=1)
 
@@ -259,7 +265,7 @@
                 driver.jit_merge_point()
                 index = indexes[n & 1]
                 s += d[index]
-                d['aa'] += 1 # this will invalidate the index
+                d['aa'] = 13 # this will invalidate the index
                 s += d[index]
                 n -= 1
             return s
@@ -355,7 +361,7 @@
                     if n in mdict:
                         raise Exception
         self.meta_interp(f, [10])
-        self.check_simple_loop(call_may_force=0, call=3)
+        self.check_simple_loop(call_may_force=0, call=4)
 
     def test_dict_virtual(self):
         myjitdriver = JitDriver(greens = [], reds = 'auto')
diff --git a/rpython/memory/gctypelayout.py b/rpython/memory/gctypelayout.py
--- a/rpython/memory/gctypelayout.py
+++ b/rpython/memory/gctypelayout.py
@@ -406,6 +406,11 @@
             return
         self.iseen_roots[value] = True
 
+        if isinstance(TYPE, lltype.GcOpaqueType):
+            self.consider_constant(lltype.typeOf(value.container),
+                                   value.container, gc)
+            return
+
         if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
             typeid = self.get_type_id(TYPE)
             hdr = gc.gcheaderbuilder.new_header(value)
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -753,6 +753,17 @@
         dict._prepare_dict_update(n_elements)
         # ^^ call an extra method that doesn't exist before translation
 
+@specialize.call_location()
+def reversed_dict(d):
+    """Equivalent to reversed(ordered_dict), but works also for
+    regular dicts."""
+    # note that there is also __pypy__.reversed_dict(), which we could
+    # try to use here if we're not translated and running on top of pypy,
+    # but that seems a bit pointless
+    if not we_are_translated():
+        d = d.keys()
+    return reversed(d)
+
 
 # ____________________________________________________________
 
diff --git a/rpython/rlib/rtermios.py b/rpython/rlib/rtermios.py
--- a/rpython/rlib/rtermios.py
+++ b/rpython/rlib/rtermios.py
@@ -3,6 +3,7 @@
 # returns list of mostly-strings of length one, but with few ints
 # inside, so we make sure it works
 
+import sys
 from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.rtyper.tool import rffi_platform
 from rpython.translator.tool.cbuild import ExternalCompilationInfo
@@ -97,6 +98,10 @@
 for name in CONSTANT_NAMES:
     value = c_config[name]
     if value is not None:
+        if value < -sys.maxsize-1 or value >= 2 * (sys.maxsize+1):
+            raise AssertionError("termios: %r has value %r, too large" % (
+                name, value))
+        value = intmask(value)     # wrap unsigned long numbers to signed longs
         globals()[name] = value
         all_constants[name] = value
             
diff --git a/rpython/rlib/test/test_objectmodel.py 
b/rpython/rlib/test/test_objectmodel.py
--- a/rpython/rlib/test/test_objectmodel.py
+++ b/rpython/rlib/test/test_objectmodel.py
@@ -341,6 +341,21 @@
         res = self.interpret(g, [3])
         assert res == 42     # "did not crash"
 
+    def test_reversed_dict(self):
+        d1 = {2:3, 4:5, 6:7}
+        def g():
+            n1 = 0
+            for key in d1:
+                n1 = n1 * 10 + key
+            n2 = 0
+            for key in reversed_dict(d1):
+                n2 = n2 * 10 + key
+            return n1 * 10000 + n2
+        got = str(g())
+        assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1]
+        got = str(self.interpret(g, []))
+        assert len(got) == 7 and got[3] == '0' and got[:3] == got[6:3:-1]
+
     def test_compute_hash(self):
         class Foo(object):
             pass
diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py
--- a/rpython/rtyper/llinterp.py
+++ b/rpython/rtyper/llinterp.py
@@ -740,6 +740,10 @@
         return lltype.cast_opaque_ptr(RESTYPE, obj)
     op_cast_opaque_ptr.need_result_type = True
 
+    def op_length_of_simple_gcarray_from_opaque(self, obj):
+        checkptr(obj)
+        return lltype.length_of_simple_gcarray_from_opaque(obj)
+
     def op_cast_ptr_to_adr(self, ptr):
         checkptr(ptr)
         return llmemory.cast_ptr_to_adr(ptr)
diff --git a/rpython/rtyper/lltypesystem/lloperation.py 
b/rpython/rtyper/lltypesystem/lloperation.py
--- a/rpython/rtyper/lltypesystem/lloperation.py
+++ b/rpython/rtyper/lltypesystem/lloperation.py
@@ -396,6 +396,7 @@
     'direct_arrayitems':    LLOp(canfold=True),
     'direct_ptradd':        LLOp(canfold=True),
     'cast_opaque_ptr':      LLOp(sideeffects=False),
+    'length_of_simple_gcarray_from_opaque': LLOp(sideeffects=False),
 
     # __________ address operations __________
 
diff --git a/rpython/rtyper/lltypesystem/lltype.py 
b/rpython/rtyper/lltypesystem/lltype.py
--- a/rpython/rtyper/lltypesystem/lltype.py
+++ b/rpython/rtyper/lltypesystem/lltype.py
@@ -1025,6 +1025,30 @@
     return SomePtr(ll_ptrtype=typeOf(cast_p))
 
 
+def length_of_simple_gcarray_from_opaque(opaque_ptr):
+    CURTYPE = typeOf(opaque_ptr)
+    if not isinstance(CURTYPE, Ptr):
+        raise TypeError("can only cast pointers to other pointers")
+    if not isinstance(CURTYPE.TO, GcOpaqueType):
+        raise TypeError("expected a GcOpaqueType")
+    try:
+        c = opaque_ptr._obj.container
+    except AttributeError:
+        # if 'opaque_ptr' is already some _llgcopaque, hack its length
+        # by casting it to a random GcArray type and hoping
+        from rpython.rtyper.lltypesystem import rffi
+        p = rffi.cast(Ptr(GcArray(Signed)), opaque_ptr)
+        return len(p)
+    else:
+        return c.getlength()
+
+@analyzer_for(length_of_simple_gcarray_from_opaque)
+def ann_length_of_simple_gcarray_from_opaque(s_p):
+    assert isinstance(s_p, SomePtr), "casting of non-pointer: %r" % s_p
+    assert isinstance(s_p.ll_ptrtype.TO, GcOpaqueType)
+    return SomeInteger(nonneg=True)
+
+
 def direct_fieldptr(structptr, fieldname):
     """Get a pointer to a field in the struct.  The resulting
     pointer is actually of type Ptr(FixedSizeArray(FIELD, 1)).
diff --git a/rpython/rtyper/lltypesystem/rordereddict.py 
b/rpython/rtyper/lltypesystem/rordereddict.py
--- a/rpython/rtyper/lltypesystem/rordereddict.py
+++ b/rpython/rtyper/lltypesystem/rordereddict.py
@@ -34,16 +34,18 @@
 #        {byte, short, int, long} *indexes;
 #        dictentry *entries;
 #        lookup_function_no; # one of the four possible functions for different
-#                         # size dicts
+#                       # size dicts; the rest of the word is a counter for how
+#                       # many 'entries' at the start are known to be deleted
 #        (Function DICTKEY, DICTKEY -> bool) *fnkeyeq;
 #        (Function DICTKEY -> int) *fnkeyhash;
 #    }
 #
 #
 
+@jit.look_inside_iff(lambda d, key, hash, flag: jit.isvirtual(d))
+@jit.oopspec('ordereddict.lookup(d, key, hash, flag)')
 def ll_call_lookup_function(d, key, hash, flag):
-    DICT = lltype.typeOf(d).TO
-    fun = d.lookup_function_no
+    fun = d.lookup_function_no & FUNC_MASK
     if fun == FUNC_BYTE:
         return ll_dict_lookup(d, key, hash, flag, TYPE_BYTE)
     elif fun == FUNC_SHORT:
@@ -408,6 +410,8 @@
 
 IS_64BIT = sys.maxint != 2 ** 31 - 1
 
+FUNC_SHIFT = 2
+FUNC_MASK  = 0x03  # two bits
 if IS_64BIT:
     FUNC_BYTE, FUNC_SHORT, FUNC_INT, FUNC_LONG = range(4)
 else:
@@ -441,28 +445,46 @@
         d.lookup_function_no = FUNC_LONG
 
 def ll_clear_indexes(d, n):
-    if n <= 256:
+    fun = d.lookup_function_no & FUNC_MASK
+    d.lookup_function_no = fun
+    if fun == FUNC_BYTE:
         rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_BYTE, d.indexes))
-    elif n <= 65536:
+    elif fun == FUNC_SHORT:
         rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_SHORT, d.indexes))
-    elif IS_64BIT and n <= 2 ** 32:
+    elif IS_64BIT and fun == FUNC_INT:
         rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_INT, d.indexes))
+    elif fun == FUNC_LONG:
+        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes))
     else:
-        rgc.ll_arrayclear(lltype.cast_opaque_ptr(DICTINDEX_LONG, d.indexes))
+        assert False
 
+@jit.dont_look_inside
 def ll_call_insert_clean_function(d, hash, i):
-    DICT = lltype.typeOf(d).TO
-    if d.lookup_function_no == FUNC_BYTE:
+    fun = d.lookup_function_no & FUNC_MASK
+    if fun == FUNC_BYTE:
         ll_dict_store_clean(d, hash, i, TYPE_BYTE)
-    elif d.lookup_function_no == FUNC_SHORT:
+    elif fun == FUNC_SHORT:
         ll_dict_store_clean(d, hash, i, TYPE_SHORT)
-    elif IS_64BIT and d.lookup_function_no == FUNC_INT:
+    elif IS_64BIT and fun == FUNC_INT:
         ll_dict_store_clean(d, hash, i, TYPE_INT)
-    elif d.lookup_function_no == FUNC_LONG:
+    elif fun == FUNC_LONG:
         ll_dict_store_clean(d, hash, i, TYPE_LONG)
     else:
         assert False
 
+def ll_call_delete_by_entry_index(d, hash, i):
+    fun = d.lookup_function_no & FUNC_MASK
+    if fun == FUNC_BYTE:
+        ll_dict_delete_by_entry_index(d, hash, i, TYPE_BYTE)
+    elif fun == FUNC_SHORT:
+        ll_dict_delete_by_entry_index(d, hash, i, TYPE_SHORT)
+    elif IS_64BIT and fun == FUNC_INT:
+        ll_dict_delete_by_entry_index(d, hash, i, TYPE_INT)
+    elif fun == FUNC_LONG:
+        ll_dict_delete_by_entry_index(d, hash, i, TYPE_LONG)
+    else:
+        assert False
+
 def ll_valid_from_flag(entries, i):
     return entries[i].f_valid
 
@@ -513,7 +535,7 @@
 
 def ll_dict_getitem(d, key):
     index = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP)
-    if index != -1:
+    if index >= 0:
         return d.entries[index].value
     else:
         raise KeyError
@@ -572,6 +594,7 @@
 _ll_dict_rescue._dont_inline_ = True
 
 def _ll_dict_insertclean(d, key, value, hash):
+    # never translated
     ENTRY = lltype.typeOf(d.entries).TO.OF
     ll_call_insert_clean_function(d, hash, d.num_ever_used_items)
     entry = d.entries[d.num_ever_used_items]
@@ -590,25 +613,24 @@
     # xxx Haaaack: returns len(d.indexes).  Works independently of
     # the exact type pointed to by d, using a forced cast...
     # Must only be called by @jit.dont_look_inside functions.
-    return len(rffi.cast(DICTINDEX_BYTE, d.indexes))
+    return lltype.length_of_simple_gcarray_from_opaque(d.indexes)
 
 def _overallocate_entries_len(baselen):
     # This over-allocates proportional to the list size, making room
-    # for additional growth.  The over-allocation is mild, but is
-    # enough to give linear-time amortized behavior over a long
-    # sequence of appends() in the presence of a poorly-performing
-    # system malloc().
-    # The growth pattern is:  0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ...
-    newsize = baselen + 1
-    if newsize < 9:
-        some = 3
-    else:
-        some = 6
-    some += newsize >> 3
-    return newsize + some
+    # for additional growth.  This over-allocates slightly more eagerly
+    # than with regular lists.  The idea is that there are many more
+    # lists than dicts around in PyPy, and dicts of 5 to 8 items are
+    # not that rare (so a single jump from 0 to 8 is a good idea).
+    # The growth pattern is:  0, 8, 17, 27, 38, 50, 64, 80, 98, ...
+    newsize = baselen + (baselen >> 3)
+    return newsize + 8
 
-@jit.dont_look_inside
+@jit.look_inside_iff(lambda d: jit.isvirtual(d))
 def ll_dict_grow(d):
+    # note: this @jit.look_inside_iff is here to inline the three lines
+    # at the end of this function.  It's important because dicts start
+    # with a length-zero 'd.entries' which must be grown as soon as we
+    # insert an element.
     if d.num_live_items < d.num_ever_used_items // 2:
         # At least 50% of the allocated entries are dead, so perform a
         # compaction. If ll_dict_remove_deleted_items detects that over
@@ -619,11 +641,29 @@
 
     new_allocated = _overallocate_entries_len(len(d.entries))
 
-    # Detect an obscure case where the indexes numeric type is too
-    # small to store all the entry indexes
-    if (max(128, _ll_len_of_d_indexes(d)) - new_allocated
-                   < MIN_INDEXES_MINUS_ENTRIES):
+    # Detect a relatively rare case where the indexes numeric type is too
+    # small to store all the entry indexes: there would be 'new_allocated'
+    # entries, which may in corner cases be larger than 253 even though we
+    # have single bytes in 'd.indexes' (and the same for the larger
+    # boundaries).  The 'd.indexes' hashtable is never more than 2/3rd
+    # full, so we know that 'd.num_live_items' should be at most 2/3 * 256
+    # (or 65536 or etc.) so after the ll_dict_remove_deleted_items() below
+    # at least 1/3rd items in 'd.entries' are free.
+    fun = d.lookup_function_no & FUNC_MASK
+    toobig = False
+    if fun == FUNC_BYTE:
+        assert d.num_live_items < ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES)
+        toobig = new_allocated > ((1 << 8) - MIN_INDEXES_MINUS_ENTRIES)
+    elif fun == FUNC_SHORT:
+        assert d.num_live_items < ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES)
+        toobig = new_allocated > ((1 << 16) - MIN_INDEXES_MINUS_ENTRIES)
+    elif IS_64BIT and fun == FUNC_INT:
+        assert d.num_live_items < ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES)
+        toobig = new_allocated > ((1 << 32) - MIN_INDEXES_MINUS_ENTRIES)
+    #
+    if toobig:
         ll_dict_remove_deleted_items(d)
+        assert d.num_live_items == d.num_ever_used_items
         return True
 
     newitems = lltype.malloc(lltype.typeOf(d).TO.entries.TO, new_allocated)
@@ -631,6 +671,7 @@
     d.entries = newitems
     return False
 
+@jit.dont_look_inside
 def ll_dict_remove_deleted_items(d):
     if d.num_live_items < len(d.entries) // 4:
         # At least 75% of the allocated entries are dead, so shrink the memory
@@ -684,7 +725,7 @@
 
 def ll_dict_delitem(d, key):
     index = d.lookup_function(d, key, d.keyhash(key), FLAG_DELETE)
-    if index == -1:
+    if index < 0:
         raise KeyError
     _ll_dict_del(d, index)
 
@@ -701,7 +742,12 @@
     if ENTRIES.must_clear_value:
         entry.value = lltype.nullptr(ENTRY.value.TO)
 
-    if index == d.num_ever_used_items - 1:
+    if d.num_live_items == 0:
+        # Dict is now empty.  Reset these fields.
+        d.num_ever_used_items = 0
+        d.lookup_function_no &= FUNC_MASK
+
+    elif index == d.num_ever_used_items - 1:
         # The last element of the ordereddict has been deleted. Instead of
         # simply marking the item as dead, we can safely reuse it. Since it's
         # also possible that there are more dead items immediately behind the
@@ -746,7 +792,9 @@
     else:
         ll_malloc_indexes_and_choose_lookup(d, new_size)
     d.resize_counter = new_size * 2 - d.num_live_items * 3
-    assert d.resize_counter > 0
+    ll_assert(d.resize_counter > 0, "reindex: resize_counter <= 0")
+    ll_assert((d.lookup_function_no >> FUNC_SHIFT) == 0,
+              "reindex: lookup_fun >> SHIFT")
     #
     entries = d.entries
     i = 0
@@ -769,23 +817,11 @@
 FLAG_LOOKUP = 0
 FLAG_STORE = 1
 FLAG_DELETE = 2
-FLAG_DELETE_TRY_HARD = 3
 
 @specialize.memo()
 def _ll_ptr_to_array_of(T):
     return lltype.Ptr(lltype.GcArray(T))
 
-def ll_kill_something(d, T):
-    INDEXES = _ll_ptr_to_array_of(T)
-    i = 0
-    indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes)
-    while True:
-        index = rffi.cast(lltype.Signed, indexes[i])
-        if index >= VALID_OFFSET:
-            indexes[i] = rffi.cast(T, DELETED)
-            return index
-        i += 1
-
 @jit.look_inside_iff(lambda d, key, hash, store_flag, T:
                      jit.isvirtual(d) and jit.isconstant(key))
 @jit.oopspec('ordereddict.lookup(d, key, hash, store_flag, T)')
@@ -827,8 +863,6 @@
         # pristine entry -- lookup failed
         if store_flag == FLAG_STORE:
             indexes[i] = rffi.cast(T, d.num_ever_used_items + VALID_OFFSET)
-        elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD:
-            return ll_kill_something(d, T)
         return -1
 
     # In the loop, a deleted entry (everused and not valid) is by far
@@ -845,8 +879,6 @@
                     deletedslot = intmask(i)
                 indexes[deletedslot] = rffi.cast(T, d.num_ever_used_items +
                                                  VALID_OFFSET)
-            elif d.paranoia and store_flag == FLAG_DELETE_TRY_HARD:
-                return ll_kill_something(d, T)
             return -1
         elif index >= VALID_OFFSET:
             checkingkey = entries[index - VALID_OFFSET].key
@@ -881,17 +913,38 @@
     mask = len(indexes) - 1
     i = r_uint(hash & mask)
     perturb = r_uint(hash)
-    while rffi.cast(lltype.Signed, indexes[i]) != 0:
+    while rffi.cast(lltype.Signed, indexes[i]) != FREE:
         i = (i << 2) + i + perturb + 1
         i = i & mask
         perturb >>= PERTURB_SHIFT
     indexes[i] = rffi.cast(T, index + VALID_OFFSET)
 
+def ll_dict_delete_by_entry_index(d, hash, locate_index, T):
+    # Another simplified version of ll_dict_lookup() which locates a
+    # hashtable entry with the given 'index' stored in it, and deletes it.
+    # This *should* be safe against evil user-level __eq__/__hash__
+    # functions because the 'hash' argument here should be the one stored
+    # into the dictionary, which is correct.
+    INDEXES = _ll_ptr_to_array_of(T)
+    indexes = lltype.cast_opaque_ptr(INDEXES, d.indexes)
+    mask = len(indexes) - 1
+    i = r_uint(hash & mask)
+    perturb = r_uint(hash)
+    locate_value = locate_index + VALID_OFFSET
+    while rffi.cast(lltype.Signed, indexes[i]) != locate_value:
+        assert rffi.cast(lltype.Signed, indexes[i]) != FREE
+        i = (i << 2) + i + perturb + 1
+        i = i & mask
+        perturb >>= PERTURB_SHIFT
+    indexes[i] = rffi.cast(T, DELETED)
+
 # ____________________________________________________________
 #
 #  Irregular operations.
 
-DICT_INITSIZE = 8
+# Start the hashtable size at 16 rather than 8, as with rdict.py, because
+# it is only an array of bytes
+DICT_INITSIZE = 16
 
 
 @specialize.memo()
@@ -948,14 +1001,19 @@
         self.r_dict = r_dict
         self.variant = variant
         self.lowleveltype = get_ll_dictiter(r_dict.lowleveltype)
-        self.ll_dictiter = ll_dictiter
-        self._ll_dictnext = _ll_dictnext
+        if variant == 'reversed':
+            self.ll_dictiter = ll_dictiter_reversed
+            self._ll_dictnext = _ll_dictnext_reversed
+        else:
+            self.ll_dictiter = ll_dictiter
+            self._ll_dictnext = _ll_dictnext
 
 
 def ll_dictiter(ITERPTR, d):
     iter = lltype.malloc(ITERPTR.TO)
     iter.dict = d
-    iter.index = 0
+    # initialize the index with usually 0, but occasionally a larger value
+    iter.index = d.lookup_function_no >> FUNC_SHIFT
     return iter
 
 @jit.look_inside_iff(lambda iter: jit.isvirtual(iter)
@@ -974,17 +1032,48 @@
             if entries.valid(index):
                 iter.index = nextindex
                 return index
+            else:
+                # In case of repeated iteration over the start of
+                # a dict where the items get removed, like
+                # collections.OrderedDict.popitem(last=False),
+                # the hack below will increase the value stored in
+                # the high bits of lookup_function_no and so the
+                # next iteration will start at a higher value.
+                # We should carefully reset these high bits to zero
+                # as soon as we do something like ll_dict_reindex().
+                if index == (dict.lookup_function_no >> FUNC_SHIFT):
+                    dict.lookup_function_no += (1 << FUNC_SHIFT)
             index = nextindex
         # clear the reference to the dict and prevent restarts
         iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
     raise StopIteration
 
+def ll_dictiter_reversed(ITERPTR, d):
+    iter = lltype.malloc(ITERPTR.TO)
+    iter.dict = d
+    iter.index = d.num_ever_used_items
+    return iter
+
+def _ll_dictnext_reversed(iter):
+    dict = iter.dict
+    if dict:
+        entries = dict.entries
+        index = iter.index - 1
+        while index >= 0:
+            if entries.valid(index):
+                iter.index = index
+                return index
+            index = index - 1
+        # clear the reference to the dict and prevent restarts
+        iter.dict = lltype.nullptr(lltype.typeOf(iter).TO.dict.TO)
+    raise StopIteration
+
 # _____________________________________________________________
 # methods
 
 def ll_dict_get(dict, key, default):
     index = dict.lookup_function(dict, key, dict.keyhash(key), FLAG_LOOKUP)
-    if index == -1:
+    if index < 0:
         return default
     else:
         return dict.entries[index].value
@@ -992,7 +1081,7 @@
 def ll_dict_setdefault(dict, key, default):
     hash = dict.keyhash(key)
     index = dict.lookup_function(dict, key, hash, FLAG_STORE)
-    if index == -1:
+    if index < 0:
         _ll_dict_setitem_lookup_done(dict, key, default, hash, -1)
         return default
     else:
@@ -1119,7 +1208,7 @@
 
 def ll_dict_contains(d, key):
     i = d.lookup_function(d, key, d.keyhash(key), FLAG_LOOKUP)
-    return i != -1
+    return i >= 0
 
 def _ll_getnextitem(dic):
     if dic.num_live_items == 0:
@@ -1127,22 +1216,19 @@
 
     entries = dic.entries
 
+    # find the last entry.  It's unclear if the loop below is still
+    # needed nowadays, because 'num_ever_used_items - 1' should always
+    # point to the last active item (we decrease it as needed in
+    # _ll_dict_del).  Better safe than sorry.
     while True:
         i = dic.num_ever_used_items - 1
         if entries.valid(i):
             break
         dic.num_ever_used_items -= 1
 
-    key = entries[i].key
-    index = dic.lookup_function(dic, key, entries.hash(i),
-                                FLAG_DELETE_TRY_HARD)
-    # if the lookup function returned me a random strange thing,
-    # don't care about deleting the item
-    if index == dic.num_ever_used_items - 1:
-        dic.num_ever_used_items -= 1
-    else:
-        assert index != -1
-    return index
+    # we must remove the precise entry in the hashtable that points to 'i'
+    ll_call_delete_by_entry_index(dic, entries.hash(i), i)
+    return i
 
 def ll_dict_popitem(ELEM, dic):
     i = _ll_getnextitem(dic)
@@ -1155,7 +1241,7 @@
 
 def ll_dict_pop(dic, key):
     index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE)
-    if index == -1:
+    if index < 0:
         raise KeyError
     value = dic.entries[index].value
     _ll_dict_del(dic, index)
@@ -1163,7 +1249,7 @@
 
 def ll_dict_pop_default(dic, key, dfl):
     index = dic.lookup_function(dic, key, dic.keyhash(key), FLAG_DELETE)
-    if index == -1:
+    if index < 0:
         return dfl
     value = dic.entries[index].value
     _ll_dict_del(dic, index)
diff --git a/rpython/rtyper/rbuiltin.py b/rpython/rtyper/rbuiltin.py
--- a/rpython/rtyper/rbuiltin.py
+++ b/rpython/rtyper/rbuiltin.py
@@ -445,6 +445,14 @@
     return hop.genop('cast_opaque_ptr', [v_input], # v_type implicit in 
r_result
                      resulttype = hop.r_result.lowleveltype)
 
+@typer_for(lltype.length_of_simple_gcarray_from_opaque)
+def rtype_length_of_simple_gcarray_from_opaque(hop):
+    assert isinstance(hop.args_r[0], rptr.PtrRepr)
+    v_opaque_ptr, = hop.inputargs(hop.args_r[0])
+    hop.exception_cannot_occur()
+    return hop.genop('length_of_simple_gcarray_from_opaque', [v_opaque_ptr],
+                     resulttype = hop.r_result.lowleveltype)
+
 @typer_for(lltype.direct_fieldptr)
 def rtype_direct_fieldptr(hop):
     assert isinstance(hop.args_r[0], rptr.PtrRepr)
diff --git a/rpython/rtyper/rdict.py b/rpython/rtyper/rdict.py
--- a/rpython/rtyper/rdict.py
+++ b/rpython/rtyper/rdict.py
@@ -98,12 +98,12 @@
             c_key = hop.inputconst(lltype.Void, 'key')
             v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key],
                               resulttype=KEY)
-        if variant != 'keys':
+        if variant != 'keys' and variant != 'reversed':
             VALUE = ENTRIES.TO.OF.value
             c_value = hop.inputconst(lltype.Void, 'value')
             v_value = hop.genop('getinteriorfield', 
[v_entries,v_index,c_value],
                                 resulttype=VALUE)
-        if variant == 'keys':
+        if variant == 'keys' or variant == 'reversed':
             return self.r_dict.recast_key(hop.llops, v_key)
         elif variant == 'values':
             return self.r_dict.recast_value(hop.llops, v_value)
diff --git a/rpython/rtyper/test/test_rdict.py 
b/rpython/rtyper/test/test_rdict.py
--- a/rpython/rtyper/test/test_rdict.py
+++ b/rpython/rtyper/test/test_rdict.py
@@ -1005,6 +1005,7 @@
 
 
     def test_dict_resize(self):
+        py.test.skip("test written for non-ordered dicts, update or kill")
         # XXX we no longer automatically resize on 'del'.  We need to
         # hack a bit in this test to trigger a resize by continuing to
         # fill the dict's table while keeping the actual size very low
@@ -1025,7 +1026,7 @@
         res = self.interpret(func, [1])
         assert len(res.entries) == rdict.DICT_INITSIZE
 
-    def test_opt_nullkeymarker(self):
+    def test_opt_dummykeymarker(self):
         def f():
             d = {"hello": None}
             d["world"] = None
@@ -1033,10 +1034,9 @@
         res = self.interpret(f, [])
         assert res.item0 == True
         DICT = lltype.typeOf(res.item1).TO
-        assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None string 
keys
-        assert not hasattr(DICT.entries.TO.OF, 'f_valid')   # strings have a 
dummy
+        assert not hasattr(DICT.entries.TO.OF, 'f_valid')   # strs have a dummy
 
-    def test_opt_nullvaluemarker(self):
+    def test_opt_dummyvaluemarker(self):
         def f(n):
             d = {-5: "abcd"}
             d[123] = "def"
@@ -1044,29 +1044,8 @@
         res = self.interpret(f, [-5])
         assert res.item0 == 4
         DICT = lltype.typeOf(res.item1).TO
-        assert not hasattr(DICT.entries.TO.OF, 'f_everused')# non-None str 
values
         assert not hasattr(DICT.entries.TO.OF, 'f_valid')   # strs have a dummy
 
-    def test_opt_nonullmarker(self):
-        class A:
-            pass
-        def f(n):
-            if n > 5:
-                a = A()
-            else:
-                a = None
-            d = {a: -5441}
-            d[A()] = n+9872
-            return d[a], d
-        res = self.interpret(f, [-5])
-        assert res.item0 == -5441
-        DICT = lltype.typeOf(res.item1).TO
-        assert hasattr(DICT.entries.TO.OF, 'f_everused') # can-be-None A 
instances
-        assert not hasattr(DICT.entries.TO.OF, 'f_valid')# with a dummy A 
instance
-
-        res = self.interpret(f, [6])
-        assert res.item0 == -5441
-
     def test_opt_nonnegint_dummy(self):
         def f(n):
             d = {n: 12}
@@ -1077,7 +1056,6 @@
         assert res.item0 == 1
         assert res.item1 == 24
         DICT = lltype.typeOf(res.item2).TO
-        assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero
         assert not hasattr(DICT.entries.TO.OF, 'f_valid')# nonneg int: dummy -1
 
     def test_opt_no_dummy(self):
@@ -1090,7 +1068,6 @@
         assert res.item0 == 1
         assert res.item1 == -24
         DICT = lltype.typeOf(res.item2).TO
-        assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero
         assert hasattr(DICT.entries.TO.OF, 'f_valid')    # no dummy available
 
     def test_opt_boolean_has_no_dummy(self):
@@ -1103,7 +1080,6 @@
         assert res.item0 == 1
         assert res.item1 is True
         DICT = lltype.typeOf(res.item2).TO
-        assert hasattr(DICT.entries.TO.OF, 'f_everused') # all ints can be zero
         assert hasattr(DICT.entries.TO.OF, 'f_valid')    # no dummy available
 
     def test_opt_multiple_identical_dicts(self):
@@ -1142,6 +1118,7 @@
         assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 
'value']
 
     def test_deleted_entry_reusage_with_colliding_hashes(self):
+        py.test.skip("test written for non-ordered dicts, update or kill")
         def lowlevelhash(value):
             p = rstr.mallocstr(len(value))
             for i in range(len(value)):
diff --git a/rpython/rtyper/test/test_rordereddict.py 
b/rpython/rtyper/test/test_rordereddict.py
--- a/rpython/rtyper/test/test_rordereddict.py
+++ b/rpython/rtyper/test/test_rordereddict.py
@@ -115,11 +115,18 @@
         rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2)
         rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3)
         rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4)
-        assert len(get_indexes(ll_d)) == 8
         rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5)
         rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6)
-        assert len(get_indexes(ll_d)) == 32
-        for item in ['a', 'b', 'c', 'd', 'e', 'f']:
+        rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7)
+        rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8)
+        rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9)
+        rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10)
+        assert len(get_indexes(ll_d)) == 16
+        rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11)
+        rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12)
+        rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13)
+        assert len(get_indexes(ll_d)) == 64
+        for item in 'abcdefghijklm':
             assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == 
ord(item) - ord('a') + 1
 
     def test_dict_grow_cleanup(self):
@@ -160,6 +167,38 @@
         assert ll_elem.item1 == 1
         py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d)
 
+    def test_popitem_first(self):
+        DICT = self._get_str_dict()
+        ll_d = rordereddict.ll_newdict(DICT)
+        rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
+        rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
+        rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3)
+        ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
+        for expected in ["k", "j", "m"]:
+            ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
+            num = rordereddict._ll_dictnext(ll_iter)
+            ll_key = ll_d.entries[num].key
+            assert hlstr(ll_key) == expected
+            rordereddict.ll_dict_delitem(ll_d, ll_key)
+        ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
+        py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter)
+
+    def test_popitem_first_bug(self):
+        DICT = self._get_str_dict()
+        ll_d = rordereddict.ll_newdict(DICT)
+        rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
+        rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
+        rordereddict.ll_dict_delitem(ll_d, llstr("k"))
+        ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
+        ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
+        num = rordereddict._ll_dictnext(ll_iter)
+        ll_key = ll_d.entries[num].key
+        assert hlstr(ll_key) == "j"
+        assert ll_d.lookup_function_no == 4    # 1 free item found at the start
+        rordereddict.ll_dict_delitem(ll_d, llstr("j"))
+        assert ll_d.num_ever_used_items == 0
+        assert ll_d.lookup_function_no == 0    # reset
+
     def test_direct_enter_and_del(self):
         def eq(a, b):
             return a == b
diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py
--- a/rpython/translator/c/funcgen.py
+++ b/rpython/translator/c/funcgen.py
@@ -653,6 +653,11 @@
     OP_CAST_ADR_TO_PTR = OP_CAST_POINTER
     OP_CAST_OPAQUE_PTR = OP_CAST_POINTER
 
+    def OP_LENGTH_OF_SIMPLE_GCARRAY_FROM_OPAQUE(self, op):
+        return ('%s = *(long *)(((char *)%s) + sizeof(struct pypy_header0));'
+                '  /* length_of_simple_gcarray_from_opaque */'
+            % (self.expr(op.result), self.expr(op.args[0])))
+
     def OP_CAST_INT_TO_PTR(self, op):
         TYPE = self.lltypemap(op.result)
         typename = self.db.gettype(TYPE)
diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py
--- a/rpython/translator/c/test/test_lladdresses.py
+++ b/rpython/translator/c/test/test_lladdresses.py
@@ -246,3 +246,13 @@
     assert res == 456
     res = fc(77)
     assert res == 123
+
+def test_gcarray_length():
+    A = lltype.GcArray(lltype.Char)
+    def f():
+        a = lltype.malloc(A, 117)
+        p = lltype.cast_opaque_ptr(GCREF, a)
+        return lltype.length_of_simple_gcarray_from_opaque(p)
+    fc = compile(f, [])
+    res = fc()
+    assert res == 117
diff --git a/rpython/translator/tool/staticsizereport.py b/rpython/translator/tool/staticsizereport.py
--- a/rpython/translator/tool/staticsizereport.py
+++ b/rpython/translator/tool/staticsizereport.py
@@ -3,6 +3,7 @@
 
 from rpython.tool.ansicolor import red, yellow, green
 from rpython.rtyper.lltypesystem.lltype import typeOf, _ptr, Ptr, ContainerType
+from rpython.rtyper.lltypesystem.lltype import GcOpaqueType
 from rpython.rtyper.lltypesystem import llmemory
 from rpython.memory.lltypelayout import convert_offset_to_int
 
@@ -54,6 +55,8 @@
         if isinstance(typeOf(value), Ptr):
             container = value._obj
             if isinstance(typeOf(container), ContainerType):
+                if isinstance(typeOf(container), GcOpaqueType):
+                    container = container.container
                 node = database.getcontainernode(container)
                 if node.nodekind != 'func':
                     nodes.append(node)
@@ -77,7 +80,10 @@
                 return 0
     else:
         length = None
-    return convert_offset_to_int(llmemory.sizeof(TYPE, length))
+    #print obj, ', length =', length
+    r = convert_offset_to_int(llmemory.sizeof(TYPE, length))
+    #print '\tr =', r
+    return r
 
 
 def guess_size(database, node, recursive=None):
diff --git a/rpython/translator/tool/test/test_staticsizereport.py b/rpython/translator/tool/test/test_staticsizereport.py
--- a/rpython/translator/tool/test/test_staticsizereport.py
+++ b/rpython/translator/tool/test/test_staticsizereport.py
@@ -57,10 +57,17 @@
         P = rffi.sizeof(rffi.VOIDP)
         B = 1 # bool
         assert guess_size(func.builder.db, dictvalnode, set()) > 100
-        assert guess_size(func.builder.db, dictvalnode2, set()) == 2 * S + 1 * P + 1 * S + 8 * (2*S + 1 * B)
+        assert guess_size(func.builder.db, dictvalnode2, set()) == (
+            (4 * S + 2 * P) +     # struct dicttable
+            (S + 8) +             # indexes, length 8
+            (S + S + S))          # entries, length 1
         r_set = set()
         dictnode_size = guess_size(db, test_dictnode, r_set)
-        assert dictnode_size == 2 * S + 1 * P + 1 * S + (4096-256) * (1*S+1*P + (1 * S + 1*P + 5)) + (8192-4096+256) * (1*S+1*P)
+        assert dictnode_size == (
+            (4 * S + 2 * P) +      # struct dicttable
+            (S + 2 * 8192) +       # indexes, length 8192, rffi.USHORT
+            (S + (S + S) * 3840) + # entries, length 3840
+            (S + S + 5) * 3840)    # 3840 strings with 5 chars each
         assert guess_size(func.builder.db, fixarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(lltype.Signed)
         assert guess_size(func.builder.db, dynarrayvalnode, set()) == 100 * rffi.sizeof(lltype.Signed) + 2 * rffi.sizeof(lltype.Signed) + 1 * rffi.sizeof(rffi.VOIDP)
 
_______________________________________________
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit

Reply via email to