[pypy-commit] pypy default: merge heads

2020-01-31 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r98612:39bcfae01bee
Date: 2020-01-31 14:10 +0100
http://bitbucket.org/pypy/pypy/changeset/39bcfae01bee/

Log: merge heads

diff --git a/pypy/module/mmap/interp_mmap.py b/pypy/module/mmap/interp_mmap.py
--- a/pypy/module/mmap/interp_mmap.py
+++ b/pypy/module/mmap/interp_mmap.py
@@ -177,8 +177,10 @@
 return space.newbytes(self.mmap.getslice(start, length))
 else:
 b = StringBuilder(length)
-for i in range(start, stop, step):
-b.append(self.mmap.getitem(i))
+index = start
+for i in range(length):
+b.append(self.mmap.getitem(index))
+index += step
 return space.newbytes(b.build())
 
 def descr_setitem(self, w_index, w_value):
diff --git a/pypy/module/mmap/test/test_mmap.py b/pypy/module/mmap/test/test_mmap.py
--- a/pypy/module/mmap/test/test_mmap.py
+++ b/pypy/module/mmap/test/test_mmap.py
@@ -433,6 +433,15 @@
 m.close()
 f.close()
 
+def test_get_crash(self):
+import sys
+from mmap import mmap
+s = b'hallo!!!'
+m = mmap(-1, len(s))
+m[:] = s
+assert m[1:None:sys.maxsize] == b'a'
+m.close()
+
 def test_set_item(self):
 import mmap
 
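A quick way to see what the new counted loop does for the slice exercised by test_get_crash above: the start/stop/step normalisation and the resulting length can be reproduced with plain Python slice arithmetic (a sketch only, not the RPython code; 8 is len(b'hallo!!!')):

    import sys

    # the slice from the test, applied to an 8-byte mmap
    start, stop, step = slice(1, None, sys.maxsize).indices(8)
    length = max(0, (stop - start + step - 1) // step)  # items produced by the slice
    assert (start, stop, step) == (1, 8, sys.maxsize)
    assert length == 1   # the loop body runs exactly once, so the result is m[1] == b'a'
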
diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py
--- a/pypy/objspace/std/newformat.py
+++ b/pypy/objspace/std/newformat.py
@@ -772,7 +772,7 @@
 digits = self._upcase_string(digits)
 out.append(digits)
 if spec.n_decimal:
-out.append(self._lit(".")[0])
+out.append(self._lit(self._loc_dec)[0])
 if spec.n_remainder:
 out.append(num[to_remainder:])
 if spec.n_rpadding:
diff --git a/pypy/objspace/std/test/test_newformat.py b/pypy/objspace/std/test/test_newformat.py
--- a/pypy/objspace/std/test/test_newformat.py
+++ b/pypy/objspace/std/test/test_newformat.py
@@ -389,6 +389,24 @@
 finally:
 locale.setlocale(locale.LC_NUMERIC, 'C')
 
+def test_locale_german(self):
+import locale
+for name in ['de_DE', 'de_DE.utf8']:
+try:
+locale.setlocale(locale.LC_NUMERIC, name)
+break
+except locale.Error:
+pass
+else:
+skip("no german locale")
+x = 1234.567890
+try:
+assert locale.format('%g', x, grouping=True) == '1.234,57'
+assert format(x, 'n') == '1.234,57'
+assert format(12345678901234, 'n') == '12.345.678.901.234'
+finally:
+locale.setlocale(locale.LC_NUMERIC, 'C')
+
 def test_dont_switch_to_g(self):
 skip("must fix when float formatting is figured out")
 assert len(format(1.1234e90, "f")) == 98


[pypy-commit] pypy default: merge heads

2020-01-23 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r98576:a7c7b4c7dcae
Date: 2020-01-23 13:08 +0100
http://bitbucket.org/pypy/pypy/changeset/a7c7b4c7dcae/

Log: merge heads

diff --git a/pypy/module/_multibytecodec/c_codecs.py b/pypy/module/_multibytecodec/c_codecs.py
--- a/pypy/module/_multibytecodec/c_codecs.py
+++ b/pypy/module/_multibytecodec/c_codecs.py
@@ -194,17 +194,23 @@
rffi.SSIZE_T)
 pypy_cjk_enc_getcodec = llexternal('pypy_cjk_enc_getcodec',
[ENCODEBUF_P], MULTIBYTECODEC_P)
+pypy_cjk_enc_copystate = llexternal('pypy_cjk_enc_copystate',
+[ENCODEBUF_P, ENCODEBUF_P], lltype.Void)
 MBENC_FLUSH = 1
 MBENC_RESET = 2
 
 def encode(codec, unicodedata, length, errors="strict", errorcb=None,
-   namecb=None):
+   namecb=None, copystate=lltype.nullptr(ENCODEBUF_P.TO)):
 encodebuf = pypy_cjk_enc_new(codec)
 if not encodebuf:
 raise MemoryError
+if copystate:
+pypy_cjk_enc_copystate(encodebuf, copystate)
 try:
 return encodeex(encodebuf, unicodedata, length, errors, errorcb, 
namecb)
 finally:
+if copystate:
+pypy_cjk_enc_copystate(copystate, encodebuf)
 pypy_cjk_enc_free(encodebuf)
 
 def encodeex(encodebuf, utf8data, length, errors="strict", errorcb=None,
@@ -258,18 +264,18 @@
 elif errors == "ignore":
 replace = ""
 elif errors == "replace":
-codec = pypy_cjk_enc_getcodec(encodebuf)
-try:
-replace = encode(codec, "?", 1)
-except EncodeDecodeError:
-replace = "?"
+replace = "?"# utf-8 unicode
 else:
 assert errorcb
-rets, end = errorcb(errors, namecb, reason,
+replace, end = errorcb(errors, namecb, reason,
 unicodedata, start, end)
+if len(replace) > 0:
 codec = pypy_cjk_enc_getcodec(encodebuf)
-lgt = rutf8.codepoints_in_utf8(rets)
-replace = encode(codec, rets, lgt, "strict", errorcb, namecb)
+lgt = rutf8.codepoints_in_utf8(replace)
+replace = encode(codec, replace, lgt, copystate=encodebuf)
+#else:
+#   replace is an empty utf-8 unicode, which we directly consider to
+#   encode as an empty byte string.
 with rffi.scoped_nonmovingbuffer(replace) as inbuf:
 r = pypy_cjk_enc_replace_on_error(encodebuf, inbuf, len(replace), end)
 if r == MBERR_NOMEMORY:
diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c
--- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c
+++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c
@@ -135,6 +135,11 @@
   return d;
 }
 
+void pypy_cjk_enc_copystate(struct pypy_cjk_enc_s *dst, struct pypy_cjk_enc_s 
*src)
+{
+dst->state = src->state;
+}
+
 Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d,
  Py_UNICODE *inbuf, Py_ssize_t inlen)
 {
diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
--- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
+++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h
@@ -146,6 +146,8 @@
   char *, pypymbc_ssize_t, 
pypymbc_ssize_t);
 RPY_EXTERN
 const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *);
+RPY_EXTERN
+void pypy_cjk_enc_copystate(struct pypy_cjk_enc_s *dst, struct pypy_cjk_enc_s 
*src);
 
 /* list of codecs defined in the .c files */
 
diff --git a/pypy/module/_multibytecodec/test/test_app_codecs.py b/pypy/module/_multibytecodec/test/test_app_codecs.py
--- a/pypy/module/_multibytecodec/test/test_app_codecs.py
+++ b/pypy/module/_multibytecodec/test/test_app_codecs.py
@@ -110,3 +110,33 @@
   lambda e: ('\xc3', e.end))
 raises(TypeError, u"\uDDA1".encode, "gbk",
"test.test_encode_custom_error_handler_type")
+
+def test_encode_replacement_with_state(self):
+import codecs
+s = u'\u4ee4\u477c\u4ee4'.encode("iso-2022-jp", errors="replace")
+assert s == '\x1b$BNa\x1b(B?\x1b$BNa\x1b(B'
+
+def test_streaming_codec(self):
+test_0 = u'\uc5fc\u76d0\u5869\u9e7d\u477c\u4e3d/\u3012'
+test_1 = u'\u4ee4\u477c\u3080\u304b\u3057\u3080\u304b\u3057\u3042\u308b\u3068\u3053\u308d\u306b'
+test_2 = u' foo = "Quoted string \u4ee4\u477c" '
+
+ereplace = {'errors': 'replace'}
+exml = {'errors': 'xmlcharrefreplace'}
+for codec in ("iso-2022-jp", "iso-2022-jp-ext", "iso-2022-jp-1",
+  "iso-2022-jp-2", "iso-2022-jp-3", "iso-2022-jp-2004",
+  "iso-2022-kr",
+ ):
+
+out_1 = test_1.encode(codec, **ereplace).decode(codec, **ereplace)
+assert 

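For readers who do not have the ISO-2022 escape sequences memorised, the expected bytes in test_encode_replacement_with_state above decompose as follows (my annotation; the 'Na' pair for U+4EE4 is read off the assert itself):

    # annotated copy of the assertion from the test above (Python 2 syntax)
    s = u'\u4ee4\u477c\u4ee4'.encode("iso-2022-jp", errors="replace")
    assert s == ('\x1b$B'   # ESC $ B : switch to JIS X 0208
                 'Na'       # two-byte JIS code for U+4EE4
                 '\x1b(B'   # ESC ( B : switch back to ASCII
                 '?'        # replacement for U+477C, which iso-2022-jp cannot encode
                 '\x1b$B'   # re-enter JIS X 0208 for the second U+4EE4
                 'Na'
                 '\x1b(B')  # final reset to ASCII

The pypy_cjk_enc_copystate round-trip added above is what lets the nested encode() of a replacement string see, and update, the shift state of the outer encode buffer, so these mode switches stay consistent across the error handler.
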
[pypy-commit] pypy default: merge closed branch

2019-12-31 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r98425:f749b133c219
Date: 2019-12-31 10:37 +0200
http://bitbucket.org/pypy/pypy/changeset/f749b133c219/

Log: merge closed branch



[pypy-commit] pypy default: merge this branch to speedup cpyext tests, especially on py3.6

2019-12-23 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r98374:3bb3cd0a3643
Date: 2019-12-23 19:17 +0100
http://bitbucket.org/pypy/pypy/changeset/3bb3cd0a3643/

Log: merge this branch to speedup cpyext tests, especially on py3.6

diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py
--- a/lib-python/2.7/pickle.py
+++ b/lib-python/2.7/pickle.py
@@ -1444,5 +1444,14 @@
 import doctest
 return doctest.testmod()
 
+# = PyPy modification to support pickling cpyext methods =
+try:
+import cpyext
+except ImportError:
+pass
+else:
+Pickler.dispatch[cpyext.FunctionType] = Pickler.save_global
+# = end of PyPy modification 
+
 if __name__ == "__main__":
 _test()
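In practice the new dispatch entry means that functions implemented in cpyext extension modules pickle by reference, like ordinary Python globals. A sketch with a made-up module name (cpyext.FunctionType itself is real and is exported by the moduledef change below):

    import pickle
    import cpyext            # PyPy-only; exposes FunctionType after this change
    import _somecext         # hypothetical cpyext-compiled extension module

    func = _somecext.do_work
    assert type(func) is cpyext.FunctionType
    # stored as a "module.name" reference via Pickler.save_global
    data = pickle.dumps(func)

Doing the registration in pickle.py itself also lets Module.startup() drop the appexec snippet that used to install it, which is part of the cpyext test-startup saving mentioned in the log.
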
diff --git a/pypy/module/cpyext/moduledef.py b/pypy/module/cpyext/moduledef.py
--- a/pypy/module/cpyext/moduledef.py
+++ b/pypy/module/cpyext/moduledef.py
@@ -6,6 +6,7 @@
 interpleveldefs = {
 'load_module': 'api.load_extension_module',
 'is_cpyext_function': 'interp_cpyext.is_cpyext_function',
+'FunctionType': 'methodobject.W_PyCFunctionObject',
 }
 
 appleveldefs = {
@@ -15,15 +16,6 @@
 
 def startup(self, space):
 space.fromcache(State).startup(space)
-method = pypy.module.cpyext.typeobject.get_new_method_def(space)
-# the w_self argument here is a dummy, the only thing done with w_obj
-# is call type() on it
-w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space,
-   method, 
space.w_None)
-space.appexec([w_obj], """(meth):
-from pickle import Pickler
-Pickler.dispatch[type(meth)] = Pickler.save_global
-""")
 
 def register_atexit(self, function):
 if len(self.atexit_funcs) >= 32:
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -82,23 +82,6 @@
 def freeze_refcnts(self):
 rawrefcount._dont_free_any_more()
 
-def preload(space, name):
-from pypy.module.cpyext.pyobject import make_ref
-if '.' not in name:
-w_obj = space.builtin.getdictvalue(space, name)
-else:
-module, localname = name.rsplit('.', 1)
-code = "(): import {module}; return {module}.{localname}"
-code = code.format(**locals())
-w_obj = space.appexec([], code)
-make_ref(space, w_obj)
-
-def preload_expr(space, expr):
-from pypy.module.cpyext.pyobject import make_ref
-code = "(): return {}".format(expr)
-w_obj = space.appexec([], code)
-make_ref(space, w_obj)
-
 def is_interned_string(space, w_obj):
 try:
 s = space.str_w(w_obj)
@@ -148,13 +131,37 @@
 Eagerly create pyobjs for various builtins so they don't look like
 leaks.
 """
-for name in [
-'buffer', 'mmap.mmap',
-'types.FunctionType', 'types.CodeType',
-'types.TracebackType', 'types.FrameType']:
-preload(space, name)
-for expr in ['type(str.join)']:
-preload_expr(space, expr)
+from pypy.module.cpyext.pyobject import make_ref
+w_to_preload = space.appexec([], """():
+import sys
+import mmap
+#
+# copied to avoid importing the whole types.py, which is
+# expensive on py3k
+# 
+def _f(): pass
+FunctionType = type(_f)
+CodeType = type(_f.__code__)
+try:
+raise TypeError
+except TypeError:
+tb = sys.exc_info()[2]
+TracebackType = type(tb)
+FrameType = type(tb.tb_frame)
+del tb
+# 
+return [
+buffer,
+mmap.mmap,
+FunctionType,
+CodeType,
+TracebackType,
+FrameType,
+type(str.join),
+]
+""")
+for w_obj in space.unpackiterable(w_to_preload):
+make_ref(space, w_obj)
 
 def cleanup(self):
 self.space.getexecutioncontext().cleanup_cpyext_state()


[pypy-commit] pypy default: merge closed backport-decode_timeval_ns-py3.7

2019-11-30 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r98198:bd2b1a5f595d
Date: 2019-11-30 21:31 +0200
http://bitbucket.org/pypy/pypy/changeset/bd2b1a5f595d/

Log: merge closed backport-decode_timeval_ns-py3.7



[pypy-commit] pypy default: merge backport-decode_timeval_ns-py3.7 into default

2019-11-30 Thread Yannick_Jadoul
Author: Yannick Jadoul 
Branch: 
Changeset: r98193:d3c8d438315b
Date: 2019-11-30 16:57 +0100
http://bitbucket.org/pypy/pypy/changeset/d3c8d438315b/

Log: merge backport-decode_timeval_ns-py3.7 into default

diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py
--- a/rpython/rlib/rtime.py
+++ b/rpython/rlib/rtime.py
@@ -9,7 +9,7 @@
 from rpython.rtyper.tool import rffi_platform
 from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.rlib.objectmodel import register_replacement_for
-from rpython.rlib.rarithmetic import intmask, UINT_MAX
+from rpython.rlib.rarithmetic import intmask, r_int64, UINT_MAX
 from rpython.rlib import rposix
 
 _WIN32 = sys.platform.startswith('win')
@@ -94,6 +94,10 @@
 return (float(rffi.getintfield(t, 'c_tv_sec')) +
float(rffi.getintfield(t, 'c_tv_usec')) * 0.000001)
 
+def decode_timeval_ns(t):
+return (r_int64(rffi.getintfield(t, 'c_tv_sec')) * 10**9 +
+r_int64(rffi.getintfield(t, 'c_tv_usec')) * 10**3)
+
 
 def external(name, args, result, compilation_info=eci, **kwds):
 return rffi.llexternal(name, args, result,
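The new decode_timeval_ns simply rescales the two timeval fields to nanoseconds; the same arithmetic in plain Python, with made-up field values:

    # a timeval carries seconds and microseconds;
    # nanoseconds = tv_sec * 10**9 + tv_usec * 10**3
    tv_sec, tv_usec = 1575132660, 123456        # example values only
    assert tv_sec * 10**9 + tv_usec * 10**3 == 1575132660123456000

The r_int64 casts in the RPython version keep the multiplication in 64-bit arithmetic even on 32-bit platforms.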


[pypy-commit] pypy default: merge allow-forcing-no-embed which added PYPY_NO_EMBED_DEPENDENCIES to packaging

2019-10-28 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97879:d6217bf98b7c
Date: 2019-10-28 20:29 +0200
http://bitbucket.org/pypy/pypy/changeset/d6217bf98b7c/

Log: merge allow-forcing-no-embed which added PYPY_NO_EMBED_DEPENDENCIES to packaging

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -16,3 +16,7 @@
 .. branch: license-update
 
 Update list directories in LICENSE
+
+.. branch: allow-forcing-no-embed
+
+When packaging, allow suppressing embedded dependencies via PYPY_NO_EMBED_DEPENDENCIES
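A sketch of how a packaging script might honour the new variable (illustrative only; the real handling lives in the packaging tooling, which this diff does not show):

    import os

    # any non-empty value suppresses downloading/building the bundled dependencies
    embed = not os.environ.get('PYPY_NO_EMBED_DEPENDENCIES', '')
    cffi_build_args = ['--embed-dependencies'] if embed else []

A distribution packager would typically export PYPY_NO_EMBED_DEPENDENCIES=1 in the environment before running the packaging step, so the system-provided libraries are used instead.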


[pypy-commit] pypy default: merge https-readme into default

2019-10-24 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97850:c32894ffed89
Date: 2019-10-24 17:19 +0300
http://bitbucket.org/pypy/pypy/changeset/c32894ffed89/

Log: merge https-readme into default



[pypy-commit] pypy default: merge license-update into default

2019-10-24 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97849:9ecf0c6f29e5
Date: 2019-10-24 17:18 +0300
http://bitbucket.org/pypy/pypy/changeset/9ecf0c6f29e5/

Log: merge license-update into default



[pypy-commit] pypy default: Merge some license gap fixes.

2019-10-24 Thread JulianB
Author: Julian Berman 
Branch: 
Changeset: r97846:f8f6fe914790
Date: 2019-10-24 08:33 -0400
http://bitbucket.org/pypy/pypy/changeset/f8f6fe914790/

Log: Merge some license gap fixes.

diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -3,10 +3,11 @@
 License
 ===
 
-Except when otherwise stated (look for LICENSE files in directories or
-information at the beginning of each file) all software and documentation in
-the 'rpython', 'pypy', 'ctype_configure', 'dotviewer', 'demo', 'lib_pypy',
-'py', and '_pytest' directories is licensed as follows:
+Except when otherwise stated (look for LICENSE files in directories
+or information at the beginning of each file) all software and
+documentation in the 'rpython', 'pypy', 'ctype_configure', 'dotviewer',
+'demo', 'extra_tests', 'include', 'lib_pypy', 'py', and '_pytest'
+directories is licensed as follows:
 
 The MIT License
 


[pypy-commit] pypy default: merge branch to fix descrmismatch exception

2019-10-16 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97783:f089d461d2ee
Date: 2019-10-16 08:14 +0300
http://bitbucket.org/pypy/pypy/changeset/f089d461d2ee/

Log: merge branch to fix descrmismatch exception

diff --git a/lib-python/2.7/test/test_dictviews.py b/lib-python/2.7/test/test_dictviews.py
--- a/lib-python/2.7/test/test_dictviews.py
+++ b/lib-python/2.7/test/test_dictviews.py
@@ -182,7 +182,7 @@
 
 def test_deeply_nested_repr(self):
 d = {}
-for i in range(sys.getrecursionlimit() + 100):
+for i in range(sys.getrecursionlimit() + 200):
 d = {42: d.viewvalues()}
 self.assertRaises(RuntimeError, repr, d)
 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,11 +3,9 @@
 ==
 
 .. this is a revision shortly after release-pypy-7.2.0
-.. startrev: 78cd4acbcbec 
+.. startrev: a511d86377d6 
 
+.. branch: fix-descrmismatch-crash
 
-.. branch: json-decoder-maps
+Fix segfault when calling descr-methods with no arguments
 
-Much faster and more memory-efficient JSON decoding. The resulting
-dictionaries that come out of the JSON decoder have faster lookups too.
-
diff --git a/pypy/doc/whatsnew-pypy2-7.2.0.rst b/pypy/doc/whatsnew-pypy2-7.2.0.rst
--- a/pypy/doc/whatsnew-pypy2-7.2.0.rst
+++ b/pypy/doc/whatsnew-pypy2-7.2.0.rst
@@ -74,3 +74,9 @@
 .. branch: openssl-for-macos
 
 Update _ssl on macos to statically link to openssl-1.1.1c
+
+.. branch: json-decoder-maps
+
+Much faster and more memory-efficient JSON decoding. The resulting
+dictionaries that come out of the JSON decoder have faster lookups too.
+
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -419,6 +419,8 @@
 
 @specialize.memo()
 def wrappable_class_name(Class):
+if 'exact_class_applevel_name' in Class.__dict__:
+return Class.exact_class_applevel_name
 try:
 return Class.typedef.name
 except AttributeError:
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -709,6 +709,7 @@
 self.func__args__ = func
 elif unwrap_spec == [self_type, ObjSpace, Arguments]:
 self.__class__ = BuiltinCodePassThroughArguments1
+self.descr_reqcls = self_type
 miniglobals = {'func': func, 'self_type': self_type}
 d = {}
 source = """if 1:
@@ -754,10 +755,7 @@
 except DescrMismatch:
 if w_obj is not None:
 args = args.prepend(w_obj)
-return scope_w[0].descr_call_mismatch(space,
-  self.descrmismatch_op,
-  self.descr_reqcls,
-  args)
+return self._type_unwrap_mismatch(space, args)
 except Exception as e:
 self.handle_exception(space, e)
 w_result = None
@@ -765,6 +763,15 @@
 w_result = space.w_None
 return w_result
 
+def _type_unwrap_mismatch(self, space, args):
+w_obj = args.firstarg()
+if w_obj is None:
+raise oefmt(space.w_SystemError, "unexpected DescrMismatch error")
+return w_obj.descr_call_mismatch(space,
+ self.descrmismatch_op,
+ self.descr_reqcls,
+ args)
+
 def handle_exception(self, space, e):
 try:
 if not we_are_translated():
@@ -787,10 +794,7 @@
 try:
 w_result = self.func__args__(space, args)
 except DescrMismatch:
-return args.firstarg().descr_call_mismatch(space,
-  self.descrmismatch_op,
-  self.descr_reqcls,
-  args)
+return self._type_unwrap_mismatch(space, args)
 except Exception as e:
 self.handle_exception(space, e)
 w_result = None
@@ -808,10 +812,7 @@
 try:
 w_result = self.func__args__(space, w_obj, args)
 except DescrMismatch:
-return args.firstarg().descr_call_mismatch(space,
-  self.descrmismatch_op,
-  self.descr_reqcls,
-  args.prepend(w_obj))
+return self._type_unwrap_mismatch(space, args.prepend(w_obj))
 except Exception as e:
 self.handle_exception(space, e)
 w_result = None
@@ -851,9 +852,7 @@
 try:
 w_result = 

[pypy-commit] pypy default: merge json-decoder-maps:

2019-09-21 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r97581:89a3715662ef
Date: 2019-09-21 21:06 +0200
http://bitbucket.org/pypy/pypy/changeset/89a3715662ef/

Log: merge json-decoder-maps:

implement a much faster json decoder (3x speedup for large json
files, 2x less memory) used techniques:

- SWAR (SIMD within a register) techniques for finding the end of
whitespace and the end of strings
- cache strings aggressively
- use maps for representing the resulting objects.
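
The SWAR idea from the first bullet can be sketched in ordinary Python; this illustrates the bit trick only and is not the code in pypy/module/_pypyjson/simd.py (Python 2 syntax, to match the code base):

    import struct

    ONES  = 0x0101010101010101
    HIGHS = 0x8080808080808080
    MASK  = 0xFFFFFFFFFFFFFFFF

    def word_contains_byte(word, byte):
        # classic "does this 64-bit word contain a zero byte?" trick,
        # applied to word XOR broadcast(byte)
        x = (word ^ (byte * ONES)) & MASK
        return ((x - ONES) & ~x & HIGHS & MASK) != 0

    def find_string_end(buf, pos):
        # skip 8 bytes at a time until a chunk may hold '"' or '\',
        # then finish that chunk byte by byte
        while pos + 8 <= len(buf):
            word, = struct.unpack_from("<Q", buf, pos)
            if word_contains_byte(word, ord('"')) or word_contains_byte(word, ord('\\')):
                break
            pos += 8
        while pos < len(buf) and buf[pos] not in '"\\':
            pos += 1
        return pos

    assert find_string_end('a long json string body"...', 0) == 23

The real decoder uses the same kind of whole-word test to skip runs of whitespace and to find the closing quote of a string without examining every byte individually.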

diff too long, truncating to 2000 out of 2102 lines

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,3 +5,9 @@
 .. this is a revision shortly after release-pypy-7.2.0
 .. startrev: 78cd4acbcbec 
 
+
+.. branch: json-decoder-maps
+
+Much faster and more memory-efficient JSON decoding. The resulting
+dictionaries that come out of the JSON decoder have faster lookups too.
+
diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py
--- a/pypy/module/_pypyjson/interp_decoder.py
+++ b/pypy/module/_pypyjson/interp_decoder.py
@@ -1,11 +1,13 @@
 import sys
 from rpython.rlib.rstring import StringBuilder
-from rpython.rlib.objectmodel import specialize, always_inline, r_dict
-from rpython.rlib import rfloat, rutf8
+from rpython.rlib.objectmodel import specialize, always_inline
+from rpython.rlib import rfloat, runicode, jit, objectmodel, rutf8
 from rpython.rtyper.lltypesystem import lltype, rffi
 from rpython.rlib.rarithmetic import r_uint
 from pypy.interpreter.error import oefmt
 from pypy.interpreter import unicodehelper
+from pypy.interpreter.baseobjspace import W_Root
+from pypy.module._pypyjson import simd
 
 OVF_DIGITS = len(str(sys.maxint))
 
@@ -15,45 +17,107 @@
 # precomputing negative powers of 10 is MUCH faster than using e.g. math.pow
 # at runtime
 NEG_POW_10 = [10.0**-i for i in range(16)]
+del i
+
 def neg_pow_10(x, exp):
 if exp >= len(NEG_POW_10):
 return 0.0
 return x * NEG_POW_10[exp]
 
-def slice_eq(a, b):
-(ll_chars1, start1, length1, _) = a
-(ll_chars2, start2, length2, _) = b
-if length1 != length2:
-return False
-j = start2
-for i in range(start1, start1 + length1):
-if ll_chars1[i] != ll_chars2[j]:
-return False
-j += 1
-return True
 
-def slice_hash(a):
-(ll_chars, start, length, h) = a
-return h
+class IntCache(object):
+""" A cache for wrapped ints between START and END """
 
-TYPE_UNKNOWN = 0
-TYPE_STRING = 1
-class JSONDecoder(object):
+# I also tried various combinations of having an LRU cache for ints as
+# well, didn't really help.
+
+# XXX one thing to do would be to use withintprebuilt in general again,
+# hidden behind a 'we_are_jitted'
+
+START = -10
+END = 256
+
+def __init__(self, space):
+self.space = space
+self.cache = [self.space.newint(i)
+for i in range(self.START, self.END)]
+
+def newint(self, intval):
+if self.START <= intval < self.END:
+return self.cache[intval - self.START]
+return self.space.newint(intval)
+
+
+class JSONDecoder(W_Root):
+
+LRU_SIZE = 16
+LRU_MASK = LRU_SIZE - 1
+
+DEFAULT_SIZE_SCRATCH = 20
+
+# string caching is only used if the total size of the message is larger
+# than a megabyte. Below that, there can't be that many repeated big
+# strings anyway (some experiments showed this to be a reasonable cutoff
+# size)
+MIN_SIZE_FOR_STRING_CACHE = 1024 * 1024
+
+# evaluate the string cache for 200 strings, before looking at the hit rate
+# and deciding whether to keep doing it
+STRING_CACHE_EVALUATION_SIZE = 200
+
+# keep using the string cache if at least 25% of all decoded strings are a
+# hit in the cache
+STRING_CACHE_USEFULNESS_FACTOR = 4
+
+
 def __init__(self, space, s):
 self.space = space
+self.w_empty_string = space.newutf8("", 0)
+
 self.s = s
+
 # we put our string in a raw buffer so:
 # 1) we automatically get the '\0' sentinel at the end of the string,
 #which means that we never have to check for the "end of string"
 # 2) we can pass the buffer directly to strtod
-self.ll_chars = rffi.str2charp(s)
+self.ll_chars, self.llobj, self.flag = rffi.get_nonmovingbuffer_ll_final_null(self.s)
 self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
 self.pos = 0
-self.cache = r_dict(slice_eq, slice_hash, simple_hash_eq=True)
+self.intcache = space.fromcache(IntCache)
+
+# two caches, one for keys, one for general strings. they both have the
+# form {hash-as-int: StringCacheEntry} and they don't deal with
+# collisions at all. For every hash there is simply one string stored
+# and we ignore collisions.
+

[pypy-commit] pypy default: merge heads

2019-09-12 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r97462:5e5857c2fae6
Date: 2019-09-12 16:52 +0200
http://bitbucket.org/pypy/pypy/changeset/5e5857c2fae6/

Log: merge heads

diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -60,12 +60,6 @@
 return encode_object(space, w_data, encoding, errors)
 
 
-def _has_surrogate(u):
-for c in u:
-if 0xD800 <= ord(c) <= 0xDFFF:
-return True
-return False
-
 # These functions take and return unwrapped rpython strings
 def decode_unicode_escape(space, string):
 from pypy.module._codecs import interp_codecs
diff --git a/pypy/module/_io/interp_textio.py b/pypy/module/_io/interp_textio.py
--- a/pypy/module/_io/interp_textio.py
+++ b/pypy/module/_io/interp_textio.py
@@ -433,7 +433,10 @@
 end = len(self.text)
 else:
 end = self.pos + limit
-pos = self.text.find(marker, self.pos, end)
+pos = self.pos
+assert pos >= 0
+assert end >= 0
+pos = self.text.find(marker, pos, end)
 if pos >= 0:
 self.pos = self.upos = pos + 1
 return True
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -874,11 +874,6 @@
 def is_ascii(self):
 return self._length == len(self._utf8)
 
-def _has_surrogates(self):
-if self.is_ascii():
-return False
-return rutf8.has_surrogates(self._utf8)
-
 def _index_to_byte(self, index):
 if self.is_ascii():
 assert index >= 0
diff --git a/rpython/rlib/rutf8.py b/rpython/rlib/rutf8.py
--- a/rpython/rlib/rutf8.py
+++ b/rpython/rlib/rutf8.py
@@ -435,10 +435,17 @@
 return result
 
 def has_surrogates(utf8):
-# XXX write a faster version maybe
-for ch in Utf8StringIterator(utf8):
-if 0xD800 <= ch <= 0xDBFF:
+# a surrogate starts with 0xed in utf-8 encoding
+pos = 0
+while True:
+pos = utf8.find("\xed", pos)
+if pos < 0:
+return False
+assert pos <= len(utf8) - 1 # otherwise invalid utf-8
+ordch2 = ord(utf8[pos + 1])
+if _invalid_byte_2_of_3(0xed, ordch2, allow_surrogates=False):
 return True
+pos += 1
 return False
 
 def reencode_utf8_with_surrogates(utf8):
diff --git a/rpython/rlib/test/test_rutf8.py b/rpython/rlib/test/test_rutf8.py
--- a/rpython/rlib/test/test_rutf8.py
+++ b/rpython/rlib/test/test_rutf8.py
@@ -238,3 +238,17 @@
 assert pos == i
 i = rutf8.next_codepoint_pos(utf8s, i)
 assert list(arg) == l
+
+
+@given(strategies.text(), strategies.integers(0xd800, 0xdfff))
+def test_has_surrogates(arg, surrogate):
+b = (arg + unichr(surrogate) + arg).encode("utf-8")
+assert not rutf8.has_surrogates(arg.encode("utf-8"))
+assert rutf8.has_surrogates(unichr(surrogate).encode("utf-8"))
+assert rutf8.has_surrogates(b)
+
+def test_has_surrogate_xed_no_surrogate():
+u = unichr(55217) + unichr(54990)
+b = u.encode("utf-8")
+assert b.startswith(b"\xed")
+assert not rutf8.has_surrogates(b)
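Why the new rutf8.has_surrogates can key on 0xed: every code point in the surrogate range U+D800-U+DFFF (which Python 2 happily encodes) becomes a three-byte UTF-8 sequence whose lead byte is 0xed, and within well-formed UTF-8 only the second byte separates surrogates from the legitimate range U+D000-U+D7FF. A rough pure-Python mirror of the check, assuming valid UTF-8 input (the real code delegates the second-byte test to _invalid_byte_2_of_3):

    def has_surrogates(utf8):
        pos = utf8.find('\xed')
        while pos >= 0:
            # lead byte 0xed covers U+D000-U+DFFF; the surrogates are exactly
            # the ones whose continuation byte is 0xa0-0xbf
            if ord(utf8[pos + 1]) >= 0xa0:
                return True
            pos = utf8.find('\xed', pos + 1)
        return False

    assert has_surrogates(u'\ud800'.encode('utf-8'))           # '\xed\xa0\x80'
    assert not has_surrogates(unichr(55217).encode('utf-8'))   # '\xed\x9e\xb1', the case in the test above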


[pypy-commit] pypy default: merge openssl-for-macos which updates _ssl to openssl-1.1.1c and binds statically

2019-09-10 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97427:849070e3fadb
Date: 2019-09-11 08:43 +0300
http://bitbucket.org/pypy/pypy/changeset/849070e3fadb/

Log: merge openssl-for-macos which updates _ssl to openssl-1.1.1c and binds statically

also always rebuild cffi modules when packaging

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -70,3 +70,7 @@
 .. branch: compile_ncurses_tcl_tk_suse_latest
 
Check for headers and runtime libraries in more locations to support other linuxes
+
+.. branch: openssl-for-macos
+
+Update _ssl on macos to statically link to openssl-1.1.1c
\ No newline at end of file
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -351,7 +351,7 @@
 ''' Use cffi to compile cffi interfaces to modules'''
 filename = os.path.join(pypydir, 'tool', 'build_cffi_imports.py')
 status, out, err = run_subprocess(str(driver.compute_exe_name()),
-  [filename])
+  [filename, '--embed-dependencies'])
 sys.stdout.write(out)
 sys.stderr.write(err)
 # otherwise, ignore errors
diff --git a/pypy/tool/build_cffi_imports.py b/pypy/tool/build_cffi_imports.py
--- a/pypy/tool/build_cffi_imports.py
+++ b/pypy/tool/build_cffi_imports.py
@@ -22,9 +22,9 @@
 # for distribution, we may want to fetch dependencies not provided by
 # the OS, such as a recent openssl/libressl.
 cffi_dependencies = {
-'_ssl': 
('http://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.6.2.tar.gz',
-'b029d2492b72a9ba5b5fcd9f3d602c9fd0baa087912f2aaecc28f52f567ec478',
-['--without-openssldir']),
+'_ssl': ('https://www.openssl.org/source/openssl-1.1.1c.tar.gz',
+'f6fb3079ad15076154eda9413fed42877d668e7069d9b87396d0804fdb3f4c90',
+['no-shared']),
 '_gdbm': ('http://ftp.gnu.org/gnu/gdbm/gdbm-1.13.tar.gz',
   
'9d252cbd7d793f7b12bcceaddda98d257c14f4d1890d851c386c37207000a253',
   ['--without-readline']),
@@ -106,12 +106,9 @@
 
 # configure & build it
 status, stdout, stderr = run_subprocess(
-'./configure',
+'./config',
 [
 '--prefix=/usr',
-'--disable-shared',
-'--enable-silent-rules',
-'--disable-dependency-tracking',
 ] + args,
 cwd=sources,
 )
@@ -125,16 +122,25 @@
 'make',
 [
 '-s', '-j' + str(multiprocessing.cpu_count()),
+],
+cwd=sources,
+)
+if status != 0:
+return status, stdout, stderr
+
+print('installing to', destdir, file=sys.stderr)
+status, stdout, stderr = run_subprocess(
+'make',
+[
 'install', 'DESTDIR={}/'.format(destdir),
 ],
 cwd=sources,
 )
-
 return status, stdout, stderr
 
 
 def create_cffi_import_libraries(pypy_c, options, basedir, only=None,
- embed_dependencies=False):
+ embed_dependencies=False, rebuild=False):
 from rpython.tool.runsubprocess import run_subprocess
 
 shutil.rmtree(str(join(basedir,'lib_pypy','__pycache__')),
@@ -153,12 +159,13 @@
 continue
 if module is None or getattr(options, 'no_' + key, False):
 continue
-# the key is the module name, has it already been built?
-status, stdout, stderr = run_subprocess(str(pypy_c), ['-c', 'import %s' % key])
-if status  == 0:
-print('*', ' %s already built' % key, file=sys.stderr)
-continue
-
+if not rebuild:
+# the key is the module name, has it already been built?
+status, stdout, stderr = run_subprocess(str(pypy_c), ['-c', 'import %s' % key])
+if status  == 0:
+print('*', ' %s already built' % key, file=sys.stderr)
+continue
+
 if module.endswith('.py'):
 args = [module]
 cwd = str(join(basedir,'lib_pypy'))
@@ -175,18 +182,7 @@
 shutil.rmtree(destdir, ignore_errors=True)
 os.makedirs(destdir)
 
-if key == '_ssl' and sys.platform == 'darwin':
-# this patch is loosely inspired by an Apple and adds
-# a fallback to the OS X roots when none are available
-patches = [
-os.path.join(curdir,
- '../../lib_pypy/_cffi_ssl/osx-roots.diff'),
-]
-else:
-patches = []
-
-status, stdout, stderr = _build_dependency(key, destdir,
-   patches=patches)
+status, stdout, stderr = 

[pypy-commit] pypy default: merge compile_ncurses_tcl_tk_suse_latest which adds other locations to search

2019-09-09 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97412:6ff327526dc7
Date: 2019-09-10 08:34 +0300
http://bitbucket.org/pypy/pypy/changeset/6ff327526dc7/

Log: merge compile_ncurses_tcl_tk_suse_latest which adds other locations to search

diff --git a/lib_pypy/_curses_build.py b/lib_pypy/_curses_build.py
--- a/lib_pypy/_curses_build.py
+++ b/lib_pypy/_curses_build.py
@@ -1,4 +1,15 @@
 from cffi import FFI
+import os
+
+# On some systems, the ncurses library is
+# located at /usr/include/ncurses, so we must check this case.
+# Let's iterate over well known paths
+incdirs =  []
+for _path in ['/usr/include', '/usr/include/ncurses']:
+if os.path.isfile(os.path.join(_path, 'panel.h')):
+incdirs.append(_path)
+break
+
 
 ffi = FFI()
 
@@ -10,6 +21,13 @@
 #define NCURSES_OPAQUE 0
 #endif
 
+
+/* ncurses 6 change behaviour  and makes all pointers opaque, 
+  lets define backward compatibility. It doesn't harm 
+  previous versions */
+
+#define NCURSES_INTERNALS 1
+#define NCURSES_REENTRANT 0
 #include 
 #include 
 #include 
@@ -41,7 +59,8 @@
 void _m_getsyx(int *yx) {
 getsyx(yx[0], yx[1]);
 }
-""", libraries=['ncurses', 'panel'])
+""", include_dirs=incdirs, 
+ libraries=['ncurses', 'panel'])
 
 
 ffi.cdef("""
diff --git a/lib_pypy/_tkinter/tklib_build.py b/lib_pypy/_tkinter/tklib_build.py
--- a/lib_pypy/_tkinter/tklib_build.py
+++ b/lib_pypy/_tkinter/tklib_build.py
@@ -36,8 +36,11 @@
 for _ver in ['8.6', '8.5', '']:
 incdirs = []
 linklibs = ['tcl' + _ver, 'tk' + _ver]
-if os.path.isfile(''.join(['/usr/lib/lib', linklibs[1], '.so'])):
-found = True
+for lib in ['/usr/lib/lib', '/usr/lib64/lib']: 
+if os.path.isfile(''.join([lib, linklibs[1], '.so'])):
+found = True
+break
+if found:
 break
 if not found:
 sys.stderr.write("*** TCL libraries not found!  Falling back...\n")
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -66,3 +66,7 @@
 .. branch: cryptograhpt-2.7
 
 Update vendored cryptography used for _ssl to 2.7
+
+.. branch: compile_ncurses_tcl_tk_suse_latest
+
Check for headers and runtime libraries in more locations to support other linuxes


[pypy-commit] pypy default: merge heads

2019-08-22 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r97242:bacea0b28bb3
Date: 2019-08-22 13:15 +0200
http://bitbucket.org/pypy/pypy/changeset/bacea0b28bb3/

Log: merge heads

diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -226,6 +226,8 @@
 print 'turn off the JIT'
 print ' help'
 print 'print this page'
+print
+print 'The "pypyjit" module can be used to control the JIT from inside 
python'
 
 def print_version(*args):
 print >> sys.stderr, "Python", sys.version


[pypy-commit] pypy default: merge cryptograhpt-2.7 which updates vendored cryptography to 2.7

2019-08-14 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r97174:5921676473ae
Date: 2019-08-14 17:01 +0300
http://bitbucket.org/pypy/pypy/changeset/5921676473ae/

Log: merge cryptograhpt-2.7 which updates vendored cryptography to 2.7

diff too long, truncating to 2000 out of 25452 lines

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -70,7 +70,9 @@
 ^lib_pypy/ctypes_config_cache/_.+_cache\.py$
 ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$
 ^lib_pypy/_libmpdec/.+.o$
-^lib_pypy/.+.c$
+^lib_pypy/.+_cffi.c$
+^lib_pypy/_curses_cffi_check.c
+^lib_pypy/_pypy_openssl.c
 ^lib_pypy/.+.o$
 ^lib_pypy/.+.so$
 ^lib_pypy/.+.pyd$
diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md
--- a/lib_pypy/_cffi_ssl/README.md
+++ b/lib_pypy/_cffi_ssl/README.md
@@ -1,21 +1,22 @@
 # PyPy's SSL module
 
-All of the CFFI code is copied from cryptography, wich patches contributed
-back to cryptography. PyPy vendors it's own copy of the cffi backend thus
-it renames the compiled shared object to _pypy_openssl.so (which means
-that cryptography can ship their own cffi backend)
+All of the CFFI code is copied from cryptography. PyPy vendors it's own copy of
+the cffi backend thus it renames the compiled shared object to _pypy_openssl.so
+(which means that cryptography can ship their own cffi backend)
 
-NOTE: currently, we have the following changes:
+# Modifications to cryptography 2.7
 
-* ``_cffi_src/openssl/callbacks.py`` to not rely on the CPython C API
-  (this change is now backported)
-
-* ``_cffi_src/utils.py`` for issue #2575 (29c9a89359e4)
-
-* ``_cffi_src/openssl/x509_vfy.py`` for issue #2605 (ca4d0c90f5a1)
-
-* ``_cffi_src/openssl/pypy_win32_extra.py`` for Win32-only functionality like 
ssl.enum_certificates()
-
+- `_cffi_src/openssl/asn1.py` : revert removal of `ASN1_TIME_print`,
+  `ASN1_ITEM`, `ASN1_ITEM_EXP`, `ASN1_VALUE`, `ASN1_item_d2i`
+- `_cffi_src/openssl/bio.py` : revert removal of `BIO_s_file`, 
`BIO_read_filename`
+- `_cffi_src/openssl/evp.py` : revert removal of `EVP_MD_size`
+- `_cffi_src/openssl/nid.py` : revert removal of `NID_ad_OCSP`,
+  `NID_info_access`, `NID_ad_ca_issuers`, `NID_crl_distribution_points`
+- `_cffi_src/openssl/pem.py` : revert removal of `PEM_read_bio_X509_AUX`
+- `_cffi_src/openssl/x509.py` : revert removal of `X509_get_ext_by_NID`,
+  `i2d_X509`
+- `_cffi_src/openssl/x509v3.py` : revert removal of `X509V3_EXT_get`,
+  `X509V3_EXT_METHOD`
 
 # Tests?
 
@@ -25,11 +26,8 @@
 
 Copy over all the sources into the folder `lib_pypy/_cffi_ssl/*`. Updating the 
cffi backend can be simply done by the following command::
 
-$ cp -r /src/_cffi_src/* .
-
-NOTE: you need to keep our version of ``_cffi_src/openssl/callbacks.py``
-for now!
+$ cp -r /src/* .
 
 # Crpytography version
 
-Copied over release version `1.7.2`
+Copied over release version `2.7`
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py b/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py
deleted file mode 100644
--- a/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from __future__ import absolute_import, division, print_function
-
-from _cffi_src.utils import build_ffi_for_binding
-
-
-ffi = build_ffi_for_binding(
-module_name="_commoncrypto",
-module_prefix="_cffi_src.commoncrypto.",
-modules=[
-"cf",
-"common_digest",
-"common_hmac",
-"common_key_derivation",
-"common_cryptor",
-"common_symmetric_key_wrap",
-"seccertificate",
-"secimport",
-"secitem",
-"seckey",
-"seckeychain",
-"secpolicy",
-"sectransform",
-"sectrust",
-"secure_transport",
-],
-extra_link_args=[
-"-framework", "Security", "-framework", "CoreFoundation"
-],
-)
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py b/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py
--- a/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py
+++ b/lib_pypy/_cffi_ssl/_cffi_src/build_openssl.py
@@ -13,31 +13,43 @@
 
 
 def _get_openssl_libraries(platform):
+if os.environ.get("CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS", None):
+return []
 # OpenSSL goes by a different library name on different operating systems.
-if platform == "darwin":
-return _osx_libraries(
-os.environ.get("CRYPTOGRAPHY_OSX_NO_LINK_FLAGS")
+if platform == "win32" and compiler_type() == "msvc":
+windows_link_legacy_openssl = os.environ.get(
+"CRYPTOGRAPHY_WINDOWS_LINK_LEGACY_OPENSSL", None
 )
-elif platform == "win32":
-if compiler_type() == "msvc":
+if windows_link_legacy_openssl is None:
+# Link against the 1.1.0 names
+libs = ["libssl", "libcrypto"]
+else:
+# Link against the 1.0.2 

[pypy-commit] pypy default: merge heads

2019-08-09 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r97117:b14e43faf847
Date: 2019-08-09 15:20 +0200
http://bitbucket.org/pypy/pypy/changeset/b14e43faf847/

Log: merge heads

diff --git a/pypy/objspace/std/test/apptest_longobject.py b/pypy/objspace/std/test/apptest_longobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/test/apptest_longobject.py
@@ -0,0 +1,409 @@
+from pytest import raises
+import sys
+import math
+import operator
+
+def test_trunc():
+assert math.trunc(1L) == 1L
+assert math.trunc(-1L) == -1L
+
+def test_add():
+x = 123L
+assert int(x + 12443L) == 123 + 12443
+x = -20
+assert x + 2 + 3L + True == -14L
+
+def test_sub():
+assert int(58543L - 12332L) == 58543 - 12332
+assert int(58543L - 12332) == 58543 - 12332
+assert int(58543 - 12332L) == 58543 - 12332
+x = 237123838281233L
+assert x * 12 == x * 12L
+
+def test_mul():
+x = 363L
+assert x * 2 ** 40 == x << 40
+
+def test_truediv():
+exec "from __future__ import division; a = 31415926L / 1000L"
+assert a == 3.1415926
+
+def test_floordiv():
+x = 31415926L
+a = x // 1000L
+assert a == 3L
+
+def test_int_floordiv():
+x = 3000L
+a = x // 1000
+assert a == 3L
+
+x = 3000L
+a = x // -1000
+assert a == -3L
+
+x = 3000L
+raises(ZeroDivisionError, "x // 0")
+
+n = sys.maxint+1
+assert n / int(-n) == -1L
+
+def test_numerator_denominator():
+assert (1L).numerator == 1L
+assert (1L).denominator == 1L
+assert (42L).numerator == 42L
+assert (42L).denominator == 1L
+
+def test_compare():
+Z = 0
+ZL = 0L
+
+assert Z == ZL
+assert not (Z != ZL)
+assert ZL == Z
+assert not (ZL != Z)
+assert Z <= ZL
+assert not (Z < ZL)
+assert ZL <= ZL
+assert not (ZL < ZL)
+
+for BIG in (1L, 1L << 62, 1L << ):
+assert not (Z == BIG)
+assert Z != BIG
+assert not (BIG == Z)
+assert BIG != Z
+assert not (ZL == BIG)
+assert ZL != BIG
+assert Z <= BIG
+assert Z < BIG
+assert not (BIG <= Z)
+assert not (BIG < Z)
+assert ZL <= BIG
+assert ZL < BIG
+assert not (BIG <= ZL)
+assert not (BIG < ZL)
+assert not (Z <= -BIG)
+assert not (Z < -BIG)
+assert -BIG <= Z
+assert -BIG < Z
+assert not (ZL <= -BIG)
+assert not (ZL < -BIG)
+assert -BIG <= ZL
+assert -BIG < ZL
+#
+assert not (BIG <  int(BIG))
+assert (BIG <= int(BIG))
+assert (BIG == int(BIG))
+assert not (BIG != int(BIG))
+assert not (BIG >  int(BIG))
+assert (BIG >= int(BIG))
+#
+assert (BIG <  int(BIG)+1)
+assert (BIG <= int(BIG)+1)
+assert not (BIG == int(BIG)+1)
+assert (BIG != int(BIG)+1)
+assert not (BIG >  int(BIG)+1)
+assert not (BIG >= int(BIG)+1)
+#
+assert not (BIG <  int(BIG)-1)
+assert not (BIG <= int(BIG)-1)
+assert not (BIG == int(BIG)-1)
+assert (BIG != int(BIG)-1)
+assert (BIG >  int(BIG)-1)
+assert (BIG >= int(BIG)-1)
+#
+assert not (int(BIG) <  BIG)
+assert (int(BIG) <= BIG)
+assert (int(BIG) == BIG)
+assert not (int(BIG) != BIG)
+assert not (int(BIG) >  BIG)
+assert (int(BIG) >= BIG)
+#
+assert not (int(BIG)+1 <  BIG)
+assert not (int(BIG)+1 <= BIG)
+assert not (int(BIG)+1 == BIG)
+assert (int(BIG)+1 != BIG)
+assert (int(BIG)+1 >  BIG)
+assert (int(BIG)+1 >= BIG)
+#
+assert (int(BIG)-1 <  BIG)
+assert (int(BIG)-1 <= BIG)
+assert not (int(BIG)-1 == BIG)
+assert (int(BIG)-1 != BIG)
+assert not (int(BIG)-1 >  BIG)
+assert not (int(BIG)-1 >= BIG)
+
+def test_conversion():
+class long2(long):
+pass
+x = 1L
+x = long2(x<<100)
+y = int(x)
+assert type(y) == long
+assert type(+long2(5)) is long
+assert type(long2(5) << 0) is long
+assert type(long2(5) >> 0) is long
+assert type(long2(5) + 0) is long
+assert type(long2(5) - 0) is long
+assert type(long2(5) * 1) is long
+assert type(1 * long2(5)) is long
+assert type(0 + long2(5)) is long
+assert type(-long2(0)) is long
+assert type(long2(5) // 1) is long
+
+def test_shift():
+assert 65l >> 2l == 16l
+assert 65l >> 2 == 16l
+assert 65 >> 2l == 16l
+assert 65l << 2l == 65l * 4
+assert 65l << 2 == 65l * 4
+assert 65 << 2l == 65l * 4
+raises(ValueError, "1L << -1L")
+raises(ValueError, "1L << -1")
+raises(OverflowError, "1L << (2 ** 100)")
+raises(ValueError, "1L >> -1L")
+raises(ValueError, "1L >> -1")
+raises(OverflowError, "1L >> (2 ** 100)")
+
+def test_pow():
+x = 0L
+assert pow(x, 

[pypy-commit] pypy default: merge heads

2019-08-06 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r97069:6b6a9bdd8658
Date: 2019-08-06 14:17 +0200
http://bitbucket.org/pypy/pypy/changeset/6b6a9bdd8658/

Log: merge heads

diff --git a/pypy/module/__builtin__/test/test_compile.py b/pypy/module/__builtin__/test/apptest_compile.py
rename from pypy/module/__builtin__/test/test_compile.py
rename to pypy/module/__builtin__/test/apptest_compile.py
--- a/pypy/module/__builtin__/test/test_compile.py
+++ b/pypy/module/__builtin__/test/apptest_compile.py
@@ -1,81 +1,82 @@
-class AppTestCompile:
-def test_simple(self):
-import sys
-co = compile('1+2', '?', 'eval')
+from pytest import raises
+import sys
+
+def test_simple():
+co = compile('1+2', '?', 'eval')
+assert eval(co) == 3
+co = compile(buffer('1+2'), '?', 'eval')
+assert eval(co) == 3
+exc = raises(TypeError, compile, chr(0), '?', 'eval')
+assert str(exc.value) == "compile() expected string without null bytes"
+exc = raises(TypeError, compile, unichr(0), '?', 'eval')
+assert str(exc.value) == "compile() expected string without null bytes"
+
+if '__pypy__' in sys.modules:
+co = compile(memoryview('1+2'), '?', 'eval')
 assert eval(co) == 3
-co = compile(buffer('1+2'), '?', 'eval')
-assert eval(co) == 3
-exc = raises(TypeError, compile, chr(0), '?', 'eval')
-assert str(exc.value) == "compile() expected string without null bytes"
-exc = raises(TypeError, compile, unichr(0), '?', 'eval')
-assert str(exc.value) == "compile() expected string without null bytes"
+compile("from __future__ import with_statement", "", "exec")
+raises(SyntaxError, compile, '-', '?', 'eval')
+raises(ValueError, compile, '"\\xt"', '?', 'eval')
+raises(ValueError, compile, '1+2', '?', 'maybenot')
+raises(ValueError, compile, "\n", "", "exec", 0xff)
+raises(TypeError, compile, '1+2', 12, 34)
 
-if '__pypy__' in sys.modules:
-co = compile(memoryview('1+2'), '?', 'eval')
-assert eval(co) == 3
-compile("from __future__ import with_statement", "", "exec")
-raises(SyntaxError, compile, '-', '?', 'eval')
-raises(ValueError, compile, '"\\xt"', '?', 'eval')
-raises(ValueError, compile, '1+2', '?', 'maybenot')
-raises(ValueError, compile, "\n", "", "exec", 0xff)
-raises(TypeError, compile, '1+2', 12, 34)
+def test_error_message():
+import re
+compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
+compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
+compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
+exc = raises(SyntaxError, compile,
+b'# -*- coding: fake -*-\n', 'dummy', 'exec')
+assert 'fake' in str(exc.value)
+exc = raises(SyntaxError, compile,
+b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
+assert 'iso-8859-15' in str(exc.value)
+assert 'BOM' in str(exc.value)
+exc = raises(SyntaxError, compile,
+b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
+assert 'fake' in str(exc.value)
+assert 'BOM' in str(exc.value)
 
-def test_error_message(self):
-import re
-compile('# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
-compile(b'\xef\xbb\xbf\n', 'dummy', 'exec')
-compile(b'\xef\xbb\xbf# -*- coding: utf-8 -*-\n', 'dummy', 'exec')
-exc = raises(SyntaxError, compile,
-b'# -*- coding: fake -*-\n', 'dummy', 'exec')
-assert 'fake' in str(exc.value)
-exc = raises(SyntaxError, compile,
-b'\xef\xbb\xbf# -*- coding: iso-8859-15 -*-\n', 'dummy', 'exec')
-assert 'iso-8859-15' in str(exc.value)
-assert 'BOM' in str(exc.value)
-exc = raises(SyntaxError, compile,
-b'\xef\xbb\xbf# -*- coding: fake -*-\n', 'dummy', 'exec')
-assert 'fake' in str(exc.value)
-assert 'BOM' in str(exc.value)
+def test_unicode():
+try:
+compile(u'-', '?', 'eval')
+except SyntaxError as e:
+assert e.lineno == 1
 
-def test_unicode(self):
-try:
-compile(u'-', '?', 'eval')
-except SyntaxError as e:
-assert e.lineno == 1
+def test_unicode_encoding():
+code = u"# -*- coding: utf-8 -*-\npass\n"
+raises(SyntaxError, compile, code, "tmp", "exec")
 
-def test_unicode_encoding(self):
-code = u"# -*- coding: utf-8 -*-\npass\n"
-raises(SyntaxError, compile, code, "tmp", "exec")
+def test_recompile_ast():
+import _ast
+# raise exception when node type doesn't match with compile mode
+co1 = compile('print 1', '', 'exec', _ast.PyCF_ONLY_AST)
+raises(TypeError, compile, co1, '', 'eval')
+co2 = compile('1+1', '', 'eval', _ast.PyCF_ONLY_AST)
+tree = compile(co2, '', 'eval')
+assert compile(co2, '', 'eval', _ast.PyCF_ONLY_AST) is co2
 
-def test_recompile_ast(self):
-import _ast
-# raise exception when node 

[pypy-commit] pypy default: merge arm64 support

2019-07-15 Thread fijal
Author: fijal
Branch: 
Changeset: r97005:f3b7650ebfc0
Date: 2019-07-15 17:00 +0200
http://bitbucket.org/pypy/pypy/changeset/f3b7650ebfc0/

Log: merge arm64 support

diff too long, truncating to 2000 out of 6406 lines

diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
--- a/rpython/config/translationoption.py
+++ b/rpython/config/translationoption.py
@@ -1,5 +1,6 @@
 import sys
 import os
+import platform as _stdlib_platform
 from rpython.config.config import OptionDescription, BoolOption, IntOption, 
ArbitraryOption, FloatOption
 from rpython.config.config import ChoiceOption, StrOption, Config, 
ConflictConfigError
 from rpython.config.config import ConfigError
@@ -30,7 +31,9 @@
 False)
 # Windows doesn't work.  Please
 # add other platforms here if it works on them.
-
+MACHINE = _stdlib_platform.machine()
+if MACHINE == 'aarch64':
+SUPPORT__THREAD = False
 # (*) NOTE: __thread on OS/X does not work together with
 # pthread_key_create(): when the destructor is called, the __thread is
 # already freed!
diff --git a/rpython/jit/backend/aarch64/TODO b/rpython/jit/backend/aarch64/TODO
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/aarch64/TODO
@@ -0,0 +1,35 @@
+* cond_call and following guard_exception
+
+
+* We can try to make generate_quick_failure() emit two instructions less:
+  the two store_reg() [one in generate_quick_failure and the other in
+  push_gcmap].  Instead we'd load the values in ip2 and ip3, and the
+  store_regs would occur inside self.failure_recovery_code
+  (which 'target' points to).
+
+
+* use STP instead of STR in all long sequences of STR.  Same with LDR
+
+* use "STR xzr, [..]" instead of "gen_load_int(ip, 0); STR ip, [..]".
+  Search around for gen_load_int(...0): it occurs at least in pop_gcmap()
+  _build_failure_recovery(), build_frame_realloc_slowpath(), etc.
+
+
+* malloc_cond() and malloc_cond_varsize_frame() hard-code forward jump
+  distances by guessing the number of instructions that follows.  Bad
+  idea because some of these instructions could easily be optimized in
+  the future to be a bit shorter.  Rewrite this two places to use the
+  proper way instead of a magic "40" (or at least assert that it was
+  really 40).
+
+
+* use "CBNZ register, offset" (compare-and-branch-if-not-zero)
+  instead of a CMP+BNE pair.  Same with CBZ instead of CMP+BEQ
+
+
+* when we need to save things on the stack, we typically push two words
+  and pop them later.  It would be cheaper if we reserved two locations
+  in the stack from _call_header, then we could just write there.
+  *OR*
+  maybe it's enough if we use the form "str x0, [sp, !#offset]" which
+  combines in a single instruction the "str" with the change of sp
diff --git a/rpython/jit/backend/aarch64/__init__.py b/rpython/jit/backend/aarch64/__init__.py
new file mode 100644
diff --git a/rpython/jit/backend/aarch64/arch.py b/rpython/jit/backend/aarch64/arch.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/aarch64/arch.py
@@ -0,0 +1,14 @@
+
+WORD = 8
+
+# The stack contains the force_index and the, callee saved registers and
+# ABI required information
+# All the rest of the data is in a GC-managed variable-size "frame".
+# This jitframe object's address is always stored in the register FP
+# A jitframe is a jit.backend.llsupport.llmodel.jitframe.JITFRAME
+# Stack frame fixed area
+# Currently only the force_index
+NUM_MANAGED_REGS = 16
+NUM_VFP_REGS = 8
+JITFRAME_FIXED_SIZE = NUM_MANAGED_REGS + NUM_VFP_REGS
+# 16 GPR + 8 VFP Regs, for now
diff --git a/rpython/jit/backend/aarch64/assembler.py b/rpython/jit/backend/aarch64/assembler.py
new file mode 100644
--- /dev/null
+++ b/rpython/jit/backend/aarch64/assembler.py
@@ -0,0 +1,1482 @@
+
+from rpython.jit.backend.aarch64.arch import WORD, JITFRAME_FIXED_SIZE
+from rpython.jit.backend.aarch64.codebuilder import InstrBuilder, 
OverwritingBuilder
+from rpython.jit.backend.aarch64.locations import imm, StackLocation, 
get_fp_offset
+#from rpython.jit.backend.arm.helper.regalloc import VMEM_imm_size
+from rpython.jit.backend.aarch64.opassembler import ResOpAssembler
+from rpython.jit.backend.aarch64.regalloc import (Regalloc, check_imm_arg,
+operations as regalloc_operations, guard_operations, comp_operations,
+CoreRegisterManager, VFPRegisterManager)
+from rpython.jit.backend.aarch64 import registers as r
+from rpython.jit.backend.arm import conditions as c
+from rpython.jit.backend.llsupport import jitframe, rewrite
+from rpython.jit.backend.llsupport.assembler import BaseAssembler
+from rpython.jit.backend.llsupport.regalloc import get_scale, 
valid_addressing_size
+from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
+from rpython.jit.backend.model import CompiledLoopToken
+from rpython.jit.codewriter.effectinfo import EffectInfo
+from rpython.jit.metainterp.history import AbstractFailDescr, FLOAT, INT, VOID
+from rpython.jit.metainterp.resoperation import rop

[pypy-commit] pypy default: merge heads

2019-06-15 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r96808:bc7aeb4d5987
Date: 2019-06-15 09:07 +0200
http://bitbucket.org/pypy/pypy/changeset/bc7aeb4d5987/

Log: merge heads

diff --git a/lib-python/2.7/distutils/sysconfig_pypy.py b/lib-python/2.7/distutils/sysconfig_pypy.py
--- a/lib-python/2.7/distutils/sysconfig_pypy.py
+++ b/lib-python/2.7/distutils/sysconfig_pypy.py
@@ -86,7 +86,7 @@
 arch = platform.machine()
 g['LDSHARED'] += ' -undefined dynamic_lookup'
 g['CC'] += ' -arch %s' % (arch,)
-g['MACOSX_DEPLOYMENT_TARGET'] = 10.14
+g['MACOSX_DEPLOYMENT_TARGET'] = '10.14'
 
 global _config_vars
 _config_vars = g
diff --git a/rpython/rlib/_rsocket_rffi.py b/rpython/rlib/_rsocket_rffi.py
--- a/rpython/rlib/_rsocket_rffi.py
+++ b/rpython/rlib/_rsocket_rffi.py
@@ -1426,10 +1426,10 @@
 return rwin32.FormatError(errno)
 
 def socket_strerror_unicode(errno):
-return rwin32.FormatErrorW(errno)[0]
+return rwin32.FormatErrorW(errno)[0].decode('utf-8')
 
 def gai_strerror_unicode(errno):
-return rwin32.FormatErrorW(errno)[0]
+return rwin32.FormatErrorW(errno)[0].decode('utf-8')
 
 def socket_strerror_utf8(errno):
 return rwin32.FormatErrorW(errno)
diff --git a/rpython/rlib/rvmprof/src/shared/vmp_stack.c b/rpython/rlib/rvmprof/src/shared/vmp_stack.c
--- a/rpython/rlib/rvmprof/src/shared/vmp_stack.c
+++ b/rpython/rlib/rvmprof/src/shared/vmp_stack.c
@@ -82,12 +82,6 @@
 
 static PY_STACK_FRAME_T * _write_python_stack_entry(PY_STACK_FRAME_T * frame, 
void ** result, int * depth, int max_depth)
 {
-int len;
-int addr;
-int j;
-uint64_t line;
-char *lnotab;
-
 #ifndef RPYTHON_VMPROF // pypy does not support line profiling
 if (vmp_profiles_python_lines()) {
 // In the line profiling mode we save a line number for every frame.
@@ -99,27 +93,8 @@
 
 // NOTE: the profiling overhead can be reduced by storing co_lnotab in 
the dump and
 // moving this computation to the reader instead of doing it here.
-lnotab = PyStr_AS_STRING(frame->f_code->co_lnotab);
-
-if (lnotab != NULL) {
-line = (uint64_t)frame->f_lineno;
-addr = 0;
-
-len = (int)PyStr_GET_SIZE(frame->f_code->co_lnotab);
-
-for (j = 0; j < len; j += 2) {
-addr += lnotab[j];
-if (addr > frame->f_lasti) {
-break;
-}
-line += lnotab[j+1];
-}
-result[*depth] = (void*) line;
-*depth = *depth + 1;
-} else {
-result[*depth] = (void*) 0;
-*depth = *depth + 1;
-}
+result[*depth] = (void*) (int64_t) PyFrame_GetLineNumber(frame);
+*depth = *depth + 1;
 }
 result[*depth] = (void*)CODE_ADDR_TO_UID(FRAME_CODE(frame));
 *depth = *depth + 1;
diff --git a/rpython/rlib/rvmprof/test/test_file.py b/rpython/rlib/rvmprof/test/test_file.py
--- a/rpython/rlib/rvmprof/test/test_file.py
+++ b/rpython/rlib/rvmprof/test/test_file.py
@@ -37,7 +37,7 @@
 #
 if no_matches:
 print
-print 'The following file dit NOT match'
+print 'The following file did NOT match'
 for f in no_matches:
 print '   ', f.relto(RVMPROF)
 raise AssertionError("some files were updated on github, "


[pypy-commit] pypy default: merge fix-vmprof-memory-tracking

2019-05-25 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r96676:9cfdc2c91a37
Date: 2019-05-25 11:27 +0200
http://bitbucket.org/pypy/pypy/changeset/9cfdc2c91a37/

Log: merge fix-vmprof-memory-tracking

diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
--- a/pypy/module/_vmprof/interp_vmprof.py
+++ b/pypy/module/_vmprof/interp_vmprof.py
@@ -60,11 +60,6 @@
 'interval' is a float representing the sampling interval, in seconds.
 Must be smaller than 1.0
 """
-w_modules = space.sys.get('modules')
-#if space.contains_w(w_modules, space.newtext('_continuation')):
-#space.warn(space.newtext("Using _continuation/greenlet/stacklet 
together "
-# "with vmprof will crash"),
-#   space.w_RuntimeWarning)
 try:
 rvmprof.enable(fileno, period, memory, native, real_time)
 except rvmprof.VMProfError as e:
diff --git a/rpython/rlib/rvmprof/rvmprof.py b/rpython/rlib/rvmprof/rvmprof.py
--- a/rpython/rlib/rvmprof/rvmprof.py
+++ b/rpython/rlib/rvmprof/rvmprof.py
@@ -143,7 +143,7 @@
 native = 0 # force disabled on Windows
 lines = 0 # not supported on PyPy currently
 
-p_error = self.cintf.vmprof_init(fileno, interval, lines, memory, "pypy", native, real_time)
+p_error = self.cintf.vmprof_init(fileno, interval, memory, lines, "pypy", native, real_time)
 if p_error:
 raise VMProfError(rffi.charp2str(p_error))
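
The argument swap fixed above passed the memory flag in the lines slot, so memory sampling was
not actually turned on. A hedged sketch of how the memory-tracking path is driven, mirroring the
test changes below (file name and interval are illustrative):

    import os
    from rpython.rlib import rvmprof

    fd = os.open('/tmp/profile.vmprof', os.O_WRONLY | os.O_CREAT, 0666)
    rvmprof.enable(fd, 0.001, memory=1)   # ~1ms sampling, record memory per sample
    # ... run the workload to be profiled ...
    rvmprof.disable()
    os.close(fd)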
 
diff --git a/rpython/rlib/rvmprof/test/test_rvmprof.py 
b/rpython/rlib/rvmprof/test/test_rvmprof.py
--- a/rpython/rlib/rvmprof/test/test_rvmprof.py
+++ b/rpython/rlib/rvmprof/test/test_rvmprof.py
@@ -98,12 +98,12 @@
 self.tmpfilename = str(self.tmpfile)
 super(RVMProfSamplingTest, self).init()
 
-ENTRY_POINT_ARGS = (int, float)
-def entry_point(self, value, delta_t):
+ENTRY_POINT_ARGS = (int, float, int)
+def entry_point(self, value, delta_t, memory=0):
 code = self.MyCode('py:code:52:test_enable')
 rvmprof.register_code(code, self.MyCode.get_name)
 fd = os.open(self.tmpfilename, os.O_WRONLY | os.O_CREAT, 0666)
-rvmprof.enable(fd, self.SAMPLING_INTERVAL)
+rvmprof.enable(fd, self.SAMPLING_INTERVAL, memory=memory)
 start = time.time()
 res = 0
 while time.time() < start+delta_t:
@@ -128,17 +128,25 @@
 
 def test(self):
 from vmprof import read_profile
-assert self.entry_point(10**4, 0.1) == 
+assert self.entry_point(10**4, 0.1, 0) == 
 assert self.tmpfile.check()
 self.tmpfile.remove()
 #
-assert self.rpy_entry_point(10**4, 0.5) == 
+assert self.rpy_entry_point(10**4, 0.5, 0) == 
 assert self.tmpfile.check()
 prof = read_profile(self.tmpfilename)
 tree = prof.get_tree()
 assert tree.name == 'py:code:52:test_enable'
 assert self.approx_equal(tree.count, 0.5/self.SAMPLING_INTERVAL)
 
+def test_mem(self):
+from vmprof import read_profile
+assert self.rpy_entry_point(10**4, 0.5, 1) == 
+assert self.tmpfile.check()
+prof = read_profile(self.tmpfilename)
+assert prof.profile_memory
+assert all(p[-1] > 0 for p in prof.profiles)
+
 
 class TestNative(RVMProfSamplingTest):
 
@@ -177,7 +185,7 @@
 def test(self):
 from vmprof import read_profile
 # from vmprof.show import PrettyPrinter
-assert self.rpy_entry_point(3, 0.5) == 42000
+assert self.rpy_entry_point(3, 0.5, 0) == 42000
 assert self.tmpfile.check()
 
 prof = read_profile(self.tmpfilename)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2019-05-25 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r96678:aeb506f08f30
Date: 2019-05-25 11:39 +0200
http://bitbucket.org/pypy/pypy/changeset/aeb506f08f30/

Log:merge heads

diff --git a/lib-python/2.7/socket.py b/lib-python/2.7/socket.py
--- a/lib-python/2.7/socket.py
+++ b/lib-python/2.7/socket.py
@@ -61,20 +61,22 @@
   DeprecationWarning, stacklevel=2)
 return _realssl.sslwrap_simple(sock, keyfile, certfile)
 
-# we need to import the same constants we used to...
-from _ssl import SSLError as sslerror
-from _ssl import \
- RAND_add, \
- RAND_status, \
- SSL_ERROR_ZERO_RETURN, \
- SSL_ERROR_WANT_READ, \
- SSL_ERROR_WANT_WRITE, \
- SSL_ERROR_WANT_X509_LOOKUP, \
- SSL_ERROR_SYSCALL, \
- SSL_ERROR_SSL, \
- SSL_ERROR_WANT_CONNECT, \
- SSL_ERROR_EOF, \
- SSL_ERROR_INVALID_ERROR_CODE
+# we need to import the same constants we used to, 
+# see lib_pypy/_cffi_ssl/_stdssl/error.py and __init__.py to prevent
+# circular import
+# from _ssl import SSLError as sslerror
+# from _ssl import \
+ # RAND_add, \
+ # RAND_status
+ # SSL_ERROR_ZERO_RETURN, \
+ # SSL_ERROR_WANT_READ, \
+ # SSL_ERROR_WANT_WRITE, \
+ # SSL_ERROR_WANT_X509_LOOKUP, \
+ # SSL_ERROR_SYSCALL, \
+ # SSL_ERROR_SSL, \
+ # SSL_ERROR_WANT_CONNECT, \
+ # SSL_ERROR_EOF, \
+ # SSL_ERROR_INVALID_ERROR_CODE
 try:
 from _ssl import RAND_egd
 except ImportError:
diff --git a/lib_pypy/_cffi_ssl/_stdssl/__init__.py 
b/lib_pypy/_cffi_ssl/_stdssl/__init__.py
--- a/lib_pypy/_cffi_ssl/_stdssl/__init__.py
+++ b/lib_pypy/_cffi_ssl/_stdssl/__init__.py
@@ -20,6 +20,7 @@
 pyerr_write_unraisable)
 from _cffi_ssl._stdssl import error
 from select import select
+import socket
 
 if sys.platform == 'win32':
 from _cffi_ssl._stdssl.win32_extra import enum_certificates, enum_crls
@@ -306,9 +307,6 @@
 return self.socket_type == SSL_SERVER
 
 def do_handshake(self):
-# delay to prevent circular imports
-import socket
-
 sock = self.get_socket_or_connection_gone()
 ssl = self.ssl
 timeout = _socket_timeout(sock)
@@ -372,9 +370,6 @@
 return _decode_certificate(self.peer_cert)
 
 def write(self, bytestring):
-# delay to prevent circular imports
-import socket
-
 deadline = 0
 b = _str_to_ffi_buffer(bytestring)
 sock = self.get_socket_or_connection_gone()
@@ -425,9 +420,6 @@
 raise pyssl_error(self, length)
 
 def read(self, length, buffer_into=None):
-# delay to prevent circular imports
-import socket
-
 ssl = self.ssl
 
 if length < 0 and buffer_into is None:
@@ -561,9 +553,6 @@
 return sock
 
 def shutdown(self):
-# delay to prevent circular imports
-import socket
-
 sock = self.get_socket_or_None()
 nonblocking = False
 ssl = self.ssl
@@ -1545,3 +1534,5 @@
"enough data to seed the PRNG");
 return bytecount
 
+socket.RAND_add = RAND_add
+socket.RAND_status = RAND_status
diff --git a/lib_pypy/_cffi_ssl/_stdssl/error.py 
b/lib_pypy/_cffi_ssl/_stdssl/error.py
--- a/lib_pypy/_cffi_ssl/_stdssl/error.py
+++ b/lib_pypy/_cffi_ssl/_stdssl/error.py
@@ -27,6 +27,13 @@
 if self.strerror and isinstance(self.strerror, str):
 return self.strerror
 return str(self.args)
+# these are expected on socket as well
+socket.sslerror = SSLError
+for v in [ 'SSL_ERROR_ZERO_RETURN', 'SSL_ERROR_WANT_READ',
+ 'SSL_ERROR_WANT_WRITE', 'SSL_ERROR_WANT_X509_LOOKUP', 'SSL_ERROR_SYSCALL',
+ 'SSL_ERROR_SSL', 'SSL_ERROR_WANT_CONNECT', 'SSL_ERROR_EOF',
+ 'SSL_ERROR_INVALID_ERROR_CODE' ]:
+setattr(socket, v, locals()[v]) 
 
 class SSLZeroReturnError(SSLError):
 """ SSL/TLS session closed cleanly. """
diff --git a/lib_pypy/_hashlib/__init__.py b/lib_pypy/_hashlib/__init__.py
--- a/lib_pypy/_hashlib/__init__.py
+++ b/lib_pypy/_hashlib/__init__.py
@@ -57,7 +57,7 @@
 
 def update(self, string):
 if isinstance(string, unicode):
-buf = ffi.from_buffer(string.encode('utf-8'))
+buf = ffi.from_buffer(string.encode('ascii'))
 else:
 buf = ffi.from_buffer(string)
 with self.lock:
diff --git a/pypy/module/_md5/test/test_md5.py 
b/pypy/module/_md5/test/test_md5.py
--- a/pypy/module/_md5/test/test_md5.py
+++ b/pypy/module/_md5/test/test_md5.py
@@ -24,7 +24,8 @@
 assert self.md5.md5().digest_size == 16
 if sys.version_info >= (2, 5):
 assert self.md5.blocksize == 1
-assert self.md5.md5().digestsize == 16
+# implementation detail, not part of the API
+# assert self.md5.md5().digestsize == 16
 
 def test_MD5Type(self):
 """
diff --git 

[pypy-commit] pypy default: merge cffi-libs, which moves _ssl and _hashlib to cffi-based implementations

2019-05-23 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r96665:50956a7107b8
Date: 2019-05-24 07:42 +0300
http://bitbucket.org/pypy/pypy/changeset/50956a7107b8/

Log:merge cffi-libs, which moves _ssl and _hashlib to cffi-based
implementations

diff too long, truncating to 2000 out of 11657 lines

diff --git a/lib-python/2.7/test/test_ftplib.py 
b/lib-python/2.7/test/test_ftplib.py
--- a/lib-python/2.7/test/test_ftplib.py
+++ b/lib-python/2.7/test/test_ftplib.py
@@ -234,11 +234,17 @@
 def run(self):
 self.active = True
 self.__flag.set()
-while self.active and asyncore.socket_map:
-self.active_lock.acquire()
-asyncore.loop(timeout=0.1, count=1)
-self.active_lock.release()
-asyncore.close_all(ignore_all=True)
+try:
+while self.active and asyncore.socket_map:
+self.active_lock.acquire()
+try:
+asyncore.loop(timeout=0.1, count=1)
+except:
+self.active_lock.release()
+raise
+self.active_lock.release()
+finally:
+asyncore.close_all(ignore_all=True)
 
 def stop(self):
 assert self.active
diff --git a/lib_pypy/_cffi_ssl/LICENSE b/lib_pypy/_cffi_ssl/LICENSE
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_cffi_ssl/LICENSE
@@ -0,0 +1,26 @@
+
+Except when otherwise stated (look for LICENSE files in directories or
+information at the beginning of each file) all software and
+documentation is licensed as follows: 
+
+The MIT License
+
+Permission is hereby granted, free of charge, to any person 
+obtaining a copy of this software and associated documentation 
+files (the "Software"), to deal in the Software without 
+restriction, including without limitation the rights to use, 
+copy, modify, merge, publish, distribute, sublicense, and/or 
+sell copies of the Software, and to permit persons to whom the 
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included 
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY, 
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
+DEALINGS IN THE SOFTWARE.
+
diff --git a/lib_pypy/_cffi_ssl/README.md b/lib_pypy/_cffi_ssl/README.md
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_cffi_ssl/README.md
@@ -0,0 +1,35 @@
+# PyPy's SSL module
+
+All of the CFFI code is copied from cryptography, with patches contributed
+back to cryptography. PyPy vendors its own copy of the cffi backend, thus
+it renames the compiled shared object to _pypy_openssl.so (which means
+that cryptography can ship their own cffi backend)
+
+NOTE: currently, we have the following changes:
+
+* ``_cffi_src/openssl/callbacks.py`` to not rely on the CPython C API
+  (this change is now backported)
+
+* ``_cffi_src/utils.py`` for issue #2575 (29c9a89359e4)
+
+* ``_cffi_src/openssl/x509_vfy.py`` for issue #2605 (ca4d0c90f5a1)
+
+* ``_cffi_src/openssl/pypy_win32_extra.py`` for Win32-only functionality like 
ssl.enum_certificates()
+
+
+# Tests?
+
+Currently this module is tested using CPython's standard library test suite.
+
+# Install it into PyPy's source tree
+
+Copy over all the sources into the folder `lib_pypy/_cffi_ssl/*`. Updating the 
cffi backend can be simply done by the following command::
+
+$ cp -r /src/_cffi_src/* .
+
+NOTE: you need to keep our version of ``_cffi_src/openssl/callbacks.py``
+for now!
+
+# Cryptography version
+
+Copied over release version `1.7.2`
diff --git a/lib_pypy/_cffi_ssl/__init__.py b/lib_pypy/_cffi_ssl/__init__.py
new file mode 100644
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/__init__.py 
b/lib_pypy/_cffi_ssl/_cffi_src/__init__.py
new file mode 100644
diff --git a/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py 
b/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_cffi_ssl/_cffi_src/build_commoncrypto.py
@@ -0,0 +1,33 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import, division, print_function
+
+from _cffi_src.utils import build_ffi_for_binding
+
+
+ffi = build_ffi_for_binding(
+module_name="_commoncrypto",
+module_prefix="_cffi_src.commoncrypto.",
+modules=[
+"cf",
+"common_digest",
+"common_hmac",
+

[pypy-commit] pypy default: merge datetime_api_27 which provides Date*_FromTimestamp

2019-04-14 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r96486:2a381b55d56f
Date: 2019-04-14 16:40 +0300
http://bitbucket.org/pypy/pypy/changeset/2a381b55d56f/

Log:merge datetime_api_27 which provides Date*_FromTimestamp

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -12,3 +12,7 @@
 .. branch: jit-cleanup
 
 Remove rpython.jit.metainterp.typesystem and clean up related code in 
rpython/jit/
+
+.. branch: datetime_api_27
+
+Add ``DateTime_FromTimestamp`` and ``Date_FromTimestamp``
diff --git a/pypy/module/cpyext/cdatetime.py b/pypy/module/cpyext/cdatetime.py
--- a/pypy/module/cpyext/cdatetime.py
+++ b/pypy/module/cpyext/cdatetime.py
@@ -67,6 +67,14 @@
 _PyDelta_FromDelta.api_func.functype,
 _PyDelta_FromDelta.api_func.get_wrapper(space))
 
+datetimeAPI.c_DateTime_FromTimestamp = llhelper(
+_PyDateTime_FromTimestamp.api_func.functype,
+_PyDateTime_FromTimestamp.api_func.get_wrapper(space))
+
+datetimeAPI.c_Date_FromTimestamp = llhelper(
+_PyDate_FromTimestamp.api_func.functype,
+_PyDate_FromTimestamp.api_func.get_wrapper(space))
+
 state.datetimeAPI.append(datetimeAPI)
 return state.datetimeAPI[0]
 
@@ -243,8 +251,16 @@
 """
 w_datetime = PyImport_Import(space, space.newtext("datetime"))
 w_type = space.getattr(w_datetime, space.newtext("datetime"))
+return _PyDateTime_FromTimestamp(space, w_type, w_args, None)
+
+@cpython_api([PyObject, PyObject, PyObject], PyObject)
+def _PyDateTime_FromTimestamp(space, w_type, w_args, w_kwds):
+"""Implementation of datetime.fromtimestamp that matches the signature for
+PyDateTimeCAPI.DateTime_FromTimestamp
+"""
 w_method = space.getattr(w_type, space.newtext("fromtimestamp"))
-return space.call(w_method, w_args)
+
+return space.call(w_method, w_args, w_kwds=w_kwds)
 
 @cpython_api([PyObject], PyObject)
 def PyDate_FromTimestamp(space, w_args):
@@ -253,6 +269,12 @@
 """
 w_datetime = PyImport_Import(space, space.newtext("datetime"))
 w_type = space.getattr(w_datetime, space.newtext("date"))
+return _PyDate_FromTimestamp(space, w_type, w_args)
+
+@cpython_api([PyObject, PyObject], PyObject)
+def _PyDate_FromTimestamp(space, w_type, w_args):
+"""Implementation of date.fromtimestamp that matches the signature for
+PyDateTimeCAPI.Date_FromTimestamp"""
 w_method = space.getattr(w_type, space.newtext("fromtimestamp"))
 return space.call(w_method, w_args)
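
At the Python level the two new slots correspond to the classmethods these wrappers dispatch to;
roughly (the timestamp values are just examples):

    import datetime

    # DateTime_FromTimestamp(type, (20.0, None), NULL) ~
    dt = datetime.datetime.fromtimestamp(20.0, None)
    # Date_FromTimestamp(type, (1430366400.0,)) ~
    d = datetime.date.fromtimestamp(1430366400.0)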
 
diff --git a/pypy/module/cpyext/parse/cpyext_datetime.h 
b/pypy/module/cpyext/parse/cpyext_datetime.h
--- a/pypy/module/cpyext/parse/cpyext_datetime.h
+++ b/pypy/module/cpyext/parse/cpyext_datetime.h
@@ -13,6 +13,10 @@
 PyObject*, PyTypeObject*);
 PyObject *(*Time_FromTime)(int, int, int, int, PyObject*, PyTypeObject*);
 PyObject *(*Delta_FromDelta)(int, int, int, int, PyTypeObject*);
+
+/* constructors for the DB API */
+PyObject *(*DateTime_FromTimestamp)(PyObject*, PyObject*, PyObject*);
+PyObject *(*Date_FromTimestamp)(PyObject*, PyObject*);
 } PyDateTime_CAPI;
 
 typedef struct
diff --git a/pypy/module/cpyext/test/test_datetime.py 
b/pypy/module/cpyext/test/test_datetime.py
--- a/pypy/module/cpyext/test/test_datetime.py
+++ b/pypy/module/cpyext/test/test_datetime.py
@@ -146,6 +146,41 @@
 2000, 6, 6, 6, 6, 6, 6, Py_None,
 PyDateTimeAPI->DateTimeType);
  """),
+("new_datetime_fromtimestamp", "METH_NOARGS",
+ """ PyDateTime_IMPORT;
+ PyObject *ts = PyFloat_FromDouble(20.0);
+ Py_INCREF(Py_None);
+ PyObject *tsargs = PyTuple_Pack(2, ts, Py_None);
+ PyObject *rv = PyDateTimeAPI->DateTime_FromTimestamp(
+(PyObject *)PyDateTimeAPI->DateTimeType, tsargs, NULL);
+ Py_DECREF(tsargs);
+ return rv;
+ """),
+("new_dt_fromts_tzinfo", "METH_O",
+ """ PyDateTime_IMPORT;
+ PyObject *ts = PyFloat_FromDouble(20.0);
+ PyObject *tsargs = PyTuple_Pack(1, ts);
+ PyObject *tskwargs = PyDict_New();
+
+ Py_INCREF(args);
+ PyDict_SetItemString(tskwargs, "tz", args);
+ PyObject *rv = PyDateTimeAPI->DateTime_FromTimestamp(
+(PyObject *)PyDateTimeAPI->DateTimeType, tsargs, tskwargs);
+ Py_DECREF(tsargs);
+ Py_DECREF(tskwargs);
+ return rv;
+ """),
+("new_date_fromtimestamp", "METH_NOARGS",
+ """ PyDateTime_IMPORT;
+ PyObject *ts = PyFloat_FromDouble(1430366400.0);
+ Py_INCREF(Py_None);
+ PyObject *tsargs = PyTuple_Pack(1, ts);
+ PyObject *rv = PyDateTimeAPI->Date_FromTimestamp(
+(PyObject 

[pypy-commit] pypy default: merge heads

2019-03-27 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r96371:24e79a787bd8
Date: 2019-03-27 23:00 +0100
http://bitbucket.org/pypy/pypy/changeset/24e79a787bd8/

Log:merge heads

diff --git a/pypy/interpreter/astcompiler/codegen.py 
b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -912,6 +912,20 @@
 elt_count = len(tup.elts) if tup.elts is not None else 0
 if tup.ctx == ast.Store:
 self.emit_op_arg(ops.UNPACK_SEQUENCE, elt_count)
+if tup.ctx == ast.Load and elt_count > MAX_STACKDEPTH_CONTAINERS:
+# we need a complete hack to build a new tuple from the list
+# ().__class__(l)
+empty_index = self.add_const(self.space.newtuple([]))
+self.emit_op_arg(ops.LOAD_CONST, empty_index)
+self.emit_op_name(ops.LOAD_ATTR, self.names, '__class__')
+
+self.emit_op_arg(ops.BUILD_LIST, 0)
+for element in tup.elts:
+element.walkabout(self)
+self.emit_op_arg(ops.LIST_APPEND, 1)
+
+self.emit_op_arg(ops.CALL_FUNCTION, 1)
+return
 self.visit_sequence(tup.elts)
 if tup.ctx == ast.Load:
 self.emit_op_arg(ops.BUILD_TUPLE, elt_count)
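
The ().__class__ trick used above is observable at application level; an illustrative check:

    # Build a 200-element tuple without a 200-wide BUILD_TUPLE, the way the
    # compiler now lowers huge tuple literals.
    elements = list(range(200))
    assert ().__class__(elements) == tuple(elements)
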
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py 
b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -1220,14 +1220,23 @@
 
 
 class TestHugeStackDepths:
-def test_list(self):
+def run_and_check_stacksize(self, source):
 space = self.space
-source = "a = [" + ",".join([str(i) for i in range(200)]) + "]\n"
-code = compile_with_astcompiler(source, 'exec', space)
+code = compile_with_astcompiler("a = " + source, 'exec', space)
 assert code.co_stacksize < 100
 w_dict = space.newdict()
 code.exec_code(space, w_dict, w_dict)
-assert space.unwrap(space.getitem(w_dict, space.newtext("a"))) == 
range(200)
+return space.getitem(w_dict, space.newtext("a"))
+
+def test_tuple(self):
+source = "(" + ",".join([str(i) for i in range(200)]) + ")\n"
+w_res = self.run_and_check_stacksize(source)
+assert self.space.unwrap(w_res) == tuple(range(200))
+
+def test_list(self):
+source = "a = [" + ",".join([str(i) for i in range(200)]) + "]\n"
+w_res = self.run_and_check_stacksize(source)
+assert self.space.unwrap(w_res) == range(200)
 
 def test_list_unpacking(self):
 space = self.space
@@ -1241,12 +1250,8 @@
 assert space.unwrap(space.getitem(w_dict, space.newtext("b199"))) == 
241
 
 def test_set(self):
+source = "a = {" + ",".join([str(i) for i in range(200)]) + "}\n"
+w_res = self.run_and_check_stacksize(source)
 space = self.space
-source = "a = {" + ",".join([str(i) for i in range(200)]) + "}\n"
-code = compile_with_astcompiler(source, 'exec', space)
-assert code.co_stacksize < 100
-w_dict = space.newdict()
-code.exec_code(space, w_dict, w_dict)
 assert [space.int_w(w_x)
-for w_x in space.unpackiterable(space.getitem(w_dict, 
space.newtext("a")))] == range(200)
-
+for w_x in space.unpackiterable(w_res)] == range(200)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge closed branch

2019-03-10 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r96267:c531ef4097ad
Date: 2019-03-10 22:44 +0200
http://bitbucket.org/pypy/pypy/changeset/c531ef4097ad/

Log:merge closed branch

___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge newmemoryview-app-level into default

2019-03-10 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r96232:750f4840080b
Date: 2019-03-10 12:42 +0200
http://bitbucket.org/pypy/pypy/changeset/750f4840080b/

Log:merge newmemoryview-app-level into default

diff --git a/extra_tests/ctypes_tests/test_structures.py 
b/extra_tests/ctypes_tests/test_structures.py
--- a/extra_tests/ctypes_tests/test_structures.py
+++ b/extra_tests/ctypes_tests/test_structures.py
@@ -124,12 +124,15 @@
 ms.n = 0xff00
 return repr(ba[:])
 
+nstruct = dostruct(Native)
 if sys.byteorder == 'little':
-assert dostruct(Native) == dostruct(Little)
-assert dostruct(Native) != dostruct(Big)
+assert nstruct == dostruct(Little)
+assert nstruct != dostruct(Big)
+assert Big._fields_[0][1] is not i
 else:
-assert dostruct(Native) == dostruct(Big)
-assert dostruct(Native) != dostruct(Little)
+assert nstruct == dostruct(Big)
+assert nstruct != dostruct(Little)
+assert Little._fields_[0][1] is not i
 
 def test_from_buffer_copy():
 from array import array
@@ -190,3 +193,20 @@
 assert sizeof(s) == 3 * sizeof(c_int)
 assert s.a == 4 # 256 + 4
 assert s.b == -123
+
+def test_memoryview():
+class S(Structure):
+_fields_ = [('a', c_int16),
+('b', c_int16),
+   ]
+
+S3 = S * 3
+c_array = (2 * S3)(
+S3(S(a=0, b=1), S(a=2, b=3), S(a=4,  b=5)),
+S3(S(a=6, b=7), S(a=8, b=9), S(a=10, b=11)),
+)
+
+mv = memoryview(c_array)
+assert mv.format == 'T{'}
+swappedorder = {'little': '>', 'big': '<'}
+
+def get_format_str(typ):
+if hasattr(typ, '_fields_'):
+if hasattr(typ, '_swappedbytes_'):
+bo = swappedorder[sys.byteorder]
+else:
+bo = byteorder[sys.byteorder]
+flds = []
+for name, obj in typ._fields_:
+# Trim off the leading '<' or '>'
+ch = get_format_str(obj)[1:]
+if (ch) == 'B':
+flds.append(byteorder[sys.byteorder])
+else:
+flds.append(bo)
+flds.append(ch)
+flds.append(':')
+flds.append(name)
+flds.append(':')
+return 'T{' + ''.join(flds) + '}'
+elif hasattr(typ, '_type_'):
+ch = typ._type_
+return byteorder[sys.byteorder] + ch
+else:
+raise ValueError('cannot get format string for %r' % typ)
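
Assuming a little-endian host, the helper above yields struct-module style formats; an
illustrative trace, not part of the diff:

    import ctypes

    class S(ctypes.Structure):
        _fields_ = [('a', ctypes.c_int16), ('b', ctypes.c_int16)]

    # get_format_str(ctypes.c_int16) -> '<h'
    # get_format_str(S)              -> 'T{<h:a:<h:b:}'
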
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -2,8 +2,15 @@
 from _rawffi import alt as _ffi
 import sys
 
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
+try:
+from __pypy__ import builtinify
+except ImportError:
+builtinify = lambda f: f
+
+try:
+from __pypy__.bufferable import bufferable
+except ImportError:
+bufferable = object
 
 keepalive_key = str # XXX fix this when provided with test
 
@@ -64,7 +71,7 @@
 'resbuffer' is a _rawffi array of length 1 containing the value,
 and this returns a general Python object that corresponds.
 """
-res = object.__new__(self)
+res = bufferable.__new__(self)
 res.__class__ = self
 res.__dict__['_buffer'] = resbuffer
 if base is not None:
@@ -158,7 +165,7 @@
 def __ne__(self, other):
 return self._obj != other
 
-class _CData(object):
+class _CData(bufferable):
 """ The most basic object for all ctypes types
 """
 __metaclass__ = _CDataMeta
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -7,8 +7,7 @@
 from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\
  array_slice_setitem
 
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
+from __pypy__ import builtinify, newmemoryview
 
 # This cache maps types to pointers to them.
 _pointer_type_cache = {}
@@ -135,6 +134,9 @@
 def _as_ffi_pointer_(self, ffitype):
 return as_ffi_pointer(self, ffitype)
 
+def __buffer__(self, flags):
+mv = memoryview(self.getcontents())
+return newmemoryview(mv, mv.itemsize, '&' + mv.format, mv.shape)
 
 def _cast_addr(obj, _, tp):
 if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()):
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -2,9 +2,9 @@
 import _rawffi
 from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\
  store_reference, ensure_objects, CArgObject
-from _ctypes.array import Array
+from _ctypes.array import Array, get_format_str
 from _ctypes.pointer import _Pointer
-import inspect
+import inspect, __pypy__
 
 
 def names_and_fields(self, _fields_, superclass, 

[pypy-commit] pypy default: merge

2019-02-26 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r96178:dbdf453749f7
Date: 2019-02-26 19:01 +0100
http://bitbucket.org/pypy/pypy/changeset/dbdf453749f7/

Log:merge

diff --git a/rpython/rlib/rutf8.py b/rpython/rlib/rutf8.py
--- a/rpython/rlib/rutf8.py
+++ b/rpython/rlib/rutf8.py
@@ -819,7 +819,6 @@
 def __iter__(self):
 return self
 
-@always_inline
 def next(self):
 pos = self.it.get_pos()
 return (self.it.next(), pos)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge unicode-utf8 into default

2019-02-13 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r96003:ba081fb468f4
Date: 2019-02-13 23:11 +0200
http://bitbucket.org/pypy/pypy/changeset/ba081fb468f4/

Log:merge unicode-utf8 into default

diff too long, truncating to 2000 out of 15164 lines

diff --git a/TODO b/TODO
new file mode 100644
--- /dev/null
+++ b/TODO
@@ -0,0 +1,4 @@
+* find a better way to run "find" without creating the index storage,
+  if one is not already readily available (understand cost now, improve after 
merge)
+* improve performance of splitlines
+* think about cost of utf8 list strategy (Armin and CF)
diff --git a/lib-python/2.7/test/test_memoryio.py 
b/lib-python/2.7/test/test_memoryio.py
--- a/lib-python/2.7/test/test_memoryio.py
+++ b/lib-python/2.7/test/test_memoryio.py
@@ -712,6 +712,7 @@
 
 # XXX: For the Python version of io.StringIO, this is highly
 # dependent on the encoding used for the underlying buffer.
+@support.cpython_only
 def test_widechar(self):
 buf = self.buftype("\U0002030a\U00020347")
 memio = self.ioclass(buf)
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -29,7 +29,11 @@
 
 Improve register allocation in the JIT.
 
-
 .. branch: promote-unicode
 
 Implement rlib.jit.promote_unicode to complement promote_string
+
+.. branch: unicode-utf8
+
+Use utf8 internally to represent unicode, with the goal of never using 
rpython-level unicode
+
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -535,24 +535,26 @@
 if num_remainingkwds == 1:
 for i in range(len(keywords)):
 if i not in kwds_mapping:
-name = keywords[i]
-if name is None:
-# We'll assume it's unicode. Encode it.
-# Careful, I *think* it should not be possible to
-# get an IndexError here but you never know.
-try:
-if keyword_names_w is None:
-raise IndexError
-# note: negative-based indexing from the end
-w_name = keyword_names_w[i - len(keywords)]
-except IndexError:
+name = '?'
+# We'll assume it's unicode. Encode it.
+# Careful, I *think* it should not be possible to
+# get an IndexError here but you never know.
+try:
+if keyword_names_w is None:
+raise IndexError
+# note: negative-based indexing from the end
+w_name = keyword_names_w[i - len(keywords)]
+except IndexError:
+if keywords is None:
 name = '?'
 else:
-w_enc = space.newtext(space.sys.defaultencoding)
-w_err = space.newtext("replace")
-w_name = space.call_method(w_name, "encode", w_enc,
-   w_err)
-name = space.text_w(w_name)
+name = keywords[i]
+else:
+w_enc = space.newtext(space.sys.defaultencoding)
+w_err = space.newtext("replace")
+w_name = space.call_method(w_name, "encode", w_enc,
+   w_err)
+name = space.text_w(w_name)
 break
 self.kwd_name = name
 
diff --git a/pypy/interpreter/astcompiler/optimize.py 
b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -5,7 +5,7 @@
 from pypy.tool import stdlib_opcode as ops
 from pypy.interpreter.error import OperationError
 from rpython.rlib.unroll import unrolling_iterable
-from rpython.rlib.runicode import MAXUNICODE
+from rpython.rlib.rutf8 import MAXUNICODE
 from rpython.rlib.objectmodel import specialize
 
 
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py 
b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -975,9 +975,6 @@
 
 class AppTestCompiler:
 
-def setup_class(cls):
-cls.w_maxunicode = cls.space.wrap(sys.maxunicode)
-
 def test_docstring_not_loaded(self):
 import StringIO, dis, sys
 ns = {}
@@ -1027,7 +1024,7 @@
 import sys
 d = {}
 exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d
-if sys.maxunicode > 65535 and self.maxunicode > 65535:
+if 

[pypy-commit] pypy default: merge promote-unicode

2019-02-12 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r95981:9d4fe930924e
Date: 2019-02-12 20:11 +0100
http://bitbucket.org/pypy/pypy/changeset/9d4fe930924e/

Log:merge promote-unicode

mostly for completeness sake: support for rlib.jit.promote_unicode,
which behaves like promote_string, but for rpython unicode objects.
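
A hedged sketch of the intended use; the names around the hint are made up, the real coverage is
in the test below:

    from rpython.rlib.jit import promote_unicode

    def lookup(cache, u_key):
        # u_key is an RPython unicode string; promoting it by value lets the
        # JIT treat the following dict lookup as constant once traced.
        u_key = promote_unicode(u_key)
        return cache.get(u_key, None)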

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -28,3 +28,8 @@
 .. branch: regalloc-playground
 
 Improve register allocation in the JIT.
+
+
+.. branch: promote-unicode
+
+Implement rlib.jit.promote_unicode to complement promote_string
diff --git a/rpython/jit/codewriter/jtransform.py 
b/rpython/jit/codewriter/jtransform.py
--- a/rpython/jit/codewriter/jtransform.py
+++ b/rpython/jit/codewriter/jtransform.py
@@ -596,6 +596,23 @@
 op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr],
  op.result)
 return [SpaceOperation('-live-', [], None), op1, None]
+if (hints.get('promote_unicode') and
+op.args[0].concretetype is not lltype.Void):
+U = lltype.Ptr(rstr.UNICODE)
+assert op.args[0].concretetype == U
+self._register_extra_helper(EffectInfo.OS_UNIEQ_NONNULL,
+"str.eq_nonnull",
+[U, U],
+lltype.Signed,
+EffectInfo.EF_ELIDABLE_CANNOT_RAISE)
+descr, p = 
self.callcontrol.callinfocollection.callinfo_for_oopspec(
+EffectInfo.OS_UNIEQ_NONNULL)
+# XXX this is fairly ugly way of creating a constant,
+# however, callinfocollection has no better interface
+c = Constant(p.adr.ptr, lltype.typeOf(p.adr.ptr))
+op1 = SpaceOperation('str_guard_value', [op.args[0], c, descr],
+ op.result)
+return [SpaceOperation('-live-', [], None), op1, None]
 if hints.get('force_virtualizable'):
 return SpaceOperation('hint_force_virtualizable', [op.args[0]], 
None)
 if hints.get('force_no_const'):   # for tests only
diff --git a/rpython/jit/codewriter/test/test_jtransform.py 
b/rpython/jit/codewriter/test/test_jtransform.py
--- a/rpython/jit/codewriter/test/test_jtransform.py
+++ b/rpython/jit/codewriter/test/test_jtransform.py
@@ -94,7 +94,7 @@
 return True
 return False
 def callinfo_for_oopspec(self, oopspecindex):
-assert oopspecindex == effectinfo.EffectInfo.OS_STREQ_NONNULL
+# assert oopspecindex == effectinfo.EffectInfo.OS_STREQ_NONNULL
 class c:
 class adr:
 ptr = 1
@@ -1129,6 +1129,21 @@
 assert op1.result == v2
 assert op0.opname == '-live-'
 
+def test_unicode_promote():
+PUNICODE = lltype.Ptr(rstr.UNICODE)
+v1 = varoftype(PUNICODE)
+v2 = varoftype(PUNICODE)
+op = SpaceOperation('hint',
+[v1, Constant({'promote_unicode': True}, lltype.Void)],
+v2)
+tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+op0, op1, _ = tr.rewrite_operation(op)
+assert op1.opname == 'str_guard_value'
+assert op1.args[0] == v1
+assert op1.args[2] == 'calldescr'
+assert op1.result == v2
+assert op0.opname == '-live-'
+
 def test_double_promote_str():
 PSTR = lltype.Ptr(rstr.STR)
 v1 = varoftype(PSTR)
diff --git a/rpython/jit/metainterp/test/test_string.py 
b/rpython/jit/metainterp/test/test_string.py
--- a/rpython/jit/metainterp/test/test_string.py
+++ b/rpython/jit/metainterp/test/test_string.py
@@ -3,7 +3,7 @@
 from rpython.jit.metainterp.test.support import LLJitMixin
 from rpython.rlib.debug import debug_print
 from rpython.rlib.jit import (JitDriver, dont_look_inside, we_are_jitted,
-promote_string)
+promote_string, promote_unicode)
 from rpython.rlib.rstring import StringBuilder, UnicodeBuilder
 
 
@@ -518,6 +518,19 @@
 self.meta_interp(f, [0])
 self.check_resops(call_r=2, call_i=5)
 
+def test_promote_unicode(self):
+driver = JitDriver(greens = [], reds = ['n'])
+
+def f(n):
+while n < 21:
+driver.jit_merge_point(n=n)
+promote_unicode(unicode(str(n % 3)))
+n += 1
+return 0
+
+self.meta_interp(f, [0])
+self.check_resops(call_r=4, call_i=5)
+
 def test_join_chars(self):
 jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[])
 _str = self._str
diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
--- a/rpython/rlib/jit.py
+++ b/rpython/rlib/jit.py
@@ -84,6 +84,7 @@
 
 * promote - promote the argument from a variable into a constant
 * promote_string - same, but promote string by *value*
+* promote_unicode - same, but promote unicode string 

[pypy-commit] pypy default: merge regalloc-playground

2019-02-08 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r95905:cfad18a6fd4b
Date: 2019-02-08 15:38 +0100
http://bitbucket.org/pypy/pypy/changeset/cfad18a6fd4b/

Log:merge regalloc-playground

improve register allocation by using better heuristics.

diff too long, truncating to 2000 out of 3847 lines

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -23,4 +23,8 @@
 .. math-improvements
 
 Improve performance of long operations where one of the operands fits into
-an int.
\ No newline at end of file
+an int.
+
+.. regalloc-playgrounds
+
+Improve register allocation in the JIT.
diff --git a/pypy/doc/whatsnew-pypy2-5.10.0.rst 
b/pypy/doc/whatsnew-pypy2-5.10.0.rst
--- a/pypy/doc/whatsnew-pypy2-5.10.0.rst
+++ b/pypy/doc/whatsnew-pypy2-5.10.0.rst
@@ -1,42 +1,42 @@
-==
-What's new in PyPy2.7 5.10
-==
-
-.. this is a revision shortly after release-pypy2.7-v5.9.0
-.. startrev:d56dadcef996
-
-
-.. branch: cppyy-packaging
-
-Cleanup and improve cppyy packaging
-
-.. branch: docs-osx-brew-openssl
-
-.. branch: keep-debug-symbols
-
-Add a smartstrip tool, which can optionally keep the debug symbols in a
-separate file, instead of just stripping them away. Use it in packaging
-
-.. branch: bsd-patches
-
-Fix failures on FreeBSD, contributed by David Naylor as patches on the issue
-tracker (issues 2694, 2695, 2696, 2697)
-
-.. branch: run-extra-tests
-
-Run extra_tests/ in buildbot
-
-.. branch: vmprof-0.4.10
-
-Upgrade the _vmprof backend to vmprof 0.4.10
-
-.. branch: fix-vmprof-stacklet-switch
-.. branch: fix-vmprof-stacklet-switch-2
-
-Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
-
-.. branch: win32-vcvars
-
-.. branch: rdict-fast-hash
-
-Make it possible to declare that the hash function of an r_dict is fast in 
RPython.
+==
+What's new in PyPy2.7 5.10
+==
+
+.. this is a revision shortly after release-pypy2.7-v5.9.0
+.. startrev:d56dadcef996
+
+
+.. branch: cppyy-packaging
+
+Cleanup and improve cppyy packaging
+
+.. branch: docs-osx-brew-openssl
+
+.. branch: keep-debug-symbols
+
+Add a smartstrip tool, which can optionally keep the debug symbols in a
+separate file, instead of just stripping them away. Use it in packaging
+
+.. branch: bsd-patches
+
+Fix failures on FreeBSD, contributed by David Naylor as patches on the issue
+tracker (issues 2694, 2695, 2696, 2697)
+
+.. branch: run-extra-tests
+
+Run extra_tests/ in buildbot
+
+.. branch: vmprof-0.4.10
+
+Upgrade the _vmprof backend to vmprof 0.4.10
+
+.. branch: fix-vmprof-stacklet-switch
+.. branch: fix-vmprof-stacklet-switch-2
+
+Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
+
+.. branch: win32-vcvars
+
+.. branch: rdict-fast-hash
+
+Make it possible to declare that the hash function of an r_dict is fast in 
RPython.
diff --git a/pypy/doc/whatsnew-pypy2-6.0.0.rst 
b/pypy/doc/whatsnew-pypy2-6.0.0.rst
--- a/pypy/doc/whatsnew-pypy2-6.0.0.rst
+++ b/pypy/doc/whatsnew-pypy2-6.0.0.rst
@@ -1,128 +1,128 @@
-===
-What's new in PyPy2.7 5.10+
-===
-
-.. this is a revision shortly after release-pypy2.7-v5.10.0
-.. startrev: 6b024edd9d12
-
-.. branch: cpyext-avoid-roundtrip
-
-Big refactoring of some cpyext code, which avoids a lot of nonsense when
-calling C from Python and vice-versa: the result is a big speedup in
-function/method calls, up to 6 times faster.
-
-.. branch: cpyext-datetime2
-
-Support ``tzinfo`` field on C-API datetime objects, fixes latest pandas HEAD
-
-
-.. branch: mapdict-size-limit
-
-Fix a corner case of mapdict: When an instance is used like a dict (using
-``setattr`` and ``getattr``, or ``.__dict__``) and a lot of attributes are
-added, then the performance using mapdict is linear in the number of
-attributes. This is now fixed (by switching to a regular dict after 80
-attributes).
-
-
-.. branch: cpyext-faster-arg-passing
-
-When using cpyext, improve the speed of passing certain objects from PyPy to C
-code, most notably None, True, False, types, all instances of C-defined types.
-Before, a dict lookup was needed every time such an object crossed over, now it
-is just a field read.
-
-
-.. branch: 2634_datetime_timedelta_performance
-
-Improve datetime + timedelta performance.
-
-.. branch: memory-accounting
-
-Improve way to describe memory
-
-.. branch: msvc14
-
-Allow compilaiton with Visual Studio 2017 compiler suite on windows
-
-.. branch: refactor-slots
-
-Refactor cpyext slots.
-
-
-.. branch: call-loopinvariant-into-bridges
-
-Speed up branchy code that does a lot of function inlining by saving one call
-to read the TLS in most bridges.
-
-.. branch: rpython-sprint
-
-Refactor in rpython signatures
-
-.. branch: cpyext-tls-operror2
-
-Store error state thread-locally in executioncontext, fixes issue #2764
-
-.. branch: cpyext-fast-typecheck
-
-Optimize 

[pypy-commit] pypy default: Merge zlib-copying-third-time-a-charm.

2019-02-08 Thread Julian Berman
Author: Julian Berman 
Branch: 
Changeset: r95901:887c215ee39f
Date: 2019-02-08 07:29 -0500
http://bitbucket.org/pypy/pypy/changeset/887c215ee39f/

Log:Merge zlib-copying-third-time-a-charm.

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,11 @@
 .. this is a revision shortly after release-pypy-7.0.0
 .. startrev: 481c69f7d81f
 
+.. branch: zlib-copying-third-time-a-charm
+
+Make sure zlib decompressobjs have their streams deallocated immediately
+on flush.
+
 .. branch: zlib-copying-redux
 
 Fix calling copy on already-flushed compressobjs.
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -323,12 +323,16 @@
 try:
 self.lock()
 try:
+if not self.stream:
+raise oefmt(
+space.w_ValueError,
+"Decompressor was already flushed",
+)
 copied = rzlib.inflateCopy(self.stream)
 finally:
 self.unlock()
 except rzlib.RZlibError as e:
 raise zlib_error(space, e.msg)
-
 return Decompress(
 space=space,
 stream=copied,
@@ -359,6 +363,9 @@
 else:
 string, finished, unused_len = result
 self._save_unconsumed_input(data, finished, unused_len)
+if finished:
+rzlib.inflateEnd(self.stream)
+self.stream = rzlib.null_stream
 return space.newbytes(string)
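
On PyPy with this change, a decompressor whose stream has finished releases its zlib state right
away, so copying it afterwards is refused; a hedged illustration:

    import zlib

    data = zlib.compress(b'some bytes which will be compressed')
    d = zlib.decompressobj()
    d.decompress(data)   # consumes the whole stream
    d.flush()            # stream finished -> zlib state released immediately
    try:
        d.copy()
    except ValueError:
        pass             # "Decompressor was already flushed"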
 
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge math-improvements

2019-02-08 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r95895:7a4d0769c63d
Date: 2019-02-08 11:01 +0100
http://bitbucket.org/pypy/pypy/changeset/7a4d0769c63d/

Log:merge math-improvements

diff too long, truncating to 2000 out of 2276 lines

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -13,3 +13,9 @@
 
 The zlib module's compressobj and decompressobj now expose copy methods
 as they do on CPython.
+
+
+.. math-improvements
+
+Improve performance of long operations where one of the operands fits into
+an int.
\ No newline at end of file
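
Illustratively, this is the kind of mixed arithmetic the branch targets, where one operand is a
machine-word int and the other an arbitrary-precision long (numbers made up):

    small = 12345
    big = 10 ** 40
    x = big % small      # mixed int/long paths touched by the diff below
    y = small * big
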
diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py
--- a/pypy/objspace/std/intobject.py
+++ b/pypy/objspace/std/intobject.py
@@ -299,7 +299,7 @@
 return ix
 
 
-def _pow_ovf2long(space, iv, iw, w_modulus):
+def _pow_ovf2long(space, iv, w_iv, iw, w_iw, w_modulus):
 if space.is_none(w_modulus) and _recover_with_smalllong(space):
 from pypy.objspace.std.smalllongobject import _pow as _pow_small
 try:
@@ -308,9 +308,12 @@
 return _pow_small(space, r_longlong(iv), iw, r_longlong(0))
 except (OverflowError, ValueError):
 pass
-from pypy.objspace.std.longobject import W_LongObject
-w_iv = W_LongObject.fromint(space, iv)
-w_iw = W_LongObject.fromint(space, iw)
+from pypy.objspace.std.longobject import W_LongObject, W_AbstractLongObject
+if w_iv is None or not isinstance(w_iv, W_AbstractLongObject):
+w_iv = W_LongObject.fromint(space, iv)
+if w_iw is None or not isinstance(w_iw, W_AbstractLongObject):
+w_iw = W_LongObject.fromint(space, iw)
+
 return w_iv.descr_pow(space, w_iw, w_modulus)
 
 
@@ -318,7 +321,7 @@
 op = getattr(operator, opname, None)
 assert op or ovf2small
 
-def ovf2long(space, x, y):
+def ovf2long(space, x, w_x, y, w_y):
 """Handle overflowing to smalllong or long"""
 if _recover_with_smalllong(space):
 if ovf2small:
@@ -330,9 +333,12 @@
 b = r_longlong(y)
 return W_SmallLongObject(op(a, b))
 
-from pypy.objspace.std.longobject import W_LongObject
-w_x = W_LongObject.fromint(space, x)
-w_y = W_LongObject.fromint(space, y)
+from pypy.objspace.std.longobject import W_LongObject, 
W_AbstractLongObject
+if w_x is None or not isinstance(w_x, W_AbstractLongObject):
+w_x = W_LongObject.fromint(space, x)
+if w_y is None or not isinstance(w_y, W_AbstractLongObject):
+w_y = W_LongObject.fromint(space, y)
+
 return getattr(w_x, 'descr_' + opname)(space, w_y)
 
 return ovf2long
@@ -496,12 +502,18 @@
 # can't return NotImplemented (space.pow doesn't do full
 # ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so
 # handle it ourselves
-return _pow_ovf2long(space, x, y, w_modulus)
+return _pow_ovf2long(space, x, self, y, w_exponent, w_modulus)
 
 try:
 result = _pow(space, x, y, z)
-except (OverflowError, ValueError):
-return _pow_ovf2long(space, x, y, w_modulus)
+except OverflowError:
+return _pow_ovf2long(space, x, self, y, w_exponent, w_modulus)
+except ValueError:
+# float result, so let avoid a roundtrip in rbigint.
+self = self.descr_float(space)
+w_exponent = w_exponent.descr_float(space)
+return space.pow(self, w_exponent, space.w_None)
+
 return space.newint(result)
 
 @unwrap_spec(w_modulus=WrappedDefault(None))
@@ -546,7 +558,7 @@
 try:
 z = ovfcheck(op(x, y))
 except OverflowError:
-return ovf2long(space, x, y)
+return ovf2long(space, x, self, y, w_other)
 else:
 z = op(x, y)
 return wrapint(space, z)
@@ -568,7 +580,7 @@
 try:
 z = ovfcheck(op(y, x))
 except OverflowError:
-return ovf2long(space, y, x)
+return ovf2long(space, y, w_other, x, self)  # XXX write a 
test
 else:
 z = op(y, x)
 return wrapint(space, z)
@@ -599,7 +611,7 @@
 try:
 return func(space, x, y)
 except OverflowError:
-return ovf2long(space, x, y)
+return ovf2long(space, x, self, y, w_other)
 else:
 return func(space, x, y)
 
@@ -614,7 +626,7 @@
 try:
 return func(space, y, x)
 except OverflowError:
-return ovf2long(space, y, x)
+return ovf2long(space, y, w_other, x, self)
 else:
 return func(space, y, x)
 
diff --git 

[pypy-commit] pypy default: merge heads

2019-02-06 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r95874:35673b0b2571
Date: 2019-02-06 14:21 +0100
http://bitbucket.org/pypy/pypy/changeset/35673b0b2571/

Log:merge heads

diff --git a/pypy/module/zlib/test/test_zlib.py 
b/pypy/module/zlib/test/test_zlib.py
--- a/pypy/module/zlib/test/test_zlib.py
+++ b/pypy/module/zlib/test/test_zlib.py
@@ -35,6 +35,8 @@
 compression and decompression tests have a little real data to assert
 against.
 """
+cls.w_runappdirect = cls.space.wrap(cls.runappdirect)
+
 cls.w_zlib = cls.space.getbuiltinmodule('zlib')
 expanded = 'some bytes which will be compressed'
 cls.w_expanded = cls.space.wrap(expanded)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Merge default heads

2019-02-06 Thread Julian Berman
Author: Julian Berman 
Branch: 
Changeset: r95867:60fa14799cf4
Date: 2019-02-06 12:06 +0100
http://bitbucket.org/pypy/pypy/changeset/60fa14799cf4/

Log:Merge default heads

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,10 @@
 .. this is a revision shortly after release-pypy-7.0.0
 .. startrev: 481c69f7d81f
 
+.. branch: zlib-copying-redux
+
+Fix calling copy on already-flushed compressobjs.
+
 .. branch: zlib-copying
 
 The zlib module's compressobj and decompressobj now expose copy methods
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -175,6 +175,11 @@
 try:
 self.lock()
 try:
+if not self.stream:
+raise oefmt(
+space.w_ValueError,
+"Compressor was already flushed",
+)
 copied = rzlib.deflateCopy(self.stream)
 finally:
 self.unlock()
@@ -318,9 +323,6 @@
 try:
 self.lock()
 try:
-if not self.stream:
-raise zlib_error(space,
- "decompressor object already flushed")
 copied = rzlib.inflateCopy(self.stream)
 finally:
 self.unlock()
diff --git a/pypy/module/zlib/test/test_zlib.py 
b/pypy/module/zlib/test/test_zlib.py
--- a/pypy/module/zlib/test/test_zlib.py
+++ b/pypy/module/zlib/test/test_zlib.py
@@ -307,7 +307,8 @@
 
 assert (d1 + from_copy) == (d1 + from_decompressor)
 
-def test_unsuccessful_decompress_copy(self):
+def test_cannot_copy_decompressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 decompressor = self.zlib.decompressobj()
 self.intentionally_break_a_z_stream(zobj=decompressor)
 raises(self.zlib.error, decompressor.copy)
@@ -341,7 +342,13 @@
 
 assert (d1 + from_copy) == (d1 + from_compressor)
 
-def test_unsuccessful_compress_copy(self):
+def test_cannot_copy_compressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 compressor = self.zlib.compressobj()
 self.intentionally_break_a_z_stream(zobj=compressor)
 raises(self.zlib.error, compressor.copy)
+
+def test_cannot_copy_compressor_with_flushed_stream(self):
+compressor = self.zlib.compressobj()
+compressor.flush()
+raises(ValueError, compressor.copy)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Merge zlib-copying-redux

2019-02-06 Thread Julian Berman
Author: Julian Berman 
Branch: 
Changeset: r95865:ec33801be3ff
Date: 2019-02-06 11:43 +0100
http://bitbucket.org/pypy/pypy/changeset/ec33801be3ff/

Log:Merge zlib-copying-redux

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,10 @@
 .. this is a revision shortly after release-pypy-7.0.0
 .. startrev: 481c69f7d81f
 
+.. branch: zlib-copying-redux
+
+Fix calling copy on already-flushed compressobjs.
+
 .. branch: zlib-copying
 
 The zlib module's compressobj and decompressobj now expose copy methods
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -175,6 +175,11 @@
 try:
 self.lock()
 try:
+if not self.stream:
+raise oefmt(
+space.w_ValueError,
+"Compressor was already flushed",
+)
 copied = rzlib.deflateCopy(self.stream)
 finally:
 self.unlock()
@@ -318,9 +323,6 @@
 try:
 self.lock()
 try:
-if not self.stream:
-raise zlib_error(space,
- "decompressor object already flushed")
 copied = rzlib.inflateCopy(self.stream)
 finally:
 self.unlock()
diff --git a/pypy/module/zlib/test/test_zlib.py 
b/pypy/module/zlib/test/test_zlib.py
--- a/pypy/module/zlib/test/test_zlib.py
+++ b/pypy/module/zlib/test/test_zlib.py
@@ -307,7 +307,8 @@
 
 assert (d1 + from_copy) == (d1 + from_decompressor)
 
-def test_unsuccessful_decompress_copy(self):
+def test_cannot_copy_decompressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 decompressor = self.zlib.decompressobj()
 self.intentionally_break_a_z_stream(zobj=decompressor)
 raises(self.zlib.error, decompressor.copy)
@@ -341,7 +342,13 @@
 
 assert (d1 + from_copy) == (d1 + from_compressor)
 
-def test_unsuccessful_compress_copy(self):
+def test_cannot_copy_compressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 compressor = self.zlib.compressobj()
 self.intentionally_break_a_z_stream(zobj=compressor)
 raises(self.zlib.error, compressor.copy)
+
+def test_cannot_copy_compressor_with_flushed_stream(self):
+compressor = self.zlib.compressobj()
+compressor.flush()
+raises(ValueError, compressor.copy)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: Merge default

2019-02-06 Thread Julian Berman
Author: Julian Berman 
Branch: 
Changeset: r95866:24f1b599a64d
Date: 2019-02-06 11:59 +0100
http://bitbucket.org/pypy/pypy/changeset/24f1b599a64d/

Log:Merge default

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,10 @@
 .. this is a revision shortly after release-pypy-7.0.0
 .. startrev: 481c69f7d81f
 
+.. branch: zlib-copying-redux
+
+Fix calling copy on already-flushed compressobjs.
+
 .. branch: zlib-copying
 
 The zlib module's compressobj and decompressobj now expose copy methods
diff --git a/pypy/module/zlib/interp_zlib.py b/pypy/module/zlib/interp_zlib.py
--- a/pypy/module/zlib/interp_zlib.py
+++ b/pypy/module/zlib/interp_zlib.py
@@ -175,6 +175,11 @@
 try:
 self.lock()
 try:
+if not self.stream:
+raise oefmt(
+space.w_ValueError,
+"Compressor was already flushed",
+)
 copied = rzlib.deflateCopy(self.stream)
 finally:
 self.unlock()
@@ -318,9 +323,6 @@
 try:
 self.lock()
 try:
-if not self.stream:
-raise zlib_error(space,
- "decompressor object already flushed")
 copied = rzlib.inflateCopy(self.stream)
 finally:
 self.unlock()
diff --git a/pypy/module/zlib/test/test_zlib.py 
b/pypy/module/zlib/test/test_zlib.py
--- a/pypy/module/zlib/test/test_zlib.py
+++ b/pypy/module/zlib/test/test_zlib.py
@@ -307,7 +307,8 @@
 
 assert (d1 + from_copy) == (d1 + from_decompressor)
 
-def test_unsuccessful_decompress_copy(self):
+def test_cannot_copy_decompressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 decompressor = self.zlib.decompressobj()
 self.intentionally_break_a_z_stream(zobj=decompressor)
 raises(self.zlib.error, decompressor.copy)
@@ -341,7 +342,13 @@
 
 assert (d1 + from_copy) == (d1 + from_compressor)
 
-def test_unsuccessful_compress_copy(self):
+def test_cannot_copy_compressor_with_stream_in_inconsistent_state(self):
+if self.runappdirect: skip("can't run with -A")
 compressor = self.zlib.compressobj()
 self.intentionally_break_a_z_stream(zobj=compressor)
 raises(self.zlib.error, compressor.copy)
+
+def test_cannot_copy_compressor_with_flushed_stream(self):
+compressor = self.zlib.compressobj()
+compressor.flush()
+raises(ValueError, compressor.copy)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2019-01-28 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r95741:018346b46386
Date: 2019-01-28 20:56 +0200
http://bitbucket.org/pypy/pypy/changeset/018346b46386/

Log:merge heads

diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -7,16 +7,16 @@
   Armin Rigo
   Maciej Fijalkowski
   Carl Friedrich Bolz-Tereick
+  Antonio Cuni
   Amaury Forgeot d'Arc
-  Antonio Cuni
   Matti Picus
   Samuele Pedroni
   Ronan Lamy
   Alex Gaynor
   Philip Jenvey
+  Richard Plangger
   Brian Kearns
-  Richard Plangger
-  Michael Hudson
+  Michael Hudson-Doyle
   Manuel Jacob
   David Schneider
   Holger Krekel
@@ -26,8 +26,8 @@
   Anders Chrigstrom
   Wim Lavrijsen
   Eric van Riet Paap
+  Remi Meier
   Richard Emslie
-  Remi Meier
   Alexander Schremmer
   Dan Villiom Podlaski Christiansen
   Lukas Diekmann
@@ -37,10 +37,10 @@
   Niklaus Haldimann
   Camillo Bruni
   Laura Creighton
-  Romain Guillebert
   Toon Verwaest
   Leonardo Santagada
   Seo Sanghyeon
+  Romain Guillebert
   Ronny Pfannschmidt
   Justin Peel
   Raffael Tfirst
@@ -81,12 +81,12 @@
   Squeaky
   Edd Barrett
   Timo Paulssen
+  Laurence Tratt
   Marius Gedminas
   Nicolas Truessel
   Alexandre Fayolle
   Simon Burton
   Martin Matusiak
-  Laurence Tratt
   Wenzhu Man
   Konstantin Lopuhin
   John Witulski
@@ -101,8 +101,9 @@
   Jean-Philippe St. Pierre
   Guido van Rossum
   Pavel Vinogradov
+  Stefan Beyer
+  William Leslie
   Paweł Piotr Przeradowski
-  William Leslie
   marky1991
   Ilya Osadchiy
   Tobias Oberstein
@@ -111,10 +112,10 @@
   Taavi Burns
   Adrian Kuhn
   tav
+  Stian Andreassen
   Georg Brandl
   Joannah Nanjekye
   Bert Freudenberg
-  Stian Andreassen
   Wanja Saatkamp
   Mike Blume
   Gerald Klix
@@ -130,6 +131,7 @@
   Vasily Kuznetsov
   Preston Timmons
   David Ripton
+  Pieter Zieschang
   Dusty Phillips
   Lukas Renggli
   Guenter Jantzen
@@ -143,6 +145,7 @@
   Andrew Durdin
   Ben Young
   Michael Schneider
+  Yusuke Tsutsumi
   Nicholas Riley
   Jason Chu
   Igor Trindade Oliveira
@@ -154,7 +157,6 @@
   Mariano Anaya
   anatoly techtonik
   Karl Bartel
-  Stefan Beyer
   Gabriel Lavoie
   Jared Grubb
   Alecsandru Patrascu
@@ -165,7 +167,6 @@
   Victor Stinner
   Andrews Medina
   Aaron Iles
-  p_ziesch...@yahoo.de
   Toby Watson
   Daniel Patrick
   Stuart Williams
@@ -177,6 +178,7 @@
   Mikael Schönenberg
   Stanislaw Halik
   Mihnea Saracin
+  Matt Jackson
   Berkin Ilbeyi
   Gasper Zejn
   Faye Zhao
@@ -184,12 +186,14 @@
   Anders Qvist
   Corbin Simpson
   Chirag Jadwani
+  Pauli Virtanen
   Jonathan David Riehl
   Beatrice During
   Alex Perry
   Robert Zaremba
   Alan McIntyre
   Alexander Sedov
+  David C Ellis
   Vaibhav Sood
   Reuben Cummings
   Attila Gobi
@@ -209,7 +213,6 @@
   Arjun Naik
   Aaron Gallagher
   Alexis Daboville
-  Pieter Zieschang
   Karl Ramm
   Lukas Vacek
   Omer Katz
@@ -237,12 +240,15 @@
   Catalin Gabriel Manciu
   Jacob Oscarson
   Ryan Gonzalez
+  Antoine Dupre
   Kristjan Valur Jonsson
   Lucio Torre
   Richard Lancaster
   Dan Buch
   Lene Wagner
   Tomo Cocoa
+  Miro Hronok
+  Anthony Sottile
   David Lievens
   Neil Blakey-Milner
   Henrik Vendelbo
@@ -257,10 +263,12 @@
   Bobby Impollonia
   Roberto De Ioris
   Jeong YunWon
+  andrewjlawrence
   Christopher Armstrong
   Aaron Tubbs
   Vasantha Ganesh K
   Jason Michalski
+  Radu Ciorba
   Markus Holtermann
   Andrew Thompson
   Yusei Tahara
@@ -268,28 +276,26 @@
   Fabio Niephaus
   Akira Li
   Gustavo Niemeyer
-  Rafał Gałczyński
+  Nate Bragg
   Lucas Stadler
   roberto@goyle
+  Carl Bordum Hansen
   Matt Bogosian
   Yury V. Zaytsev
   florinpapa
   Anders Sigfridsson
-  Matt Jackson
   Nikolay Zinov
   rafalgalczyn...@gmail.com
   Joshua Gilbert
   Anna Katrina Dominguez
   Kim Jin Su
   Amber Brown
-  Miro Hronok
-  Anthony Sottile
-  Nate Bragg
+  Andrew Stepanov
+  Rafał Gałczyński
   Ben Darnell
   Juan Francisco Cantero Hurtado
   Godefroid Chappelle
   Julian Berman
-  Michael Hudson-Doyle
   Stephan Busemann
   Dan Colish
   timo
@@ -299,6 +305,7 @@
   halgari
   Jim Baker
   Chris Lambacher
+  John Aldis
   coolbutusel...@gmail.com
   Mike Bayer
   Rodrigo Araújo
@@ -307,6 +314,7 @@
   OlivierBlanvillain
   Jonas Pfannschmidt
   Zearin
+  Johan Forsberg
   Andrey Churin
   Dan Crosta
   reub...@gmail.com
@@ -316,8 +324,9 @@
   Steve Papanik
   Eli Stevens
   Boglarka Vezer
-  gabrielg
+  gabri...@ec2-54-146-239-158.compute-1.amazonaws.com
   PavloKapyshin
+  Hervé Beraud
   Tomer Chachamu
   Christopher Groskopf
   Asmo Soinio
@@ -331,8 +340,8 @@
   Michael Chermside
   Anna Ravencroft
   remarkablerocket
-  Pauli Virtanen
   Petre Vijiac
+  hgattic
   Berker Peksag
   Christian Muirhead
   soareschen
@@ -351,12 +360,13 @@
   Zooko Wilcox-O Hearn
   James Lan
   jiaaro
+  Evgenii Gorinov
   Markus Unterwaditzer
   Kristoffer Kleine
   Graham Markall
   Dan Loewenherz
   werat
-  Andrew Stepanov
+  Filip Salomonsson
   Niclas Olofsson
   Chris Pressey
   Tobias Diaz
diff 

[pypy-commit] pypy default: merge release-pypy2.7-7.x into default, to incorporate the version bump

2019-01-24 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r95711:c4dc91f2e037
Date: 2019-01-24 18:21 +0100
http://bitbucket.org/pypy/pypy/changeset/c4dc91f2e037/

Log:merge release-pypy2.7-7.x into default, to incorporate the version
bump

diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -65,10 +65,15 @@
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
+
+# Make sure to keep this in sync with:
+#module/sys/version.py
+#module/cpyext/include/patchlevel.h
+#
 # The short X.Y version.
-version = '6.0'
+version = '7.0'
 # The full version, including alpha/beta/rc tags.
-release = '6.0.0'
+release = '7.0.0'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/module/cpyext/include/patchlevel.h 
b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -28,9 +28,12 @@
 /* Version as a string */
 #define PY_VERSION "2.7.13"
 
-/* PyPy version as a string */
-#define PYPY_VERSION "6.1.0-alpha0"
-#define PYPY_VERSION_NUM  0x0601
+/* PyPy version as a string: make sure to keep this in sync with:
+ * module/sys/version.py
+ * doc/conf.py
+ */
+#define PYPY_VERSION "7.0.0"
+#define PYPY_VERSION_NUM  0x0700
 
 /* Defined to mean a PyPy where cpyext holds more regular references
to PyObjects, e.g. staying alive as long as the internal PyPy object
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,10 @@
 #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py
 CPYTHON_API_VERSION= 1013   #XXX # sync with include/modsupport.h
 
-PYPY_VERSION   = (6, 1, 0, "alpha", 0)#XXX # sync patchlevel.h
+# make sure to keep PYPY_VERSION in sync with:
+#module/cpyext/include/patchlevel.h
+#doc/conf.py
+PYPY_VERSION   = (7, 0, 0, "final", 0)
 
 
 import pypy


[pypy-commit] pypy default: Merge the gc-disable branch, which introduces two changes to the gc module:

2018-12-19 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r95514:947a6274ef5e
Date: 2018-12-19 18:14 +0100
http://bitbucket.org/pypy/pypy/changeset/947a6274ef5e/

Log:Merge the gc-disable branch, which introduces two changes to the gc
module:

1. gc.disable() actually disables the major collections of the GC
(thus causing an apparent leak if you leave it disabled for too
long)

2. gc.collect_step() lets you manually run collection steps when
the GC is disabled
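
As a purely illustrative sketch (not part of the changeset), the two
primitives can be combined into a small ``nogc()`` context manager, roughly
what the ``pypytools`` helper mentioned in the documentation below provides.
It assumes the app-level API described in this branch and works on PyPy only;
``latency_critical_work`` is a hypothetical placeholder::

    import gc
    from contextlib import contextmanager

    @contextmanager
    def nogc():
        gc.disable()          # stop automatic major-collection steps
        try:
            yield
        finally:
            # catch up on the postponed major collection, one step at a time
            while not gc.collect_step().major_is_done:
                pass
            gc.enable()       # back to automatic collection

    with nogc():
        latency_critical_work()   # hypothetical latency-sensitive section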

diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -394,8 +394,10 @@
   
 * some functions and attributes of the ``gc`` module behave in a
   slightly different way: for example, ``gc.enable`` and
-  ``gc.disable`` are supported, but instead of enabling and disabling
-  the GC, they just enable and disable the execution of finalizers.
+  ``gc.disable`` are supported, but "enabling and disabling the GC" has
+  a different meaning in PyPy than in CPython.  These functions
+  actually enable and disable the major collections and the
+  execution of finalizers.
 
 * PyPy prints a random line from past #pypy IRC topics at startup in
   interactive mode. In a released version, this behaviour is suppressed, but
diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -22,8 +22,44 @@
 larger.  (A third category, the very large objects, are initially allocated
 outside the nursery and never move.)
 
-Since Incminimark is an incremental GC, the major collection is incremental,
-meaning there should not be any pauses longer than 1ms.
+Since Incminimark is an incremental GC, the major collection is incremental:
+the goal is not to have any pause longer than 1ms, but in practice it depends
+on the size and characteristics of the heap: occasionally, there can be pauses
+between 10-100ms.
+
+
+Semi-manual GC management
+-------------------------
+
+If there are parts of the program where it is important to have a low latency,
+you might want to control precisely when the GC runs, to avoid unexpected
+pauses.  Note that this has effect only on major collections, while minor
+collections continue to work as usual.
+
+As explained above, a full major collection consists of ``N`` steps, where
+``N`` depends on the size of the heap; generally speaking, it is not possible
+to predict how many steps will be needed to complete a collection.
+
+``gc.enable()`` and ``gc.disable()`` control whether the GC runs collection
+steps automatically.  When the GC is disabled the memory usage will grow
+indefinitely, unless you manually call ``gc.collect()`` and
+``gc.collect_step()``.
+
+``gc.collect()`` runs a full major collection.
+
+``gc.collect_step()`` runs a single collection step. It returns an object of
+type GcCollectStepStats_, the same which is passed to the corresponding `GC
+Hooks`_.  The following code is roughly equivalent to a ``gc.collect()``::
+
+    while True:
+        if gc.collect_step().major_is_done:
+            break
+  
+For a real-world example of usage of this API, you can look at the 3rd-party
+module `pypytools.gc.custom`_, which also provides a ``with customgc.nogc()``
+context manager to mark sections where the GC is forbidden.
+
+.. _`pypytools.gc.custom`: 
https://bitbucket.org/antocuni/pypytools/src/0273afc3e8bedf0eb1ef630c3bc69e8d9dd661fe/pypytools/gc/custom.py?at=default=file-view-default
 
 
 Fragmentation
@@ -184,6 +220,8 @@
 the number of pinned objects.
 
 
+.. _GcCollectStepStats:
+
 The attributes for ``GcCollectStepStats`` are:
 
 ``count``, ``duration``, ``duration_min``, ``duration_max``
@@ -192,10 +230,14 @@
 ``oldstate``, ``newstate``
 Integers which indicate the state of the GC before and after the step.
 
+``major_is_done``
+Boolean which indicates whether this was the last step of the major
+collection
+
 The value of ``oldstate`` and ``newstate`` is one of these constants, defined
 inside ``gc.GcCollectStepStats``: ``STATE_SCANNING``, ``STATE_MARKING``,
-``STATE_SWEEPING``, ``STATE_FINALIZING``.  It is possible to get a string
-representation of it by indexing the ``GC_STATS`` tuple.
+``STATE_SWEEPING``, ``STATE_FINALIZING``, ``STATE_USERDEL``.  It is possible
+to get a string representation of it by indexing the ``GC_STATES`` tuple.
 
 
 The attributes for ``GcCollectStats`` are:
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -60,3 +60,10 @@
 .. branch: cleanup-test_lib_pypy
 
 Update most test_lib_pypy/ tests and move them to extra_tests/.
+
+.. branch: gc-disable
+
+Make it possible to manually manage the GC by using a combination of
+gc.disable() and gc.collect_step(). Make sure to write a proper release
+announcement in which we explain that existing programs could leak memory if
+they run for too much time between 

[pypy-commit] pypy default: merge expose-gc-time:

2018-11-30 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r95390:c8b8ee6787e9
Date: 2018-11-30 16:20 +0100
http://bitbucket.org/pypy/pypy/changeset/c8b8ee6787e9/

Log:merge expose-gc-time:

expose the total time spent in the GC, also switch to using seconds
in the GC hooks
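
As a hedged illustration (not part of the changeset): the new counter should
be readable from application level roughly as below, assuming ``gc.get_stats()``
forwards the ``total_gc_time`` attribute added in the diff; the ``/ 1000.0``
used in the repr code suggests the raw value is kept in milliseconds::

    import gc

    stats = gc.get_stats()
    # total_gc_time is the counter exposed by this branch; judging from
    # app_referents.py below, it is stored in milliseconds
    print("time spent in GC so far: %.3f s" % (stats.total_gc_time / 1000.0))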

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -51,3 +51,8 @@
 .. branch: rlock-in-rpython
 
 Backport CPython fix for `thread.RLock` 
+
+
+.. branch: expose-gc-time
+
+Make GC hooks measure time in seconds (as opposed to an opaque unit).
diff --git a/pypy/goal/targetpypystandalone.py 
b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -368,7 +368,7 @@
 def get_gchooks(self):
 from pypy.module.gc.hook import LowLevelGcHooks
 if self.space is None:
-raise Exception("get_gchooks must be called afeter 
get_entry_point")
+raise Exception("get_gchooks must be called after get_entry_point")
 return self.space.fromcache(LowLevelGcHooks)
 
 def get_entry_point(self, config):
diff --git a/pypy/module/gc/app_referents.py b/pypy/module/gc/app_referents.py
--- a/pypy/module/gc/app_referents.py
+++ b/pypy/module/gc/app_referents.py
@@ -57,12 +57,14 @@
  'total_allocated_memory', 'jit_backend_allocated',
  'peak_memory', 'peak_allocated_memory', 
'total_arena_memory',
  'total_rawmalloced_memory', 'nursery_size',
- 'peak_arena_memory', 'peak_rawmalloced_memory'):
+ 'peak_arena_memory', 'peak_rawmalloced_memory',
+ ):
 setattr(self, item, self._format(getattr(self._s, item)))
 self.memory_used_sum = self._format(self._s.total_gc_memory + 
self._s.total_memory_pressure +
 self._s.jit_backend_used)
 self.memory_allocated_sum = 
self._format(self._s.total_allocated_memory + self._s.total_memory_pressure +
 self._s.jit_backend_allocated)
+self.total_gc_time = self._s.total_gc_time
 
 def _format(self, v):
 if v < 100:
@@ -92,6 +94,8 @@
 raw assembler allocated: %s%s
 -
 Total:   %s
+
+Total time spent in GC:  %s
 """ % (self.total_gc_memory, self.peak_memory,
   self.total_arena_memory,
   self.total_rawmalloced_memory,
@@ -106,7 +110,8 @@
   self.nursery_size,
self.jit_backend_allocated,
extra,
-   self.memory_allocated_sum)
+   self.memory_allocated_sum,
+   self.total_gc_time / 1000.0)
 
 
 def get_stats(memory_pressure=False):
diff --git a/pypy/module/gc/hook.py b/pypy/module/gc/hook.py
--- a/pypy/module/gc/hook.py
+++ b/pypy/module/gc/hook.py
@@ -7,6 +7,8 @@
 from pypy.interpreter.typedef import TypeDef, interp_attrproperty, 
GetSetProperty
 from pypy.interpreter.executioncontext import AsyncAction
 
+inf = float("inf")
+
 class LowLevelGcHooks(GcHooks):
 """
 These are the low-level hooks which are called directly from the GC.
@@ -126,9 +128,9 @@
 
 def reset(self):
 self.count = 0
-self.duration = r_longlong(0)
-self.duration_min = r_longlong(longlongmax)
-self.duration_max = r_longlong(0)
+self.duration = 0.0
+self.duration_min = inf
+self.duration_max = 0.0
 
 def fix_annotation(self):
 # the annotation of the class and its attributes must be completed
@@ -136,9 +138,9 @@
 # annotated with the correct types
 if NonConstant(False):
 self.count = NonConstant(-42)
-self.duration = NonConstant(r_longlong(-42))
-self.duration_min = NonConstant(r_longlong(-42))
-self.duration_max = NonConstant(r_longlong(-42))
+self.duration = NonConstant(-53.2)
+self.duration_min = NonConstant(-53.2)
+self.duration_max = NonConstant(-53.2)
 self.total_memory_used = NonConstant(r_uint(42))
 self.pinned_objects = NonConstant(-42)
 self.fire()
@@ -166,9 +168,9 @@
 
 def reset(self):
 self.count = 0
-self.duration = r_longlong(0)
-self.duration_min = r_longlong(longlongmax)
-self.duration_max = r_longlong(0)
+self.duration = 0.0
+self.duration_min = inf
+self.duration_max = 0.0
 
 def fix_annotation(self):
 # the annotation of the class and its attributes must be completed
@@ -176,9 +178,9 @@
 # annotated with the correct types
 if NonConstant(False):
 self.count = NonConstant(-42)
-self.duration = NonConstant(r_longlong(-42))
-self.duration_min = NonConstant(r_longlong(-42))
-self.duration_max = 

[pypy-commit] pypy default: merge heads

2018-11-11 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r95299:3e0eab4be623
Date: 2018-11-11 01:03 -0800
http://bitbucket.org/pypy/pypy/changeset/3e0eab4be623/

Log:merge heads

diff --git a/rpython/translator/platform/windows.py 
b/rpython/translator/platform/windows.py
--- a/rpython/translator/platform/windows.py
+++ b/rpython/translator/platform/windows.py
@@ -88,6 +88,9 @@
 stdout, stderr = popen.communicate()
 if popen.wait() != 0:
 return None
+if stdout[:5].lower() == 'error':
+log.msg('Running "%s" errored: %s' %(vcvars, 
stdout.split()[0]))
+return None
 except:
 return None
 


[pypy-commit] pypy default: merge heads

2018-09-15 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r95120:476eea9a87f7
Date: 2018-09-15 22:34 +0200
http://bitbucket.org/pypy/pypy/changeset/476eea9a87f7/

Log:merge heads

diff --git a/README.rst b/README.rst
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@
 
 Welcome to PyPy!
 
-PyPy is an interperter that implements the Python programming language, based
+PyPy is an interpreter that implements the Python programming language, based
 on the RPython compiler framework for dynamic language implementations.
 
 The home page for the interpreter is:
@@ -15,29 +15,29 @@
 
 http://doc.pypy.org/
 
-More documentation about the RPython framework can be found here
+More documentation about the RPython framework can be found here:
 
-http://rpython.readthedocs.io
+http://rpython.readthedocs.io/
 
-The source for the documentation is in the pypy/doc directory 
+The source for the documentation is in the pypy/doc directory.
+
 
 Using PyPy instead of CPython
-=
+-
 
-Please read the information at http://pypy.org to find the correct way to
+Please read the information at http://pypy.org/ to find the correct way to
 download and use PyPy as an alternative to CPython. 
 
+
 Building
-
+
 
 Building PyPy is not the recommended way to obtain the PyPy alternative python
 interpreter. It is time-consuming and requires significant computing resources.
-More information can be found here
+More information can be found here:
 
 http://doc.pypy.org/en/latest/build.html
 
 Enjoy and send us feedback!
 
 the pypy-dev team 
-
-


[pypy-commit] pypy default: merge heads

2018-09-02 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r95071:942ad6c1866e
Date: 2018-09-02 11:15 +0200
http://bitbucket.org/pypy/pypy/changeset/942ad6c1866e/

Log:merge heads

diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py
--- a/rpython/rlib/rstring.py
+++ b/rpython/rlib/rstring.py
@@ -464,6 +464,10 @@
 raise InvalidBaseError("%s() base must be >= 2 and <= 36" % fname)
 self.base = base
 
+# Leading underscores are not allowed
+if s.startswith('_'):
+self.error()
+
 if base == 16 and (s.startswith('0x') or s.startswith('0X')):
 s = s[2:]
 if base == 8 and (s.startswith('0o') or s.startswith('0O')):
diff --git a/rpython/rlib/test/test_rarithmetic.py 
b/rpython/rlib/test/test_rarithmetic.py
--- a/rpython/rlib/test/test_rarithmetic.py
+++ b/rpython/rlib/test/test_rarithmetic.py
@@ -554,50 +554,52 @@
 py.test.raises(ParseStringError, string_to_int, '+'+s, base)
 py.test.raises(ParseStringError, string_to_int, '-'+s, base)
 
-def test_number_underscores(self):
-VALID_UNDERSCORE_LITERALS = [
-'0_0_0',
-'4_2',
-'1__',
-'0b1001_0100',
-'0xfff_',
-'0o5_7_7',
-'0b_0',
-'0x_f',
-'0o_5',
-]
-INVALID_UNDERSCORE_LITERALS = [
-# Trailing underscores:
-'0_',
-'42_',
-'1.4j_',
-'0x_',
-'0b1_',
-'0xf_',
-'0o5_',
-# Underscores in the base selector:
-'0_b0',
-'0_xf',
-'0_o5',
-# Old-style octal, still disallowed:
-'09_99',
-# Multiple consecutive underscores:
-'4___2',
-'0b1001__0100',
-'0xfff__',
-'0x___',
-'0o5__77',
-'1e1__0',
-]
-for x in VALID_UNDERSCORE_LITERALS:
-print x
-y = string_to_int(x, base=0, allow_underscores=True,
-  no_implicit_octal=True)
-assert y == int(x.replace('_', ''), base=0)
-for x in INVALID_UNDERSCORE_LITERALS:
-print x
-py.test.raises(ParseStringError, string_to_int, x, base=0,
-   allow_underscores=True)
+@py.test.mark.parametrize('s', [
+'0_0_0',
+'4_2',
+'1__',
+'0b1001_0100',
+'0xfff_',
+'0o5_7_7',
+'0b_0',
+'0x_f',
+'0o_5',
+])
+def test_valid_underscores(self, s):
+result = string_to_int(
+s, base=0, allow_underscores=True, no_implicit_octal=True)
+assert result == int(s.replace('_', ''), base=0)
+
+@py.test.mark.parametrize('s', [
+# Leading underscores
+'_100',
+'_',
+'_0b1001_0100',
+# Trailing underscores:
+'0_',
+'42_',
+'1.4j_',
+'0x_',
+'0b1_',
+'0xf_',
+'0o5_',
+# Underscores in the base selector:
+'0_b0',
+'0_xf',
+'0_o5',
+# Old-style octal, still disallowed:
+'09_99',
+# Multiple consecutive underscores:
+'4___2',
+'0b1001__0100',
+'0xfff__',
+'0x___',
+'0o5__77',
+'1e1__0',
+])
+def test_invalid_underscores(self, s):
+with py.test.raises(ParseStringError):
+string_to_int(s, base=0, allow_underscores=True)
 
 def test_no_implicit_octal(self):
 TESTS = ['00', '000', '00_00', '02', '0377', '02_34']


[pypy-commit] pypy default: merge heads

2018-09-01 Thread rlamy
Author: Ronan Lamy 
Branch: 
Changeset: r95057:b9cf6798af7e
Date: 2018-09-01 15:39 +0200
http://bitbucket.org/pypy/pypy/changeset/b9cf6798af7e/

Log:merge heads

diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py
--- a/lib_pypy/_ctypes/primitive.py
+++ b/lib_pypy/_ctypes/primitive.py
@@ -393,9 +393,11 @@
 _init_no_arg_ = __init__
 
 def _ensure_objects(self):
-if self._type_ not in 'zZP':
-assert self._objects is None
-return self._objects
+# No '_objects' is the common case for primitives.  Examples
+# where there is an _objects is if _type in 'zZP', or if
+# self comes from 'from_buffer(buf)'.  See module/test_lib_pypy/
+# ctypes_test/test_buffers.py: test_from_buffer_keepalive.
+return getattr(self, '_objects', None)
 
 def _getvalue(self):
 return self._buffer[0]
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_buffers.py 
b/pypy/module/test_lib_pypy/ctypes_tests/test_buffers.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_buffers.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_buffers.py
@@ -39,6 +39,15 @@
 assert b.value in (1684234849,   # little endian
1633837924)   # big endian
 
+def test_from_buffer_keepalive(self):
+# Issue #2878
+b1 = bytearray("ab")
+array = (c_uint16 * 32)()
+array[6] = c_uint16.from_buffer(b1)
+# this is also what we get on CPython.  I don't think it makes
+# sense because the array contains just a copy of the number.
+assert array._objects == {'6': b1}
+
 try:
 c_wchar
 except NameError:


[pypy-commit] pypy default: merge heads

2018-07-14 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r94867:46410c58b280
Date: 2018-07-14 09:47 +0200
http://bitbucket.org/pypy/pypy/changeset/46410c58b280/

Log:merge heads

diff --git a/pypy/doc/config/objspace.disable_entrypoints.txt 
b/pypy/doc/config/objspace.disable_entrypoints.txt
new file mode 100644
diff --git a/pypy/doc/config/objspace.fstrings.txt 
b/pypy/doc/config/objspace.fstrings.txt
new file mode 100644
diff --git a/pypy/doc/config/objspace.hash.txt 
b/pypy/doc/config/objspace.hash.txt
new file mode 100644
diff --git a/pypy/doc/config/objspace.usemodules._frozen_importlib.txt 
b/pypy/doc/config/objspace.usemodules._frozen_importlib.txt
new file mode 100644
diff --git a/pypy/doc/config/objspace.usemodules._jitlog.txt 
b/pypy/doc/config/objspace.usemodules._jitlog.txt
new file mode 100644
diff --git a/pypy/doc/config/objspace.usemodules.faulthandler.txt 
b/pypy/doc/config/objspace.usemodules.faulthandler.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.backendopt.replace_we_are_jitted.txt 
b/pypy/doc/config/translation.backendopt.replace_we_are_jitted.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.jit_opencoder_model.txt 
b/pypy/doc/config/translation.jit_opencoder_model.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.keepgoing.txt 
b/pypy/doc/config/translation.keepgoing.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.libname.txt 
b/pypy/doc/config/translation.libname.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.lto.txt 
b/pypy/doc/config/translation.lto.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.profoptargs.txt 
b/pypy/doc/config/translation.profoptargs.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.reverse_debugger.txt 
b/pypy/doc/config/translation.reverse_debugger.txt
new file mode 100644
diff --git a/pypy/doc/config/translation.split_gc_address_space.txt 
b/pypy/doc/config/translation.split_gc_address_space.txt
new file mode 100644
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
--- a/pypy/doc/tool/makecontributor.py
+++ b/pypy/doc/tool/makecontributor.py
@@ -18,12 +18,13 @@
 'Antonio Cuni': ['antocuni', 'anto'],
 'Armin Rigo': ['arigo', 'arfigo', 'armin', 'arigato'],
 'Maciej Fijalkowski': ['fijal'],
-'Carl Friedrich Bolz-Tereick': ['Carl Friedrich Bolz', 'cfbolz', 'cf'],
+'Carl Friedrich Bolz-Tereick': ['Carl Friedrich Bolz', 'cfbolz', 'cf', 
'cbolz'],
 'Samuele Pedroni': ['pedronis', 'samuele', 'samule'],
-'Richard Plangger':['planrich'],
-'Michael Hudson': ['mwh'],
+'Richard Plangger': ['planrich', 'plan_rich'],
+'Remi Meier': ['remi'],
+'Michael Hudson-Doyle': ['mwh', 'Michael Hudson'],
 'Holger Krekel': ['hpk', 'holger krekel', 'holger', 'hufpk'],
-"Amaury Forgeot d'Arc": ['afa'],
+"Amaury Forgeot d'Arc": ['afa', 'amaur...@gmail.com'],
 'Alex Gaynor': ['alex', 'agaynor'],
 'David Schneider': ['bivab', 'david'],
 'Christian Tismer': ['chris', 'christian', 'tismer',
@@ -41,7 +42,7 @@
 'Mark Pearse': ['mwp'],
 'Toon Verwaest': ['tverwaes'],
 'Eric van Riet Paap': ['ericvrp'],
-'Jacob Hallen': ['jacob', 'jakob'],
+'Jacob Hallen': ['jacob', 'jakob', 'jacob hallen'],
 'Anders Lehmann': ['ale', 'anders'],
 'Bert Freudenberg': ['bert'],
 'Boris Feigin': ['boris', 'boria'],
@@ -69,19 +70,25 @@
 'Manuel Jacob': ['mjacob'],
 'Rami Chowdhury': ['necaris'],
 'Stanislaw Halik': ['Stanislaw Halik', 'w31rd0'],
-'Wenzhu Man':['wenzhu man', 'wenzhuman'],
-'Anton Gulenko':['anton gulenko', 'anton_gulenko'],
-'Richard Lancaster':['richardlancaster'],
-'William Leslie':['William ML Leslie'],
-'Spenser Bauman':['Spenser Andrew Bauman'],
-'Raffael Tfirst':['raffael.tfi...@gmail.com'],
-'timo':['t...@eistee.fritz.box'],
-'Jasper Schulz':['Jasper.Schulz', 'jbs'],
-'Aaron Gallagher':['"Aaron Gallagher'],
-'Yasir Suhail':['yasirs'],
+'Wenzhu Man': ['wenzhu man', 'wenzhuman'],
+'Anton Gulenko': ['anton gulenko', 'anton_gulenko'],
+'Richard Lancaster': ['richardlancaster'],
+'William Leslie': ['William ML Leslie'],
+'Spenser Bauman': ['Spenser Andrew Bauman'],
+'Raffael Tfirst': ['raffael.tfi...@gmail.com'],
+'timo': ['t...@eistee.fritz.box'],
+'Jasper Schulz': ['Jasper.Schulz', 'jbs'],
+'Aaron Gallagher': ['"Aaron Gallagher'],
+'Yasir Suhail': ['yasirs'],
 'Squeaky': ['squeaky'],
-"Amaury Forgeot d'Arc": ['amaur...@gmail.com'],
 "Dodan Mihai": ['mihai.do...@gmail.com'],
+'Wim Lavrijsen': ['wlav'],
+'Toon Verwaest': ['toon', 'tverwaes'],
+'Seo Sanghyeon': ['sanxiyn'],
+'Leonardo Santagada': ['santagada'],
+'Laurence Tratt': ['ltratt'],
+'Pieter Zieschang': ['pzieschang', 'p_ziesch...@yahoo.de'],
+'John Witulski': ['witulski'],
 }
 
 alias_map = {}
@@ -103,7 +110,8 @@
 return set()
 ignore_words = ['around', 

[pypy-commit] pypy default: merge heads

2018-07-06 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r94815:b717d811eb2b
Date: 2018-07-06 16:26 +0200
http://bitbucket.org/pypy/pypy/changeset/b717d811eb2b/

Log:merge heads

diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -82,10 +82,9 @@
 
 
 class TypeConverter(object):
-_immutable_fields_ = ['cffi_name', 'uses_local', 'name']
+_immutable_fields_ = ['cffi_name', 'name']
 
 cffi_name  = None
-uses_local = False
 name   = ""
 
 def __init__(self, space, extra):
@@ -108,10 +107,10 @@
 from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
 raise FastCallNotPossible
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 self._is_abstract(space)
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
 raise FastCallNotPossible
 
@@ -125,10 +124,10 @@
 def to_memory(self, space, w_obj, w_value, offset):
 self._is_abstract(space)
 
-def finalize_call(self, space, w_obj, call_local):
+def finalize_call(self, space, w_obj):
 pass
 
-def free_argument(self, space, arg, call_local):
+def free_argument(self, space, arg):
 pass
 
 
@@ -172,7 +171,7 @@
 state = space.fromcache(ffitypes.State)
 return state.c_voidp
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 w_tc = space.findattr(w_obj, space.newtext('typecode'))
 if w_tc is not None and space.text_w(w_tc) != self.typecode:
 raise oefmt(space.w_TypeError,
@@ -208,7 +207,7 @@
 class NumericTypeConverterMixin(object):
 _mixin_ = True
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 
@@ -228,26 +227,23 @@
 
 class ConstRefNumericTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
-_immutable_fields_ = ['uses_local']
-
-uses_local = True
 
 def cffi_type(self, space):
 state = space.fromcache(ffitypes.State)
 return state.c_voidp
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
-assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP)  # see 
interp_cppyy.py
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 obj = self._unwrap_object(space, w_obj)
-typed_buf = rffi.cast(self.c_ptrtype, call_local)
+typed_buf = rffi.cast(self.c_ptrtype, scratch)
 typed_buf[0] = obj
 x = rffi.cast(rffi.VOIDPP, address)
-x[0] = call_local
+x[0] = scratch
+
 
 class IntTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
@@ -256,7 +252,7 @@
 class FloatTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
@@ -273,18 +269,18 @@
 state = space.fromcache(ffitypes.State)
 return state.c_void
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 self._is_abstract(space)
 
 
 class BoolConverter(ffitypes.typeid(bool), TypeConverter):
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(rffi.LONGP, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
 ba[capi.c_function_arg_typeoffset(space)] = 'b'
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 x = rffi.cast(rffi.LONGP, address)
 x[0] = self._unwrap_object(space, w_obj)
 
@@ -303,13 +299,13 @@
 address[0] = '\x00'
 
 class CharConverter(ffitypes.typeid(rffi.CHAR), TypeConverter):
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(rffi.CCHARP, address)
 x[0] = self._unwrap_object(space, w_obj)

[pypy-commit] pypy default: merge cppyy-packaging: fixes performance regression (see: https://bitbucket.org/wlav/cppyy/issues/34/class-member-function-calls-not-being)

2018-07-05 Thread wlav
Author: Wim Lavrijsen 
Branch: 
Changeset: r94811:53d95e0b1421
Date: 2018-07-05 18:11 -0700
http://bitbucket.org/pypy/pypy/changeset/53d95e0b1421/

Log:merge cppyy-packaging: fixes performance regression (see:
https://bitbucket.org/wlav/cppyy/issues/34/class-member-function-
calls-not-being)

diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -82,10 +82,9 @@
 
 
 class TypeConverter(object):
-_immutable_fields_ = ['cffi_name', 'uses_local', 'name']
+_immutable_fields_ = ['cffi_name', 'name']
 
 cffi_name  = None
-uses_local = False
 name   = ""
 
 def __init__(self, space, extra):
@@ -108,10 +107,10 @@
 from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
 raise FastCallNotPossible
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 self._is_abstract(space)
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 from pypy.module._cppyy.interp_cppyy import FastCallNotPossible
 raise FastCallNotPossible
 
@@ -125,10 +124,10 @@
 def to_memory(self, space, w_obj, w_value, offset):
 self._is_abstract(space)
 
-def finalize_call(self, space, w_obj, call_local):
+def finalize_call(self, space, w_obj):
 pass
 
-def free_argument(self, space, arg, call_local):
+def free_argument(self, space, arg):
 pass
 
 
@@ -172,7 +171,7 @@
 state = space.fromcache(ffitypes.State)
 return state.c_voidp
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 w_tc = space.findattr(w_obj, space.newtext('typecode'))
 if w_tc is not None and space.text_w(w_tc) != self.typecode:
 raise oefmt(space.w_TypeError,
@@ -208,7 +207,7 @@
 class NumericTypeConverterMixin(object):
 _mixin_ = True
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 
@@ -228,26 +227,23 @@
 
 class ConstRefNumericTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
-_immutable_fields_ = ['uses_local']
-
-uses_local = True
 
 def cffi_type(self, space):
 state = space.fromcache(ffitypes.State)
 return state.c_voidp
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
-assert rffi.sizeof(self.c_type) <= 2*rffi.sizeof(rffi.VOIDP)  # see 
interp_cppyy.py
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 obj = self._unwrap_object(space, w_obj)
-typed_buf = rffi.cast(self.c_ptrtype, call_local)
+typed_buf = rffi.cast(self.c_ptrtype, scratch)
 typed_buf[0] = obj
 x = rffi.cast(rffi.VOIDPP, address)
-x[0] = call_local
+x[0] = scratch
+
 
 class IntTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
@@ -256,7 +252,7 @@
 class FloatTypeConverterMixin(NumericTypeConverterMixin):
 _mixin_ = True
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(self.c_ptrtype, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
@@ -273,18 +269,18 @@
 state = space.fromcache(ffitypes.State)
 return state.c_void
 
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 self._is_abstract(space)
 
 
 class BoolConverter(ffitypes.typeid(bool), TypeConverter):
-def convert_argument(self, space, w_obj, address, call_local):
+def convert_argument(self, space, w_obj, address):
 x = rffi.cast(rffi.LONGP, address)
 x[0] = self._unwrap_object(space, w_obj)
 ba = rffi.cast(rffi.CCHARP, address)
 ba[capi.c_function_arg_typeoffset(space)] = 'b'
 
-def convert_argument_libffi(self, space, w_obj, address, call_local):
+def convert_argument_libffi(self, space, w_obj, address, scratch):
 x = rffi.cast(rffi.LONGP, address)
 x[0] = self._unwrap_object(space, w_obj)
 
@@ -303,13 +299,13 @@
 address[0] = '\x00'
 
 class CharConverter(ffitypes.typeid(rffi.CHAR), TypeConverter):
-def convert_argument(self, space, w_obj, address, call_local):
+  

[pypy-commit] pypy default: merge pyparser-improvements-3

2018-06-06 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r94730:e85e93d7927e
Date: 2018-06-06 15:11 +0200
http://bitbucket.org/pypy/pypy/changeset/e85e93d7927e/

Log:merge pyparser-improvements-3

some small refactorings in interpreter/pyparser and module/parser

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -27,3 +27,8 @@
 
 The reverse-debugger branch has been merged.  For more information, see
 https://bitbucket.org/pypy/revdb
+
+
+.. branch: pyparser-improvements-3
+
+Small refactorings in the Python parser.
diff --git a/pypy/interpreter/pyparser/future.py 
b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -43,7 +43,7 @@
 self.tok = self.tokens[index]
 
 def skip(self, n):
-if self.tok[0] == n:
+if self.tok.token_type == n:
 self.next()
 return True
 else:
@@ -51,7 +51,7 @@
 
 def skip_name(self, name):
 from pypy.interpreter.pyparser import pygram
-if self.tok[0] == pygram.tokens.NAME and self.tok[1] == name:
+if self.tok.token_type == pygram.tokens.NAME and self.tok.value == 
name:
 self.next()
 return True
 else:
@@ -59,8 +59,8 @@
 
 def next_feature_name(self):
 from pypy.interpreter.pyparser import pygram
-if self.tok[0] == pygram.tokens.NAME:
-name = self.tok[1]
+if self.tok.token_type == pygram.tokens.NAME:
+name = self.tok.value
 self.next()
 if self.skip_name("as"):
 self.skip(pygram.tokens.NAME)
@@ -101,7 +101,7 @@
 # somewhere inside the last __future__ import statement
 # (at the start would be fine too, but it's easier to grab a
 # random position inside)
-last_position = (it.tok[2], it.tok[3])
+last_position = (it.tok.lineno, it.tok.column)
 result |= future_flags.get_compiler_feature(it.next_feature_name())
 while it.skip(pygram.tokens.COMMA):
 result |= future_flags.get_compiler_feature(it.next_feature_name())
diff --git a/pypy/interpreter/pyparser/parser.py 
b/pypy/interpreter/pyparser/parser.py
--- a/pypy/interpreter/pyparser/parser.py
+++ b/pypy/interpreter/pyparser/parser.py
@@ -34,6 +34,18 @@
 new.token_ids = self.token_ids
 return new
 
+
+def classify(self, token):
+"""Find the label for a token."""
+if token.token_type == self.KEYWORD_TOKEN:
+label_index = self.keyword_ids.get(token.value, -1)
+if label_index != -1:
+return label_index
+label_index = self.token_ids.get(token.token_type, -1)
+if label_index == -1:
+raise ParseError("invalid token", token)
+return label_index
+
 def _freeze_(self):
 # Remove some attributes not used in parsing.
 try:
@@ -66,6 +78,33 @@
 b[pos] |= bit
 return str(b)
 
+
+class Token(object):
+def __init__(self, token_type, value, lineno, column, line):
+self.token_type = token_type
+self.value = value
+self.lineno = lineno
+# 0-based offset
+self.column = column
+self.line = line
+
+def __repr__(self):
+return "Token(%s, %s)" % (self.token_type, self.value)
+
+def __eq__(self, other):
+# for tests
+return (
+self.token_type == other.token_type and
+self.value == other.value and
+self.lineno == other.lineno and
+self.column == other.column and
+self.line == other.line
+)
+
+def __ne__(self, other):
+return not self == other
+
+
 class Node(object):
 
 __slots__ = ("type", )
@@ -106,6 +145,11 @@
 self.lineno = lineno
 self.column = column
 
+@staticmethod
+def fromtoken(token):
+return Terminal(
+token.token_type, token.value, token.lineno, token.column)
+
 def __repr__(self):
 return "Terminal(type=%s, value=%r)" % (self.type, self.value)
 
@@ -194,20 +238,14 @@
 
 class ParseError(Exception):
 
-def __init__(self, msg, token_type, value, lineno, column, line,
- expected=-1, expected_str=None):
+def __init__(self, msg, token, expected=-1, expected_str=None):
 self.msg = msg
-self.token_type = token_type
-self.value = value
-self.lineno = lineno
-# this is a 0-based index
-self.column = column
-self.line = line
+self.token = token
 self.expected = expected
 self.expected_str = expected_str
 
 def __str__(self):
-return "ParserError(%s, %r)" % (self.token_type, self.value)
+return "ParserError(%s)" % (self.token, )
 
 
 class StackEntry(object):
@@ -250,8 +288,8 @@
 self.root = None
 self.stack 

[pypy-commit] pypy default: merge

2018-05-21 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r94630:622b5969fe73
Date: 2018-05-21 16:03 +0200
http://bitbucket.org/pypy/pypy/changeset/622b5969fe73/

Log:merge

diff --git a/pypy/interpreter/astcompiler/ast.py 
b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -39,6 +39,9 @@
 def mutate_over(self, visitor):
 raise AssertionError("mutate_over() implementation not provided")
 
+def to_object(self, space):
+raise NotImplementedError("abstract base class")
+
 
 class NodeVisitorNotImplemented(Exception):
 pass
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py 
b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -435,6 +435,9 @@
 def mutate_over(self, visitor):
 raise AssertionError("mutate_over() implementation not provided")
 
+def to_object(self, space):
+raise NotImplementedError("abstract base class")
+
 
 class NodeVisitorNotImplemented(Exception):
 pass
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -33,6 +33,7 @@
 __slots__ = ('__weakref__',)
 _must_be_light_finalizer_ = True
 user_overridden_class = False
+_settled_ = True
 
 def getdict(self, space):
 return None
@@ -197,6 +198,8 @@
 # hooks that the mapdict implementations needs:
 def _get_mapdict_map(self):
 return None
+def _mapdict_init_empty(self, terminator):
+return None
 def _set_mapdict_map(self, map):
 raise NotImplementedError
 def _mapdict_read_storage(self, index):
@@ -913,9 +916,11 @@
 """Unpack an iterable into a real (interpreter-level) list.
 
 Raise an OperationError(w_ValueError) if the length is wrong."""
+from pypy.interpreter.generator import GeneratorIterator
 w_iterator = self.iter(w_iterable)
 if expected_length == -1:
 if self.is_generator(w_iterator):
+assert isinstance(w_iterator, GeneratorIterator)
 # special hack for speed
 lst_w = []
 w_iterator.unpack_into(lst_w)
diff --git a/pypy/objspace/std/dictmultiobject.py 
b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -301,6 +301,7 @@
 """
 strategy = self.get_strategy()
 strategy.move_to_end(self, w_key, last_flag)
+# XXX this should be in the default move_to_end method!
 
 def nondescr_popitem_first(self, space):
 """Not exposed directly to app-level, but via __pypy__.popitem_first().
@@ -498,6 +499,9 @@
 def get_empty_storage(self):
 raise NotImplementedError
 
+def switch_to_object_strategy(self, w_dict):
+raise NotImplementedError
+
 @jit.look_inside_iff(lambda self, w_dict:
  w_dict_unrolling_heuristic(w_dict))
 def w_keys(self, w_dict):
@@ -584,6 +588,36 @@
 def prepare_update(self, w_dict, num_extra):
 pass
 
+def length(self, w_dict):
+raise NotImplementedError
+
+def getitem(self, w_dict, w_key):
+raise NotImplementedError
+
+def getitem_str(self, w_dict, key):
+raise NotImplementedError
+
+def setitem(self, w_dict, w_key, w_value):
+raise NotImplementedError
+
+def setitem_str(self, w_dict, key, w_value):
+raise NotImplementedError
+
+def delitem(self, w_dict, w_key):
+raise NotImplementedError
+
+def setdefault(self, w_dict, w_key, w_default):
+raise NotImplementedError
+
+def iterkeys(self, w_dict):
+raise NotImplementedError
+
+def itervalues(self, w_dict):
+raise NotImplementedError
+
+def iteritems(self, w_dict):
+raise NotImplementedError
+
 def move_to_end(self, w_dict, w_key, last_flag):
 # fall-back
 w_value = w_dict.getitem(w_key)
@@ -807,12 +841,22 @@
 class BaseKeyIterator(BaseIteratorImplementation):
 next_key = _new_next('key')
 
+def next_key_entry(self):
+raise NotImplementedError
+
+
 class BaseValueIterator(BaseIteratorImplementation):
 next_value = _new_next('value')
 
+def next_value_entry(self):
+raise NotImplementedError
+
 class BaseItemIterator(BaseIteratorImplementation):
 next_item = _new_next('item')
 
+def next_item_entry(self):
+raise NotImplementedError
+
 
 def create_iterator_classes(dictimpl):
 if not hasattr(dictimpl, 'wrapkey'):
@@ -1447,6 +1491,7 @@
 class W_DictMultiIterKeysObject(W_BaseDictMultiIterObject):
 def descr_next(self, space):
 iteratorimplementation = self.iteratorimplementation
+assert isinstance(iteratorimplementation, BaseKeyIterator)
 w_key 

[pypy-commit] pypy default: merge the gc-more-logging branch, which logs some extra gc-minor and

2018-05-16 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r94602:49ede3b5afe6
Date: 2018-05-16 18:25 +0200
http://bitbucket.org/pypy/pypy/changeset/49ede3b5afe6/

Log:merge the gc-more-logging branch, which logs some extra gc-minor and
gc-collect-step info in the PYPYLOG
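
To actually see the new log lines, the program has to run with ``PYPYLOG``
set.  A short sketch, assuming the usual ``PYPYLOG=prefixes:filename``
convention; ``myscript.py`` is a hypothetical script::

    import os
    import subprocess

    # log the gc-minor and gc-collect-step sections of a child pypy to gc.log
    env = dict(os.environ, PYPYLOG="gc-minor,gc-collect-step:gc.log")
    subprocess.call(["pypy", "myscript.py"], env=env)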

diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1836,6 +1836,7 @@
 debug_print("minor collect, total memory used:", total_memory_used)
 debug_print("number of pinned objects:",
 self.pinned_objects_in_nursery)
+debug_print("total size of surviving objects:", 
self.nursery_surviving_size)
 if self.DEBUG >= 2:
 self.debug_check_consistency() # expensive!
 #
@@ -2401,7 +2402,9 @@
 # a total object size of at least '3 * nursery_size' bytes
 # is processed.
 limit = 3 * self.nursery_size // self.small_request_threshold
-self.free_unvisited_rawmalloc_objects_step(limit)
+nobjects = self.free_unvisited_rawmalloc_objects_step(limit)
+debug_print("freeing raw objects:", limit-nobjects,
+"freed, limit was", limit)
 done = False# the 2nd half below must still be done
 else:
 # Ask the ArenaCollection to visit a fraction of the objects.
@@ -2411,6 +2414,8 @@
 limit = 3 * self.nursery_size // self.ac.page_size
 done = self.ac.mass_free_incremental(self._free_if_unvisited,
  limit)
+status = done and "No more pages left." or "More to do."
+debug_print("freeing GC objects, up to", limit, "pages.", 
status)
 # XXX tweak the limits above
 #
 if done:


[pypy-commit] pypy default: Merge of cppyy-packaging: pulls in fully deferred loading for the backend (needed for pydoc tests in p3.5 branch), support for function pointer arguments, proper types for

2018-05-07 Thread wlav
Author: Wim Lavrijsen 
Branch: 
Changeset: r94499:a4c2916c877f
Date: 2018-05-07 21:02 -0700
http://bitbucket.org/pypy/pypy/changeset/a4c2916c877f/

Log:Merge of cppyy-packaging: pulls in fully deferred loading for the
backend (needed for pydoc tests in p3.5 branch), support for
function pointer arguments, proper types for anonymous enums, and
correct handling of const data.

diff --git a/pypy/module/_cppyy/__init__.py b/pypy/module/_cppyy/__init__.py
--- a/pypy/module/_cppyy/__init__.py
+++ b/pypy/module/_cppyy/__init__.py
@@ -7,6 +7,7 @@
 interpleveldefs = {
 '_resolve_name'  : 'interp_cppyy.resolve_name',
 '_scope_byname'  : 'interp_cppyy.scope_byname',
+'_is_static_data': 'interp_cppyy.is_static_data',
 '_is_template'   : 'interp_cppyy.is_template',
 '_std_string_name'   : 'interp_cppyy.std_string_name',
 '_set_class_generator'   : 'interp_cppyy.set_class_generator',
@@ -21,7 +22,7 @@
 }
 
 appleveldefs = {
-'_init_pythonify': 'pythonify._init_pythonify',
+'_post_import_startup'   : 'pythonify._post_import_startup',
 'add_pythonization'  : 'pythonify.add_pythonization',
 'Template'   : 'pythonify.CPPTemplate',
 }
@@ -34,9 +35,3 @@
 # code generation is not, so give it a chance to run now
 from pypy.module._cppyy import capi
 capi.register_pythonizations(space)
-
-def startup(self, space):
-from pypy.module._cppyy import capi
-capi.verify_backend(space)  # may raise ImportError
-
-space.call_method(self, '_init_pythonify')
diff --git a/pypy/module/_cppyy/capi/loadable_capi.py 
b/pypy/module/_cppyy/capi/loadable_capi.py
--- a/pypy/module/_cppyy/capi/loadable_capi.py
+++ b/pypy/module/_cppyy/capi/loadable_capi.py
@@ -308,7 +308,7 @@
 c_call = state.capi_calls[name]
 except KeyError:
 if state.backend is None:
-load_backend(space)
+verify_backend(space)
 iface = state.capi_call_ifaces[name]
 cfunc = W_RCTypeFunc(space, iface[0], iface[1], False)
 c_call = state.backend.load_function(cfunc, 'cppyy_'+name)
@@ -421,7 +421,7 @@
 _cdata_to_ptr(space, call_capi(space, 'function_address_from_index', 
args)))
 def c_function_address_from_method(space, cppmethod):
 return rffi.cast(C_FUNC_PTR,
-_cdata_to_ptr(space, call_capi(space, 'function_address_from_method', 
_ArgH(cppmethod
+_cdata_to_ptr(space, call_capi(space, 'function_address_from_method', 
[_ArgH(cppmethod)])))
 
 # handling of function argument buffer ---
 def c_allocate_function_args(space, size):
diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -686,6 +686,34 @@
 decref(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0]))
 
 
+class FunctionPointerConverter(TypeConverter):
+_immutable_fields_ = ['signature']
+
+def __init__(self, space, signature):
+self.signature = signature
+
+def convert_argument(self, space, w_obj, address, call_local):
+# TODO: atm, does not actually get an overload, but a staticmethod
+from pypy.module._cppyy.interp_cppyy import W_CPPOverload
+cppol = space.interp_w(W_CPPOverload, w_obj)
+
+# find the function with matching signature
+for i in range(len(cppol.functions)):
+m = cppol.functions[i]
+if m.signature(False) == self.signature:
+x = rffi.cast(rffi.VOIDPP, address)
+x[0] = rffi.cast(rffi.VOIDP,
+capi.c_function_address_from_method(space, m.cppmethod))
+address = rffi.cast(capi.C_OBJECT, address)
+ba = rffi.cast(rffi.CCHARP, address)
+ba[capi.c_function_arg_typeoffset(space)] = 'p'
+return
+
+# lookup failed
+raise oefmt(space.w_TypeError,
+"no overload found matching %s", self.signature)
+
+
 class MacroConverter(TypeConverter):
 def from_memory(self, space, w_obj, w_pycppclass, offset):
 # TODO: get the actual type info from somewhere ...
@@ -749,6 +777,14 @@
 return InstancePtrPtrConverter(space, clsdecl)
 elif compound == "":
 return InstanceConverter(space, clsdecl)
+elif "(anonymous)" in name:
+# special case: enum w/o a type name
+return _converters["internal_enum_type_t"](space, default)
+elif "(*)" in name or "::*)" in name:
+# function pointer
+pos = name.find("*)")
+if pos > 0:
+return FunctionPointerConverter(space, name[pos+2:])
 
 #   5) void* or void converter (which fails on use)
 if 0 <= compound.find('*'):
diff --git a/pypy/module/_cppyy/executor.py 

[pypy-commit] pypy default: merge socket_default_timeout_blockingness into default

2018-05-06 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r94478:7fbd70c05558
Date: 2018-05-06 22:24 +0300
http://bitbucket.org/pypy/pypy/changeset/7fbd70c05558/

Log:merge socket_default_timeout_blockingness into default

diff --git a/rpython/rlib/rsocket.py b/rpython/rlib/rsocket.py
--- a/rpython/rlib/rsocket.py
+++ b/rpython/rlib/rsocket.py
@@ -550,7 +550,7 @@
 self.family = family
 self.type = type
 self.proto = proto
-self.timeout = defaults.timeout
+self.settimeout(defaults.timeout)
 
 @staticmethod
 def empty_rsocket():
diff --git a/rpython/rlib/test/test_rsocket.py 
b/rpython/rlib/test/test_rsocket.py
--- a/rpython/rlib/test/test_rsocket.py
+++ b/rpython/rlib/test/test_rsocket.py
@@ -465,6 +465,15 @@
 s.connect(INETAddress('python.org', 80))
 s.close()
 
+def test_connect_with_default_timeout_fail():
+rsocket.setdefaulttimeout(0.1)
+s = RSocket()
+rsocket.setdefaulttimeout(None)
+assert s.gettimeout() == 0.1
+with py.test.raises(SocketTimeout):
+s.connect(INETAddress('172.30.172.30', 12345))
+s.close()
+
 def test_getsetsockopt():
 import struct
 assert struct.calcsize("i") == rffi.sizeof(rffi.INT)


[pypy-commit] pypy default: merge cppyy-packaging: move to latest backend (0.6.0) and support exceptions through wrappers

2018-04-23 Thread wlav
Author: Wim Lavrijsen 
Branch: 
Changeset: r94435:e50e11af23f1
Date: 2018-04-23 19:57 -0700
http://bitbucket.org/pypy/pypy/changeset/e50e11af23f1/

Log:merge cppyy-packaging: move to latest backend (0.6.0) and support
exceptions through wrappers

diff --git a/pypy/module/_cppyy/__init__.py b/pypy/module/_cppyy/__init__.py
--- a/pypy/module/_cppyy/__init__.py
+++ b/pypy/module/_cppyy/__init__.py
@@ -13,7 +13,7 @@
 '_set_function_generator': 'interp_cppyy.set_function_generator',
 '_register_class': 'interp_cppyy.register_class',
 '_get_nullptr'   : 'interp_cppyy.get_nullptr',
-'CPPClassBase'   : 'interp_cppyy.W_CPPClass',
+'CPPInstanceBase': 'interp_cppyy.W_CPPInstance',
 'addressof'  : 'interp_cppyy.addressof',
 '_bind_object'   : 'interp_cppyy._bind_object',
 'bind_object': 'interp_cppyy.bind_object',
diff --git a/pypy/module/_cppyy/capi/loadable_capi.py 
b/pypy/module/_cppyy/capi/loadable_capi.py
--- a/pypy/module/_cppyy/capi/loadable_capi.py
+++ b/pypy/module/_cppyy/capi/loadable_capi.py
@@ -121,11 +121,11 @@
 
 # TODO: the following need to match up with the globally defined C_XYZ 
low-level
 # types (see capi/__init__.py), but by using strings here, that isn't 
guaranteed
-c_opaque_ptr = state.c_ulong
+c_opaque_ptr = state.c_ulong# not ptrdiff_t (which is signed)
  
 c_scope   = c_opaque_ptr
 c_type= c_scope
-c_object  = c_opaque_ptr
+c_object  = c_opaque_ptr# not voidp (to stick with one handle 
type)
 c_method  = c_opaque_ptr
 c_index   = state.c_long
 c_index_array = state.c_voidp
@@ -150,16 +150,17 @@
 
 self.capi_call_ifaces = {
 # name to opaque C++ scope representation
-'num_scopes'   : ([c_scope],  c_int),
-'scope_name'   : ([c_scope, c_int],   
c_ccharp),
-
 'resolve_name' : ([c_ccharp], 
c_ccharp),
+'resolve_enum' : ([c_ccharp], 
c_ccharp),
 'get_scope': ([c_ccharp], c_scope),
 'actual_class' : ([c_type, c_object], c_type),
+'size_of_klass': ([c_type],   
c_size_t),
+'size_of_type' : ([c_ccharp], 
c_size_t),
 
 # memory management
 'allocate' : ([c_type],   
c_object),
 'deallocate'   : ([c_type, c_object], c_void),
+'construct': ([c_type],   
c_object),
 'destruct' : ([c_type, c_object], c_void),
 
 # method/function dispatching
@@ -182,7 +183,8 @@
 'constructor'  : ([c_method, c_object, c_int, c_voidp],   
c_object),
 'call_o'   : ([c_method, c_object, c_int, c_voidp, c_type],
 c_object),
 
-'get_function_address' : ([c_scope, c_index], 
c_voidp), # TODO: verify
+'function_address_from_index'  : ([c_scope, c_index], 
c_voidp), # TODO: verify
+'function_address_from_method' : ([c_method], 
c_voidp), # id.
 
 # handling of function argument buffer
 'allocate_function_args'   : ([c_int],c_voidp),
@@ -196,6 +198,8 @@
 'is_abstract'  : ([c_type],   c_int),
 'is_enum'  : ([c_ccharp], c_int),
 
+'get_all_cpp_names': ([c_scope, c_voidp], 
c_voidp), # const char**
+
 # type/class reflection information
 'final_name'   : ([c_type],   
c_ccharp),
 'scoped_final_name': ([c_type],   
c_ccharp),
@@ -208,10 +212,10 @@
 
 # method/function reflection information
 'num_methods'  : ([c_scope],  c_int),
-'method_index_at'  : ([c_scope, c_int],   c_index),
 'method_indices_from_name' : ([c_scope, c_ccharp],
c_index_array),
 
 'method_name'  : ([c_scope, c_index], 
c_ccharp),
+'method_mangled_name'  : ([c_scope, c_index], 
c_ccharp),
 'method_result_type'   : ([c_scope, c_index], 
c_ccharp),
 'method_num_args'  : ([c_scope, c_index], c_int),
 'method_req_args'  : ([c_scope, c_index], c_int),
@@ -219,7 +223,9 @@
 'method_arg_default'   : ([c_scope, c_index, c_int],  
c_ccharp),
 'method_signature' : ([c_scope, c_index, c_int],  

[pypy-commit] pypy default: Merge the gc-hook-better-timestamp branch. It improves gc hooks in two ways:

2018-04-18 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r94378:e7b972c672e6
Date: 2018-04-18 15:07 +0200
http://bitbucket.org/pypy/pypy/changeset/e7b972c672e6/

Log:Merge the gc-hook-better-timestamp branch. It improves gc hooks in
two ways:

- the duration field is computed using the very same timestamps
which are used to generate PYPYLOG, so that it is easier to
correlate the two, if needed

- the stats now report duration_min and duration_max, to give an
idea of how long the fastest and slowest events took
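
A small sketch of the correlation this enables, based on the ``__pypy__``
test added below: bracket an application-level region with the same clock
that the hooks now use; ``do_work`` is a hypothetical placeholder::

    from __pypy__ import debug_start, debug_stop

    ts1 = debug_start('myapp-section', timestamp=True)
    do_work()                              # hypothetical workload
    ts2 = debug_stop('myapp-section', timestamp=True)
    # ts2 - ts1 is in the same unit as the duration fields reported by the
    # GC hooks, so the two can be compared directly
    print("section took %d timestamp units" % (ts2 - ts1))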

diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -169,6 +169,12 @@
 The total time spent inside minor collections since the last hook
 call. See below for more information on the unit.
 
+``duration_min``
+The duration of the fastest minor collection since the last hook call.
+
+``duration_max``
+The duration of the slowest minor collection since the last hook call.
+
  ``total_memory_used``
 The amount of memory used at the end of the minor collection, in
 bytes. This include the memory used in arenas (for GC-managed memory) and
@@ -180,7 +186,7 @@
 
 The attributes for ``GcCollectStepStats`` are:
 
-``count``, ``duration``
+``count``, ``duration``, ``duration_min``, ``duration_max``
 See above.
 
 ``oldstate``, ``newstate``
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -14,3 +14,7 @@
 .. branch: gc-hooks
 
 Introduce GC hooks, as documented in doc/gc_info.rst
+
+.. branch: gc-hook-better-timestamp
+
+Improve GC hooks
diff --git a/pypy/module/__pypy__/interp_debug.py 
b/pypy/module/__pypy__/interp_debug.py
--- a/pypy/module/__pypy__/interp_debug.py
+++ b/pypy/module/__pypy__/interp_debug.py
@@ -3,9 +3,12 @@
 from rpython.rlib import rtimer
 
 @jit.dont_look_inside
-@unwrap_spec(category='text')
-def debug_start(space, category):
-debug.debug_start(category)
+@unwrap_spec(category='text', timestamp=bool)
+def debug_start(space, category, timestamp=False):
+res = debug.debug_start(category, timestamp=timestamp)
+if timestamp:
+return space.newint(res)
+return space.w_None
 
 @jit.dont_look_inside
 def debug_print(space, args_w):
@@ -13,10 +16,12 @@
 debug.debug_print(' '.join(parts))
 
 @jit.dont_look_inside
-@unwrap_spec(category='text')
-def debug_stop(space, category):
-debug.debug_stop(category)
-
+@unwrap_spec(category='text', timestamp=bool)
+def debug_stop(space, category, timestamp=False):
+res = debug.debug_stop(category, timestamp=timestamp)
+if timestamp:
+return space.newint(res)
+return space.w_None
 
 @unwrap_spec(category='text')
 def debug_print_once(space, category, args_w):
diff --git a/pypy/module/__pypy__/test/test_debug.py 
b/pypy/module/__pypy__/test/test_debug.py
--- a/pypy/module/__pypy__/test/test_debug.py
+++ b/pypy/module/__pypy__/test/test_debug.py
@@ -59,3 +59,15 @@
 from __pypy__ import debug_get_timestamp_unit
 unit = debug_get_timestamp_unit()
 assert unit in ('tsc', 'ns', 'QueryPerformanceCounter')
+
+def test_debug_start_stop_timestamp(self):
+import time
+from __pypy__ import debug_start, debug_stop, debug_read_timestamp
+assert debug_start('foo') is None
+assert debug_stop('foo') is None
+ts1 = debug_start('foo', timestamp=True)
+t = time.time()
+while time.time() - t < 0.02:
+pass
+ts2 = debug_stop('foo', timestamp=True)
+assert ts2 > ts1
diff --git a/pypy/module/gc/hook.py b/pypy/module/gc/hook.py
--- a/pypy/module/gc/hook.py
+++ b/pypy/module/gc/hook.py
@@ -1,7 +1,7 @@
 from rpython.memory.gc.hook import GcHooks
 from rpython.memory.gc import incminimark 
 from rpython.rlib.nonconst import NonConstant
-from rpython.rlib.rarithmetic import r_uint, r_longlong
+from rpython.rlib.rarithmetic import r_uint, r_longlong, longlongmax
 from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault
 from pypy.interpreter.baseobjspace import W_Root
 from pypy.interpreter.typedef import TypeDef, interp_attrproperty, 
GetSetProperty
@@ -35,6 +35,8 @@
 action = self.w_hooks.gc_minor
 action.count += 1
 action.duration += duration
+action.duration_min = min(action.duration_min, duration)
+action.duration_max = max(action.duration_max, duration)
 action.total_memory_used = total_memory_used
 action.pinned_objects = pinned_objects
 action.fire()
@@ -43,6 +45,8 @@
 action = self.w_hooks.gc_collect_step
 action.count += 1
 action.duration += duration
+action.duration_min = min(action.duration_min, duration)
+action.duration_max = max(action.duration_max, duration)
 action.oldstate = oldstate
 action.newstate = newstate
 action.fire()
@@ 

[pypy-commit] pypy default: merge the gc-hooks branch: it is now possible to install app-level hooks which are triggered whenever a specific GC activity occurs

2018-04-17 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r94361:5bd740a5496c
Date: 2018-04-17 12:09 +0200
http://bitbucket.org/pypy/pypy/changeset/5bd740a5496c/

Log:merge the gc-hooks branch: it is now possible to install app-level
hooks which are triggered whenever a specific GC activity occurs
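
Before the diff, a minimal app-level sketch of the API being merged (PyPy-only;
attribute and field names follow the gc_info.rst hunk below, the printing is
only for illustration):

    import gc

    class MyHooks(object):
        def on_gc_minor(self, stats):
            # count/duration aggregate every minor collection since the
            # previous invocation of this hook
            print('gc-minor x%d duration=%d mem=%d pinned=%d' % (
                stats.count, stats.duration,
                stats.total_memory_used, stats.pinned_objects))

        def on_gc_collect_step(self, stats):
            # oldstate/newstate are integers; the documentation below explains
            # how to map them to names via the GC_STATS tuple
            print('gc-collect-step: %s -> %s' % (stats.oldstate, stats.newstate))

        def on_gc_collect(self, stats):
            print('gc-collect-done, %d major collections so far'
                  % stats.num_major_collects)

    gc.hooks.set(MyHooks())   # installs every on_gc_* method found on the object
    # ... run the workload ...
    gc.hooks.reset()          # uninstalls all hooks again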

diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst
--- a/pypy/doc/gc_info.rst
+++ b/pypy/doc/gc_info.rst
@@ -121,6 +121,160 @@
   alive by GC objects, but not accounted in the GC
 
 
+GC Hooks
+--------
+
+GC hooks are user-defined functions which are called whenever a specific GC
+event occurs, and can be used to monitor GC activity and pauses.  You can
+install the hooks by setting the following attributes:
+
+``gc.hooks.on_gc_minor``
+Called whenever a minor collection occurs. It corresponds to
+``gc-minor`` sections inside ``PYPYLOG``.
+
+``gc.hooks.on_gc_collect_step``
+Called whenever an incremental step of a major collection occurs. It
+corresponds to ``gc-collect-step`` sections inside ``PYPYLOG``.
+
+``gc.hooks.on_gc_collect``
+Called after the last incremental step, when a major collection is fully
+done. It corresponds to ``gc-collect-done`` sections inside ``PYPYLOG``.
+
+To uninstall a hook, simply set the corresponding attribute to ``None``.  To
+install all hooks at once, you can call ``gc.hooks.set(obj)``, which will look
+for methods ``on_gc_*`` on ``obj``.  To uninstall all the hooks at once, you
+can call ``gc.hooks.reset()``.
+
+The functions called by the hooks receive a single ``stats`` argument, which
+contains various statistics about the event.
+
+Note that PyPy cannot call the hooks immediately after a GC event, but it has
+to wait until it reaches a point in which the interpreter is in a known state
+and calling user-defined code is harmless.  It might happen that multiple
+events occur before the hook is invoked: in this case, you can inspect the
+value ``stats.count`` to know how many times the event occurred since the last
+time the hook was called.  Similarly, ``stats.duration`` contains the
+**total** time spent by the GC for this specific event since the last time the
+hook was called.
+
+On the other hand, all the other fields of the ``stats`` object are relative
+only to the **last** event of the series.
+
+The attributes for ``GcMinorStats`` are:
+
+``count``
+The number of minor collections occurred since the last hook call.
+
+``duration``
+The total time spent inside minor collections since the last hook
+call. See below for more information on the unit.
+
+``total_memory_used``
+The amount of memory used at the end of the minor collection, in
+bytes. This includes the memory used in arenas (for GC-managed memory) and
+raw-malloced memory (e.g., the content of numpy arrays).
+
+``pinned_objects``
+The number of pinned objects.
+
+
+The attributes for ``GcCollectStepStats`` are:
+
+``count``, ``duration``
+See above.
+
+``oldstate``, ``newstate``
+Integers which indicate the state of the GC before and after the step.
+
+The value of ``oldstate`` and ``newstate`` is one of these constants, defined
+inside ``gc.GcCollectStepStats``: ``STATE_SCANNING``, ``STATE_MARKING``,
+``STATE_SWEEPING``, ``STATE_FINALIZING``.  It is possible to get a string
+representation of it by indexing the ``GC_STATS`` tuple.
+
+
+The attributes for ``GcCollectStats`` are:
+
+``count``
+See above.
+
+``num_major_collects``
+The total number of major collections which have been done since the
+start. Contrarily to ``count``, this is an always-growing counter and it's
+not reset between invocations.
+
+``arenas_count_before``, ``arenas_count_after``
+Number of arenas used before and after the major collection.
+
+``arenas_bytes``
+Total number of bytes used by GC-managed objects.
+
+``rawmalloc_bytes_before``, ``rawmalloc_bytes_after``
+Total number of bytes used by raw-malloced objects, before and after the
+major collection.
+
+Note that ``GcCollectStats`` does **not** have a ``duration`` field. This is
+because all the GC work is done inside ``gc-collect-step``:
+``gc-collect-done`` is used only to give additional stats, but doesn't do any
+actual work.
+
+A note about the ``duration`` field: depending on the architecture and
+operating system, PyPy uses different ways to read timestamps, so ``duration``
+is expressed in varying units. It is possible to know which by calling
+``__pypy__.debug_get_timestamp_unit()``, which can be one of the following
+values:
+
+``tsc``
+The default on ``x86`` machines: timestamps are expressed in CPU ticks, as
+read by the `Time Stamp Counter`_.
+
+``ns``
+Timestamps are expressed in nanoseconds.
+
+``QueryPerformanceCounter``
+On Windows, in case for some reason ``tsc`` is not available: timestamps
+are read using the win API ``QueryPerformanceCounter()``.
+
+
+Unfortunately, there does not seem to be a reliable standard way for
+converting 

[pypy-commit] pypy default: merge pyparser-improvements-2

2018-04-10 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r94294:e66f24650daf
Date: 2018-04-10 09:41 +0200
http://bitbucket.org/pypy/pypy/changeset/e66f24650daf/

Log:merge pyparser-improvements-2

- fixes .offset values of SyntaxError, which is 1-based (but the
raising code sometimes assumed it was 0-based)

- expand some abbreviations

- better error messages for non-matching parenthesis
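
A small app-level illustration of the first point (hedged: the exact message
text differs between interpreters and versions, only the 1-based ``offset``
attribute is the point here):

    try:
        compile("f(1, 2\n", "<doc>", "exec")
    except SyntaxError as e:
        # e.offset counts columns starting at 1, matching CPython
        print(e.msg, e.lineno, e.offset)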

diff --git a/lib-python/2.7/test/test_eof.py b/lib-python/2.7/test/test_eof.py
--- a/lib-python/2.7/test/test_eof.py
+++ b/lib-python/2.7/test/test_eof.py
@@ -5,7 +5,7 @@
 
 class EOFTestCase(unittest.TestCase):
 def test_EOFC(self):
-expect = "EOL while scanning string literal (, line 1)"
+expect = "end of line (EOL) while scanning string literal (, 
line 1)"
 try:
 eval("""'this is a test\
 """)
@@ -15,7 +15,7 @@
 raise test_support.TestFailed
 
 def test_EOFS(self):
-expect = ("EOF while scanning triple-quoted string literal "
+expect = ("end of file (EOF) while scanning triple-quoted string 
literal "
   "(, line 1)")
 try:
 eval("""'''this is a test""")
diff --git a/lib-python/2.7/test/test_traceback.py 
b/lib-python/2.7/test/test_traceback.py
--- a/lib-python/2.7/test/test_traceback.py
+++ b/lib-python/2.7/test/test_traceback.py
@@ -123,10 +123,7 @@
 self.assertEqual(len(err), 4)
 self.assertEqual(err[1].strip(), "print(2)")
 self.assertIn("^", err[2])
-if check_impl_detail():
-self.assertEqual(err[1].find("p"), err[2].find("^"))
-if check_impl_detail(pypy=True):
-self.assertEqual(err[1].find("2)") + 1, err[2].find("^"))
+self.assertEqual(err[1].find("p"), err[2].find("^"))
 
 def test_base_exception(self):
 # Test that exceptions derived from BaseException are formatted right
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -10,3 +10,9 @@
 Fix for python-level classes that inherit from C-API types, previously the
 `w_obj` was not necessarily preserved throughout the lifetime of the `pyobj`
 which led to cases where instance attributes were lost. Fixes issue #2793
+
+
+.. branch: pyparser-improvements-2
+
+Improve line offsets that are reported by SyntaxError. Improve error messages
+for a few situations, including mismatched parenthesis.
diff --git a/pypy/interpreter/pyparser/error.py 
b/pypy/interpreter/pyparser/error.py
--- a/pypy/interpreter/pyparser/error.py
+++ b/pypy/interpreter/pyparser/error.py
@@ -6,6 +6,7 @@
  lastlineno=0):
 self.msg = msg
 self.lineno = lineno
+# NB: offset is a 1-based index!
 self.offset = offset
 self.text = text
 self.filename = filename
diff --git a/pypy/interpreter/pyparser/parser.py 
b/pypy/interpreter/pyparser/parser.py
--- a/pypy/interpreter/pyparser/parser.py
+++ b/pypy/interpreter/pyparser/parser.py
@@ -199,6 +199,7 @@
 self.token_type = token_type
 self.value = value
 self.lineno = lineno
+# this is a 0-based index
 self.column = column
 self.line = line
 self.expected = expected
diff --git a/pypy/interpreter/pyparser/pyparse.py 
b/pypy/interpreter/pyparser/pyparse.py
--- a/pypy/interpreter/pyparser/pyparse.py
+++ b/pypy/interpreter/pyparser/pyparse.py
@@ -188,7 +188,9 @@
 if e.expected_str is not None:
 msg += " (expected '%s')" % e.expected_str
 
-raise new_err(msg, e.lineno, e.column, e.line,
+# parser.ParseError(...).column is 0-based, but the offsets in the
+# exceptions in the error module are 1-based, hence the '+ 1'
+raise new_err(msg, e.lineno, e.column + 1, e.line,
   compile_info.filename)
 else:
 tree = self.root
diff --git a/pypy/interpreter/pyparser/pytokenizer.py 
b/pypy/interpreter/pyparser/pytokenizer.py
--- a/pypy/interpreter/pyparser/pytokenizer.py
+++ b/pypy/interpreter/pyparser/pytokenizer.py
@@ -73,14 +73,14 @@
 logical line; continuation lines are included.
 """
 token_list = []
-lnum = parenlev = continued = 0
+lnum = continued = 0
 namechars = NAMECHARS
 numchars = NUMCHARS
 contstr, needcont = '', 0
 contline = None
 indents = [0]
 last_comment = ''
-parenlevstart = (0, 0, "")
+parenstack = []
 
 # make the annotator happy
 endDFA = DUMMY_DFA
@@ -97,7 +97,7 @@
 if contstr:
 if not line:
 raise TokenError(
-"EOF while scanning triple-quoted string literal",
+"end of file (EOF) while scanning triple-quoted string 
literal",
 strstart[2], strstart[0], strstart[1]+1,
   

[pypy-commit] pypy default: merge cpyext-subclass-setattr which fixes cpyext pyobjects "losing" a w_obj

2018-04-07 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r94262:be473ba66a18
Date: 2018-04-08 00:42 +0300
http://bitbucket.org/pypy/pypy/changeset/be473ba66a18/

Log:merge cpyext-subclass-setattr which fixes cpyext pyobjects "losing"
a w_obj

diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -640,7 +640,7 @@
 'Py_DivisionWarningFlag', 'Py_DontWriteBytecodeFlag', 
'Py_NoUserSiteDirectory',
 '_Py_QnewFlag', 'Py_Py3kWarningFlag', 'Py_HashRandomizationFlag', 
'_Py_PackageContext',
 '_PyTraceMalloc_Track', '_PyTraceMalloc_Untrack', 'PyMem_Malloc',
-'Py_IncRef', 'Py_DecRef', 'PyObject_Free', 'PyObject_GC_Del', 
'PyType_GenericAlloc',
+'PyObject_Free', 'PyObject_GC_Del', 'PyType_GenericAlloc',
 '_PyObject_New', '_PyObject_NewVar',
 '_PyObject_GC_New', '_PyObject_GC_NewVar',
 'PyObject_Init', 'PyObject_InitVar', 'PyInt_FromLong',
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -60,10 +60,10 @@
 
 def _cpyext_attach_pyobj(self, space, py_obj):
 self._cpy_ref = py_obj
-rawrefcount.create_link_pyobj(self, py_obj)
+rawrefcount.create_link_pypy(self, py_obj)
 cls._cpyext_attach_pyobj = _cpyext_attach_pyobj
 
-add_direct_pyobj_storage(W_BaseCPyObject)
+add_direct_pyobj_storage(W_BaseCPyObject) 
 add_direct_pyobj_storage(W_TypeObject)
 add_direct_pyobj_storage(W_NoneObject)
 add_direct_pyobj_storage(W_BoolObject)
@@ -414,3 +414,14 @@
 @cpython_api([rffi.VOIDP], lltype.Signed, error=CANNOT_FAIL)
 def _Py_HashPointer(space, ptr):
 return rffi.cast(lltype.Signed, ptr)
+
+@cpython_api([PyObject], lltype.Void)
+def Py_IncRef(space, obj):
+# used only ifdef PYPY_DEBUG_REFCOUNT
+if obj:
+incref(space, obj)
+
+@cpython_api([PyObject], lltype.Void)
+def Py_DecRef(space, obj):
+# used only ifdef PYPY_DEBUG_REFCOUNT
+decref(space, obj)
diff --git a/pypy/module/cpyext/src/object.c b/pypy/module/cpyext/src/object.c
--- a/pypy/module/cpyext/src/object.c
+++ b/pypy/module/cpyext/src/object.c
@@ -5,18 +5,6 @@
 extern void _PyPy_Free(void *ptr);
 extern void *_PyPy_Malloc(Py_ssize_t size);
 
-void
-Py_IncRef(PyObject *o)
-{
-Py_XINCREF(o);
-}
-
-void
-Py_DecRef(PyObject *o)
-{
-Py_XDECREF(o);
-}
-
 /* 
  * The actual value of this variable will be the address of
  * pyobject.w_marker_deallocating, and will be set by
diff --git a/pypy/module/cpyext/test/array.c b/pypy/module/cpyext/test/array.c
--- a/pypy/module/cpyext/test/array.c
+++ b/pypy/module/cpyext/test/array.c
@@ -2492,6 +2492,87 @@
 return PyLong_FromLong(obj1->ob_type->tp_dealloc == obj2->ob_type->tp_dealloc);
 }
 
+static PyObject *
+subclass_with_attribute(PyObject *self, PyObject* args) {
+/* what happens when we use tp_alloc to create the subclass, then
+ * assign to the w_obj via python, then get the GC to collect?
+ * The w_obj should not be collected!!
+ */
+PyObject * obj, *sub, *attrib, *funcname, *attribname, *collect, *res, *tup;
+PyTypeObject * subtype;
+int i;
+if (!PyArg_ParseTuple(args, "OOOO", &obj, &funcname, &attribname, &collect)) {
+return NULL;
+}
+if (!PyType_Check(obj)) {
+PyErr_SetString(PyExc_TypeError,
+"expected type object");
+return NULL;
+}
+subtype = (PyTypeObject*)obj;
+sub = subtype->tp_alloc(subtype, 0);
+if (!sub) {
+return NULL;
+}
+attrib = PyObject_GetAttr(sub, funcname);
+if (!attrib || (attrib == Py_None) ) {
+PyErr_SetString(PyExc_ValueError,
+"could not find function to call");
+Py_XDECREF(attrib);
+Py_DECREF(sub);
+return NULL;
+}
+tup = PyTuple_New(0);
+/*
+#ifdef PYPY_VERSION
+printf("calling addattrib pypylink %lu \n", sub->ob_pypy_link);
+#endif
+*/
+res = PyObject_Call(attrib, tup, NULL);
+/*
+#ifdef PYPY_VERSION
+printf("after addattrib pypylink %lu \n", sub->ob_pypy_link);
+#endif
+*/
+Py_DECREF(attrib);
+if (res == NULL) {
+Py_DECREF(tup);
+Py_DECREF(sub);
+return NULL;
+}
+Py_DECREF(res);
+for(i=0; i<10; i++) {
+/*
+#ifdef PYPY_VERSION
+printf("starting loop iteration %d refcnt %lu pypylink %lu \n", i, 
+sub->ob_refcnt, sub->ob_pypy_link);
+#else
+printf("starting loop iteration %d refcnt %lu\n", i, 
sub->ob_refcnt);
+#endif
+*/
+attrib =  PyObject_GetAttr(sub, attribname);
+if (!attrib || (attrib == Py_None)) {
+PyErr_SetString(PyExc_ValueError,
+"could not find attrib on object");
+Py_XDECREF(attrib);
+Py_DECREF(tup);
+Py_DECREF(sub);
+return NULL;
+}
+Py_XDECREF(attrib);
+res = 

[pypy-commit] pypy default: merge branch which fixes issue #2776

2018-04-02 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r94220:03739fa3fc5c
Date: 2018-04-03 06:06 +0300
http://bitbucket.org/pypy/pypy/changeset/03739fa3fc5c/

Log:merge branch which fixes issue #2776

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -91,3 +91,9 @@
 .. branch: pyparser-improvements
 
 Improve speed of Python parser, improve ParseError messages slightly.
+
+.. branch: ioctl-arg-size
+
+Work around possible bugs in upstream ioctl users, like CPython allocate at
+least 1024 bytes for the arg in calls to ``ioctl(fd, request, arg)``. Fixes
+issue #2776
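
A hedged app-level sketch of the ``ioctl(fd, request, arg)`` pattern involved
(assumes a Unix terminal on stdout): callers routinely pass a small packed
buffer, and the interp-level code below now copies it into a scratch buffer of
at least ``IOCTL_BUFSZ`` (1024) bytes, as CPython does, so a driver that writes
more than ``len(arg)`` bytes no longer clobbers memory.

    import fcntl, struct, sys, termios

    arg = struct.pack('HHHH', 0, 0, 0, 0)          # caller supplies only 8 bytes
    res = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, arg)
    rows, cols, _, _ = struct.unpack('HHHH', res)
    print("%d rows x %d cols" % (rows, cols))
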
diff --git a/pypy/module/fcntl/interp_fcntl.py 
b/pypy/module/fcntl/interp_fcntl.py
--- a/pypy/module/fcntl/interp_fcntl.py
+++ b/pypy/module/fcntl/interp_fcntl.py
@@ -204,6 +204,7 @@
 
 # XXX this function's interface is a mess.
 # We try to emulate the behavior of Python >= 2.5 w.r.t. mutate_flag
+IOCTL_BUFSZ = 1024 # like cpython
 
 fd = space.c_filedescriptor_w(w_fd)
 op = rffi.cast(rffi.INT, op)# C long => C int
@@ -216,15 +217,19 @@
 else:
 arg = rwbuffer.as_str()
 ll_arg = rffi.str2charp(arg)
+to_alloc = max(IOCTL_BUFSZ, len(arg))
 try:
-rv = ioctl_str(fd, op, ll_arg)
-if rv < 0:
-raise _get_error(space, "ioctl")
-arg = rffi.charpsize2str(ll_arg, len(arg))
-if mutate_flag != 0:
-rwbuffer.setslice(0, arg)
-return space.newint(rv)
-return space.newbytes(arg)
+with rffi.scoped_alloc_buffer(to_alloc) as buf:
+rffi.c_memcpy(rffi.cast(rffi.VOIDP, buf.raw),
+  rffi.cast(rffi.VOIDP, ll_arg), len(arg))
+rv = ioctl_str(fd, op, buf.raw)
+if rv < 0:
+raise _get_error(space, "ioctl")
+arg = rffi.charpsize2str(buf.raw, len(arg))
+if mutate_flag != 0:
+rwbuffer.setslice(0, arg)
+return space.newint(rv)
+return space.newbytes(arg)
 finally:
 lltype.free(ll_arg, flavor='raw')
 
@@ -240,11 +245,15 @@
 raise
 else:
 ll_arg = rffi.str2charp(arg)
+to_alloc = max(IOCTL_BUFSZ, len(arg))
 try:
-rv = ioctl_str(fd, op, ll_arg)
-if rv < 0:
-raise _get_error(space, "ioctl")
-arg = rffi.charpsize2str(ll_arg, len(arg))
+with rffi.scoped_alloc_buffer(to_alloc) as buf:
+rffi.c_memcpy(rffi.cast(rffi.VOIDP, buf.raw),
+  rffi.cast(rffi.VOIDP, ll_arg), len(arg))
+rv = ioctl_str(fd, op, buf.raw)
+if rv < 0:
+raise _get_error(space, "ioctl")
+arg = rffi.charpsize2str(buf.raw, len(arg))
 return space.newbytes(arg)
 finally:
 lltype.free(ll_arg, flavor='raw')


[pypy-commit] pypy default: merge pyparser-improvements:

2018-04-01 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r94210:045483a22d66
Date: 2018-04-01 12:36 +0200
http://bitbucket.org/pypy/pypy/changeset/045483a22d66/

Log:merge pyparser-improvements:

- speed up python parser
- create slightly more helpful SyntaxError message (only in very clear
and obvious cases)
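
A quick app-level example of the second point, mirroring the test_genexps
change in the diff below (the quoted message is what this branch produces;
other interpreters print a plainer "invalid syntax"):

    try:
        compile("dict(a = i for i in range(10))", "<doc>", "exec")
    except SyntaxError as e:
        print(e.msg)    # with this branch: "invalid syntax (expected ')')"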

diff --git a/lib-python/2.7/test/test_genexps.py 
b/lib-python/2.7/test/test_genexps.py
--- a/lib-python/2.7/test/test_genexps.py
+++ b/lib-python/2.7/test/test_genexps.py
@@ -87,7 +87,7 @@
 >>> dict(a = i for i in xrange(10))
 Traceback (most recent call last):
...
-SyntaxError: invalid syntax
+SyntaxError: invalid syntax (expected ')')
 
 Verify that parenthesis are required when used as a keyword argument value
 
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -86,3 +86,8 @@
 that jit hooks are currently not enabled at all. in that case, the list of ops
 does not have to be created in the case of the on_abort hook (which is
 expensive).
+
+
+.. branch: pyparser-improvements
+
+Improve speed of Python parser, improve ParseError messages slightly.
diff --git a/pypy/interpreter/pyparser/metaparser.py 
b/pypy/interpreter/pyparser/metaparser.py
--- a/pypy/interpreter/pyparser/metaparser.py
+++ b/pypy/interpreter/pyparser/metaparser.py
@@ -147,8 +147,10 @@
 for label, next in state.arcs.iteritems():
 arcs.append((self.make_label(gram, label), 
dfa.index(next)))
 states.append((arcs, state.is_final))
-gram.dfas.append((states, self.make_first(gram, name)))
-assert len(gram.dfas) - 1 == gram.symbol_ids[name] - 256
+symbol_id = gram.symbol_ids[name]
+dfa = parser.DFA(symbol_id, states, self.make_first(gram, name))
+gram.dfas.append(dfa)
+assert len(gram.dfas) - 1 == symbol_id - 256
 gram.start = gram.symbol_ids[self.start_symbol]
 return gram
 
@@ -162,6 +164,13 @@
 else:
 gram.labels.append(gram.symbol_ids[label])
 gram.symbol_to_label[label] = label_index
+first = self.first[label]
+if len(first) == 1:
+first, = first
+if not first[0].isupper():
+first = first.strip("\"'")
+assert label_index not in 
gram.token_to_error_string
+gram.token_to_error_string[label_index] = first
 return label_index
 elif label.isupper():
 token_index = gram.TOKENS[label]
@@ -183,7 +192,7 @@
 else:
 gram.labels.append(gram.KEYWORD_TOKEN)
 gram.keyword_ids[value] = label_index
-return label_index
+result = label_index
 else:
 try:
 token_index = gram.OPERATOR_MAP[value]
@@ -194,7 +203,10 @@
 else:
 gram.labels.append(token_index)
 gram.token_ids[token_index] = label_index
-return label_index
+result = label_index
+assert result not in gram.token_to_error_string
+gram.token_to_error_string[result] = value
+return result
 
 def make_first(self, gram, name):
 original_firsts = self.first[name]
diff --git a/pypy/interpreter/pyparser/parser.py 
b/pypy/interpreter/pyparser/parser.py
--- a/pypy/interpreter/pyparser/parser.py
+++ b/pypy/interpreter/pyparser/parser.py
@@ -1,6 +1,7 @@
 """
 A CPython inspired RPython parser.
 """
+from rpython.rlib.objectmodel import not_rpython
 
 
 class Grammar(object):
@@ -16,6 +17,7 @@
 self.symbol_names = {}
 self.symbol_to_label = {}
 self.keyword_ids = {}
+self.token_to_error_string = {}
 self.dfas = []
 self.labels = [0]
 self.token_ids = {}
@@ -41,6 +43,27 @@
 pass
 return True
 
+class DFA(object):
+def __init__(self, symbol_id, states, first):
+self.symbol_id = symbol_id
+self.states = states
+self.first = self._first_to_string(first)
+
+def could_match_token(self, label_index):
+pos = label_index >> 3
+bit = 1 << (label_index & 0b111)
+return bool(ord(self.first[label_index >> 3]) & bit)
+
+@staticmethod
+@not_rpython
+def _first_to_string(first):
+l = sorted(first.keys())
+b = bytearray(32)
+for label_index in l:
+pos = label_index >> 3
+bit = 1 << (label_index & 0b111)
+b[pos] |= bit
+return str(b)
 
 class Node(object):
 
@@ -127,14 +150,17 @@
 
 class Nonterminal(AbstractNonterminal):
 __slots__ = ("_children", )
-def 

[pypy-commit] pypy default: merge fix-sre-problems:

2018-03-28 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r94164:ac140c11bea3
Date: 2018-03-28 14:54 +0200
http://bitbucket.org/pypy/pypy/changeset/ac140c11bea3/

Log:merge fix-sre-problems:

- stop switching to the blackhole interpreter in random places,
which leads to arbitrary misbehaviour
- disable greenfields, because their interaction with virtualizables
is broken
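
A simplified, hypothetical RPython-style sketch of the pattern applied to
``ccallback.py`` below: instead of a greenfield spec such as
'callback.key_pycode' (now disabled), the green value is loaded into a local
and passed to ``jit_merge_point`` explicitly. The real driver has more reds.

    from rpython.rlib import jit

    driver = jit.JitDriver(greens=['key_pycode'], reds=['callback'],
                           name='cffi_callback_sketch')

    def py_invoke(callback):
        key_pycode = callback.key_pycode      # hoist the green into a local
        driver.jit_merge_point(key_pycode=key_pycode, callback=callback)
        return callback.do_invoke()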

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -68,3 +68,14 @@
 Optimize `Py*_Check` for `Bool`, `Float`, `Set`. Also refactor and simplify
 `W_PyCWrapperObject` which is used to call slots from the C-API, greatly
 improving microbenchmarks in https://github.com/antocuni/cpyext-benchmarks
+
+
+.. branch: fix-sre-problems
+
+Fix two (unrelated) JIT bugs manifesting in the re module:
+
+- green fields are broken and were thus disabled, plus their usage removed from
+  the _sre implementation
+
+- in rare "trace is too long" situations, the JIT could break behaviour
+  arbitrarily.
diff --git a/pypy/module/_cffi_backend/ccallback.py 
b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -232,7 +232,9 @@
 "different from the 'ffi.h' file seen at compile-time)")
 
 def py_invoke(self, ll_res, ll_args):
+key_pycode = self.key_pycode
 jitdriver1.jit_merge_point(callback=self,
+   key_pycode=key_pycode,
ll_res=ll_res,
ll_args=ll_args)
 self.do_invoke(ll_res, ll_args)
@@ -294,7 +296,7 @@
 return 'cffi_callback ' + key_pycode.get_repr()
 
 jitdriver1 = jit.JitDriver(name='cffi_callback',
-   greens=['callback.key_pycode'],
+   greens=['key_pycode'],
reds=['ll_res', 'll_args', 'callback'],
get_printable_location=get_printable_location1)
 
diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
--- a/pypy/module/_sre/interp_sre.py
+++ b/pypy/module/_sre/interp_sre.py
@@ -77,15 +77,15 @@
 w_import = space.getattr(w_builtin, space.newtext("__import__"))
 return space.call_function(w_import, space.newtext("re"))
 
-def matchcontext(space, ctx):
+def matchcontext(space, ctx, pattern):
 try:
-return rsre_core.match_context(ctx)
+return rsre_core.match_context(ctx, pattern)
 except rsre_core.Error as e:
 raise OperationError(space.w_RuntimeError, space.newtext(e.msg))
 
-def searchcontext(space, ctx):
+def searchcontext(space, ctx, pattern):
 try:
-return rsre_core.search_context(ctx)
+return rsre_core.search_context(ctx, pattern)
 except rsre_core.Error as e:
 raise OperationError(space.w_RuntimeError, space.newtext(e.msg))
 
@@ -114,7 +114,7 @@
 pos = len(unicodestr)
 if endpos > len(unicodestr):
 endpos = len(unicodestr)
-return rsre_core.UnicodeMatchContext(self.code, unicodestr,
+return rsre_core.UnicodeMatchContext(unicodestr,
  pos, endpos, self.flags)
 elif space.isinstance_w(w_string, space.w_bytes):
 str = space.bytes_w(w_string)
@@ -122,7 +122,7 @@
 pos = len(str)
 if endpos > len(str):
 endpos = len(str)
-return rsre_core.StrMatchContext(self.code, str,
+return rsre_core.StrMatchContext(str,
  pos, endpos, self.flags)
 else:
 buf = space.readbuf_w(w_string)
@@ -132,7 +132,7 @@
 pos = size
 if endpos > size:
 endpos = size
-return rsre_core.BufMatchContext(self.code, buf,
+return rsre_core.BufMatchContext(buf,
  pos, endpos, self.flags)
 
 def getmatch(self, ctx, found):
@@ -144,12 +144,12 @@
 @unwrap_spec(pos=int, endpos=int)
 def match_w(self, w_string, pos=0, endpos=sys.maxint):
 ctx = self.make_ctx(w_string, pos, endpos)
-return self.getmatch(ctx, matchcontext(self.space, ctx))
+return self.getmatch(ctx, matchcontext(self.space, ctx, self.code))
 
 @unwrap_spec(pos=int, endpos=int)
 def search_w(self, w_string, pos=0, endpos=sys.maxint):
 ctx = self.make_ctx(w_string, pos, endpos)
-return self.getmatch(ctx, searchcontext(self.space, ctx))
+return self.getmatch(ctx, searchcontext(self.space, ctx, self.code))
 
 @unwrap_spec(pos=int, endpos=int)
 def findall_w(self, w_string, pos=0, endpos=sys.maxint):
@@ -157,7 +157,7 @@
 matchlist_w = []
 ctx = self.make_ctx(w_string, pos, endpos)
 while ctx.match_start <= 

[pypy-commit] pypy default: Merge the second part of the cpyext-fast-typecheck branch. Despite the name,

2018-03-24 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r94128:f902cda5d7ea
Date: 2018-03-24 11:05 +0100
http://bitbucket.org/pypy/pypy/changeset/f902cda5d7ea/

Log:Merge the second part of the cpyext-fast-typecheck branch. Despite
the name, this implements a very different feature :)

This heavily refactors and simplify W_PyCWrapperObject, which is
used to call all C slots from Python. Instead of taking a generic
callback to call, we create a specialized subclass for each kind of
slot. In particular, this lets us to avoid creating a full tuple
(and possibly a dict) to contain the wrapped arguments.

The end result is a hugh speedup in some of the antocuni/cpyext-
benchmarks microbenchmarks; in particular, compared to default:

- len(Foo()) is 9x faster
- Foo()[0] is 5.7x faster
- np.__getitem__ is ~50% faster
- np.mean is ~20% faster

diff --git a/pypy/module/cpyext/methodobject.py 
b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -45,6 +45,18 @@
 from pypy.module.cpyext.object import _dealloc
 _dealloc(space, py_obj)
 
+def w_kwargs_from_args(space, __args__):
+w_kwargs = None
+if __args__.keywords:
+# CCC: we should probably have a @jit.look_inside_iff if the
+# keyword count is constant, as we do in Arguments.unpack
+w_kwargs = space.newdict()
+for i in range(len(__args__.keywords)):
+key = __args__.keywords[i]
+w_obj = __args__.keywords_w[i]
+space.setitem(w_kwargs, space.newtext(key), w_obj)
+return w_kwargs
+
 class W_PyCFunctionObject(W_Root):
 _immutable_fields_ = ["flags"]
 
@@ -103,15 +115,7 @@
 def call_keywords(self, space, w_self, __args__):
 func = rffi.cast(PyCFunctionKwArgs, self.ml.c_ml_meth)
 py_args = tuple_from_args_w(space, __args__.arguments_w)
-w_kwargs = None
-if __args__.keywords:
-# CCC: we should probably have a @jit.look_inside_iff if the
-# keyword count is constant, as we do in Arguments.unpack
-w_kwargs = space.newdict()
-for i in range(len(__args__.keywords)):
-key = __args__.keywords[i]
-w_obj = __args__.keywords_w[i]
-space.setitem(w_kwargs, space.newtext(key), w_obj)
+w_kwargs = w_kwargs_from_args(space, __args__)
 try:
 return generic_cpy_call(space, func, w_self, py_args, w_kwargs)
 finally:
@@ -213,14 +217,15 @@
 (self.name, self.w_objclass.getname(self.space)))
 
 
+class W_PyCWrapperObject(W_Root):
+"""
+Abstract class; for concrete subclasses, see slotdefs.py
+"""
+_immutable_fields_ = ['offset[*]']
 
-class W_PyCWrapperObject(W_Root):
-def __init__(self, space, pto, method_name, wrapper_func,
- wrapper_func_kwds, doc, func, offset=None):
+def __init__(self, space, pto, method_name, doc, func, offset):
 self.space = space
 self.method_name = method_name
-self.wrapper_func = wrapper_func
-self.wrapper_func_kwds = wrapper_func_kwds
 self.doc = doc
 self.func = func
 self.offset = offset
@@ -229,10 +234,17 @@
 assert isinstance(w_type, W_TypeObject)
 self.w_objclass = w_type
 
-def call(self, space, w_self, w_args, w_kw):
+def descr_call(self, space, w_self, __args__):
+return self.call(space, w_self, __args__)
+
+def call(self, space, w_self, __args__):
+raise NotImplementedError
+
+@jit.unroll_safe
+def get_func_to_call(self):
 func_to_call = self.func
 if self.offset:
-pto = as_pyobj(space, self.w_objclass)
+pto = as_pyobj(self.space, self.w_objclass)
 # make ptr the equivalent of this, using the offsets
 #func_to_call = rffi.cast(rffi.VOIDP, 
ptr.c_tp_as_number.c_nb_multiply)
 if pto:
@@ -246,31 +258,33 @@
 assert False, "failed to convert w_type %s to PyObject" % str(
   self.w_objclass)
 assert func_to_call
-if self.wrapper_func is None:
-assert self.wrapper_func_kwds is not None
-return self.wrapper_func_kwds(space, w_self, w_args, func_to_call,
-  w_kw)
-if space.is_true(w_kw):
-raise oefmt(space.w_TypeError,
+return func_to_call
+
+def check_args(self, __args__, arity):
+length = len(__args__.arguments_w)
+if length != arity:
+raise oefmt(self.space.w_TypeError, "expected %d arguments, got %d",
+arity, length)
+if __args__.keywords:
+raise oefmt(self.space.w_TypeError,
 "wrapper %s doesn't take any 

[pypy-commit] pypy default: Merge the first part of the cpyext-fast-typecheck branch.

2018-03-24 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r94126:66208269e66e
Date: 2018-03-24 10:47 +0100
http://bitbucket.org/pypy/pypy/changeset/66208269e66e/

Log:Merge the first part of the cpyext-fast-typecheck branch.

This branch introduce a way to implement Py*_Check efficiently as C
macros which simply checks a flag on the C typeobject, instead of
doing a full roundtrip to RPython land to do a space.isinstance_w().

For now, implement PyFloat_Check, PyBool_Check and PySlice_Check
using the new technique. Eventually, we should do the same for all
type checkers and possibly kill build_type_checkers().

This commits merges only part of the branch because later commits
have diverged from the original scope and implements a different
feature (bad me); they will be merged in a separate commit, to keep
the diff clear.
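
A hypothetical pure-Python model of what the new C macros below compute
(names mirror the diff; no PyPy internals are assumed): the type object
carries a PyPy-specific flag word, and the check is a single bit test rather
than a round trip into RPython's isinstance machinery.

    Py_TPPYPYFLAGS_FLOAT_SUBCLASS = 1 << 0

    class FakeTypeObject(object):                 # stand-in for PyTypeObject
        def __init__(self, tp_pypy_flags=0):
            self.tp_pypy_flags = tp_pypy_flags

    def PyFloat_Check(ob_type):
        # a single flag test, no call back into the interpreter
        return (ob_type.tp_pypy_flags & Py_TPPYPYFLAGS_FLOAT_SUBCLASS) != 0

    print(PyFloat_Check(FakeTypeObject(Py_TPPYPYFLAGS_FLOAT_SUBCLASS)))  # True
    print(PyFloat_Check(FakeTypeObject()))                               # False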

diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -133,6 +133,11 @@
  'TYPE', 'STRING'): # 'STRING' -> 'BYTES' in py3
 constant_names.append('Py_TPFLAGS_%s_SUBCLASS' % name)
 
+# PyPy-specific flags
+for name in ('FLOAT',):
+constant_names.append('Py_TPPYPYFLAGS_%s_SUBCLASS' % name)
+
+
 for name in constant_names:
 setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
 globals().update(rffi_platform.configure(CConfig_constants))
diff --git a/pypy/module/cpyext/boolobject.py b/pypy/module/cpyext/boolobject.py
--- a/pypy/module/cpyext/boolobject.py
+++ b/pypy/module/cpyext/boolobject.py
@@ -1,9 +1,5 @@
-from rpython.rtyper.lltypesystem import rffi, lltype
-from pypy.module.cpyext.api import (cpython_api, PyObject, CANNOT_FAIL,
-build_type_checkers)
-
-# Inheriting from bool isn't actually possible.
-PyBool_Check = build_type_checkers("Bool")[1]
+from rpython.rtyper.lltypesystem import rffi
+from pypy.module.cpyext.api import cpython_api, PyObject
 
 @cpython_api([rffi.LONG], PyObject)
 def PyBool_FromLong(space, value):
diff --git a/pypy/module/cpyext/floatobject.py 
b/pypy/module/cpyext/floatobject.py
--- a/pypy/module/cpyext/floatobject.py
+++ b/pypy/module/cpyext/floatobject.py
@@ -1,7 +1,7 @@
 from rpython.rtyper.lltypesystem import rffi, lltype
 from pypy.module.cpyext.api import (PyObjectFields, bootstrap_function,
 cpython_struct,
-CANNOT_FAIL, cpython_api, PyObject, build_type_checkers, CONST_STRING)
+CANNOT_FAIL, cpython_api, PyObject, CONST_STRING)
 from pypy.module.cpyext.pyobject import (
 make_typedescr, track_reference, from_ref)
 from pypy.interpreter.error import OperationError
@@ -38,8 +38,6 @@
 track_reference(space, obj, w_obj)
 return w_obj
 
-PyFloat_Check, PyFloat_CheckExact = build_type_checkers("Float")
-
 @cpython_api([lltype.Float], PyObject)
 def PyFloat_FromDouble(space, value):
 return space.newfloat(value)
diff --git a/pypy/module/cpyext/include/boolobject.h 
b/pypy/module/cpyext/include/boolobject.h
--- a/pypy/module/cpyext/include/boolobject.h
+++ b/pypy/module/cpyext/include/boolobject.h
@@ -16,6 +16,8 @@
 #define Py_RETURN_TRUE return Py_INCREF(Py_True), Py_True
 #define Py_RETURN_FALSE return Py_INCREF(Py_False), Py_False
 
+#define PyBool_Check(op) ((op)->ob_type == &PyBool_Type)
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/pypy/module/cpyext/include/floatobject.h 
b/pypy/module/cpyext/include/floatobject.h
--- a/pypy/module/cpyext/include/floatobject.h
+++ b/pypy/module/cpyext/include/floatobject.h
@@ -32,6 +32,11 @@
 return PyFloat_FromDouble(-Py_HUGE_VAL);\
 } while(0)
 
+#define PyFloat_Check(op) \
+_PyPy_Type_FastSubclass((op)->ob_type, Py_TPPYPYFLAGS_FLOAT_SUBCLASS)
+#define PyFloat_CheckExact(op) ((op)->ob_type == &PyFloat_Type)
+
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/pypy/module/cpyext/include/object.h 
b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -228,6 +228,11 @@
 #define Py_TPFLAGS_BASE_EXC_SUBCLASS   (1L<<30)
 #define Py_TPFLAGS_TYPE_SUBCLASS   (1L<<31)
 
+/* These are conceptually the same as the flags above, but they are
+   PyPy-specific and are stored inside tp_pypy_flags */
+#define Py_TPPYPYFLAGS_FLOAT_SUBCLASS (1L<<0)
+
+
 #define Py_TPFLAGS_DEFAULT_EXTERNAL ( \
  Py_TPFLAGS_HAVE_GETCHARBUFFER | \
  Py_TPFLAGS_HAVE_SEQUENCE_IN | \
@@ -247,6 +252,8 @@
 #define PyType_HasFeature(t,f)  (((t)->tp_flags & (f)) != 0)
 #define PyType_FastSubclass(t,f)  PyType_HasFeature(t,f)
 
+#define _PyPy_Type_FastSubclass(t,f) (((t)->tp_pypy_flags & (f)) != 0)
+
 #define PyType_Check(op) \
 PyType_FastSubclass(Py_TYPE(op), Py_TPFLAGS_TYPE_SUBCLASS)
 #define PyType_CheckExact(op) (Py_TYPE(op) == &PyType_Type)
diff --git a/pypy/module/cpyext/include/sliceobject.h 

[pypy-commit] pypy default: merge rpython-sprint which refactors rpython signature

2018-03-18 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r93996:17ba5f6cbab7
Date: 2018-03-19 00:09 +0100
http://bitbucket.org/pypy/pypy/changeset/17ba5f6cbab7/

Log:merge rpython-sprint which refactors rpython signature

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -54,3 +54,7 @@
 
 Speed up branchy code that does a lot of function inlining by saving one call
 to read the TLS in most bridges.
+
+.. branch: rpython-sprint
+
+Refactor in rpython signatures
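
A self-contained approximation of the size check introduced in
``signature.py`` below, showing which annotation shapes are accepted (this is
a sketch, not the RPython function itself):

    def _validate_annotation_size(t):
        try:
            iter(t)
        except TypeError:          # not a container: nothing to check
            return
        if isinstance(t, tuple):   # tuples may have any (fixed) length
            return
        if len(t) > 1:
            raise TypeError("Cannot specify multiple types in a %s "
                            "(try using tuple)" % (type(t),))

    _validate_annotation_size([int])         # ok: a homogeneous list of ints
    _validate_annotation_size((int, str))    # ok: a 2-tuple with fixed layout
    try:
        _validate_annotation_size([int, str])
    except TypeError as e:
        print(e)
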
diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py
--- a/rpython/annotator/signature.py
+++ b/rpython/annotator/signature.py
@@ -14,16 +14,16 @@
 
 def _annotation_key(t):
 from rpython.rtyper import extregistry
-if type(t) is list:
+if isinstance(t, list):
 assert len(t) == 1
 return ('list', _annotation_key(t[0]))
-elif type(t) is dict:
+elif isinstance(t, dict):
 assert len(t.keys()) == 1
 return ('dict', _annotation_key(t.items()[0]))
 elif isinstance(t, tuple):
 return tuple([_annotation_key(i) for i in t])
 elif extregistry.is_registered(t):
-# XXX should it really be always different?
+# XXX do we want to do something in this case?
 return t
 return t
 
@@ -38,24 +38,36 @@
 return t
 return _compute_annotation(t, bookkeeper)
 
+
+def _validate_annotation_size(t):
+try:
+_ = iter(t)
+except TypeError:  # if it's not an iterable, just return
+return t   # (size does not matter)
+if isinstance(t, tuple):  # we accept tuples with any length, because
+return t  # their in-memory representation is predictable
+if len(t) > 1:
+raise TypeError("Cannot specify multiple types in a %s (try using 
tuple)", type(t))
+
+
 def _compute_annotation(t, bookkeeper=None):
 from rpython.rtyper.lltypesystem import lltype
 from rpython.rtyper.llannotation import lltype_to_annotation
+_validate_annotation_size(t)
 if isinstance(t, SomeObject):
 return t
 elif isinstance(t, lltype.LowLevelType):
 return lltype_to_annotation(t)
 elif isinstance(t, list):
-assert len(t) == 1, "We do not support type joining in list"
-listdef = ListDef(bookkeeper, annotation(t[0]), mutated=True, 
resized=True)
-return SomeList(listdef)
+return SomeList(
+ListDef(bookkeeper, annotation(t[0]),
+mutated=True, resized=True))
 elif isinstance(t, tuple):
 return SomeTuple(tuple([annotation(i) for i in t]))
 elif isinstance(t, dict):
-assert len(t) == 1, "We do not support type joining in dict"
-result = SomeDict(DictDef(bookkeeper, annotation(t.keys()[0]),
-annotation(t.values()[0])))
-return result
+return SomeDict(
+DictDef(bookkeeper,
+annotation(t.keys()[0]), annotation(t.values()[0])))
 elif type(t) is types.NoneType:
 return s_None
 elif extregistry.is_registered(t):
@@ -84,13 +96,12 @@
 elif t is types.NoneType:
 return s_None
 elif bookkeeper and extregistry.is_registered_type(t):
-entry = extregistry.lookup_type(t)
-return entry.compute_annotation_bk(bookkeeper)
+return (extregistry.lookup_type(t)
+.compute_annotation_bk(bookkeeper))
 elif t is type:
 return SomeType()
 elif bookkeeper and not hasattr(t, '_freeze_'):
-classdef = bookkeeper.getuniqueclassdef(t)
-return SomeInstance(classdef)
+return SomeInstance(bookkeeper.getuniqueclassdef(t))
 else:
 raise AssertionError("annotationoftype(%r)" % (t,))
 


[pypy-commit] pypy default: merge call-loopinvariant-into-bridges: speed up branchy code that does a lot of inlining

2018-03-12 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93971:ed869ecba520
Date: 2018-03-12 13:58 +0100
http://bitbucket.org/pypy/pypy/changeset/ed869ecba520/

Log:merge call-loopinvariant-into-bridges: speed up branchy code that
does a lot of inlining
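
A hedged RPython-level sketch of the kind of helper this benefits: a function
marked ``@jit.loop_invariant`` (typically the TLS read that fetches the
executioncontext) produces a ``call_loopinvariant`` operation whose known
result, with this branch, is also carried into bridges instead of being
re-read there. The class and names below are made up for illustration.

    from rpython.rlib import jit

    class FakeThreadLocals(object):
        def __init__(self):
            self.ec = object()
    _tls = FakeThreadLocals()

    @jit.loop_invariant
    def get_executioncontext():
        # called at most once per loop by JITted code; bridges can now reuse
        # the value recorded when the bridge was traced
        return _tls.ec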

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -48,3 +48,9 @@
 .. branch: refactor-slots
 
 Refactor cpyext slots.
+
+
+.. branch: call-loopinvariant-into-bridges
+
+Speed up branchy code that does a lot of function inlining by saving one call
+to read the TLS in most bridges.
diff --git a/rpython/jit/metainterp/optimizeopt/bridgeopt.py 
b/rpython/jit/metainterp/optimizeopt/bridgeopt.py
--- a/rpython/jit/metainterp/optimizeopt/bridgeopt.py
+++ b/rpython/jit/metainterp/optimizeopt/bridgeopt.py
@@ -17,11 +17,17 @@
 # 
 # (  ) length times, if getfield(box1, descr) == box2
 # both boxes should be in the liveboxes
+# (or constants)
 #
 # 
 # (   ) length times, if getarrayitem_gc(box1, 
index, descr) == box2
 # both boxes should be in the liveboxes
+# (or constants)
 #
+#  call_loopinvariant knowledge
+# 
+# ( ) length times, if call_loopinvariant(const) == box2
+#  box2 should be in liveboxes
 # 
 
 
@@ -55,11 +61,11 @@
 return box
 
 def serialize_optimizer_knowledge(optimizer, numb_state, liveboxes, 
liveboxes_from_env, memo):
+from rpython.jit.metainterp.history import ConstInt
 available_boxes = {}
 for box in liveboxes:
 if box is not None and box in liveboxes_from_env:
 available_boxes[box] = None
-metainterp_sd = optimizer.metainterp_sd
 
 # class knowledge is stored as bits, true meaning the class is known, false
 # means unknown. on deserializing we look at the bits, and read the runtime
@@ -106,7 +112,19 @@
 numb_state.append_int(0)
 numb_state.append_int(0)
 
+if optimizer.optrewrite:
+tuples_loopinvariant = optimizer.optrewrite.serialize_optrewrite(
+available_boxes)
+numb_state.append_int(len(tuples_loopinvariant))
+for constarg0, box in tuples_loopinvariant:
+numb_state.append_short(
+tag_box(ConstInt(constarg0), liveboxes_from_env, memo))
+numb_state.append_short(tag_box(box, liveboxes_from_env, memo))
+else:
+numb_state.append_int(0)
+
 def deserialize_optimizer_knowledge(optimizer, resumestorage, frontend_boxes, 
liveboxes):
+from rpython.jit.metainterp.history import ConstInt
 reader = resumecode.Reader(resumestorage.rd_numb)
 assert len(frontend_boxes) == len(liveboxes)
 metainterp_sd = optimizer.metainterp_sd
@@ -131,8 +149,6 @@
 optimizer.make_constant_class(box, cls)
 
 # heap knowledge
-if not optimizer.optheap:
-return
 length = reader.next_item()
 result_struct = []
 for i in range(length):
@@ -154,4 +170,19 @@
 tagged = reader.next_item()
 box2 = decode_box(resumestorage, tagged, liveboxes, metainterp_sd.cpu)
 result_array.append((box1, index, descr, box2))
-optimizer.optheap.deserialize_optheap(result_struct, result_array)
+if optimizer.optheap:
+optimizer.optheap.deserialize_optheap(result_struct, result_array)
+
+# call_loopinvariant knowledge
+length = reader.next_item()
+result_loopinvariant = []
+for i in range(length):
+tagged1 = reader.next_item()
+const = decode_box(resumestorage, tagged1, liveboxes, 
metainterp_sd.cpu)
+assert isinstance(const, ConstInt)
+i = const.getint()
+tagged2 = reader.next_item()
+box = decode_box(resumestorage, tagged2, liveboxes, metainterp_sd.cpu)
+result_loopinvariant.append((i, box))
+if optimizer.optrewrite:
+optimizer.optrewrite.deserialize_optrewrite(result_loopinvariant)
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py 
b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -877,6 +877,18 @@
 optimize_SAME_AS_R = optimize_SAME_AS_I
 optimize_SAME_AS_F = optimize_SAME_AS_I
 
+def serialize_optrewrite(self, available_boxes):
+res = []
+for i, box in self.loop_invariant_results.iteritems():
+box = self.get_box_replacement(box)
+if box in available_boxes:
+res.append((i, box))
+return res
+
+def deserialize_optrewrite(self, tups):
+for i, box in tups:
+self.loop_invariant_results[i] = box
+
 dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
   default=OptRewrite.emit)
 optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
diff --git 

[pypy-commit] pypy default: merge heads

2018-03-09 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r93964:42f9add4e266
Date: 2018-03-09 08:59 +0100
http://bitbucket.org/pypy/pypy/changeset/42f9add4e266/

Log:merge heads



[pypy-commit] pypy default: merge heads

2018-02-24 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93879:ebe1844aaf79
Date: 2018-02-24 09:26 +0100
http://bitbucket.org/pypy/pypy/changeset/ebe1844aaf79/

Log:merge heads

diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -270,8 +270,8 @@
 #
 try:
 if operr is not None:
-ec = self.space.getexecutioncontext()
-next_instr = self.handle_operation_error(ec, operr)
+next_instr = self.handle_operation_error(
+executioncontext, operr)
 self.last_instr = intmask(next_instr - 1)
 else:
 # Execution starts just after the last_instr.  Initially,


[pypy-commit] pypy default: merge heads

2018-02-21 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r93866:8f992014885b
Date: 2018-02-22 08:04 +0100
http://bitbucket.org/pypy/pypy/changeset/8f992014885b/

Log:merge heads

diff too long, truncating to 2000 out of 4036 lines

diff --git a/pypy/module/test_lib_pypy/pyrepl/__init__.py 
b/extra_tests/test_pyrepl/__init__.py
rename from pypy/module/test_lib_pypy/pyrepl/__init__.py
rename to extra_tests/test_pyrepl/__init__.py
--- a/pypy/module/test_lib_pypy/pyrepl/__init__.py
+++ b/extra_tests/test_pyrepl/__init__.py
@@ -1,3 +1,1 @@
-import sys
-import lib_pypy.pyrepl
-sys.modules['pyrepl'] = sys.modules['lib_pypy.pyrepl']
+
diff --git a/pypy/module/test_lib_pypy/pyrepl/infrastructure.py 
b/extra_tests/test_pyrepl/infrastructure.py
rename from pypy/module/test_lib_pypy/pyrepl/infrastructure.py
rename to extra_tests/test_pyrepl/infrastructure.py
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_basic.py 
b/extra_tests/test_pyrepl/test_basic.py
rename from pypy/module/test_lib_pypy/pyrepl/test_basic.py
rename to extra_tests/test_pyrepl/test_basic.py
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_bugs.py 
b/extra_tests/test_pyrepl/test_bugs.py
rename from pypy/module/test_lib_pypy/pyrepl/test_bugs.py
rename to extra_tests/test_pyrepl/test_bugs.py
diff --git a/extra_tests/test_pyrepl/test_functional.py 
b/extra_tests/test_pyrepl/test_functional.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_pyrepl/test_functional.py
@@ -0,0 +1,28 @@
+#   Copyright 2000-2007 Michael Hudson-Doyle 
+#   Maciek Fijalkowski
+# License: MIT
+# some functional tests, to see if this is really working
+
+import pytest
+import sys
+
+
+@pytest.fixture()
+def child():
+try:
+import pexpect
+except ImportError:
+pytest.skip("no pexpect module")
+except SyntaxError:
+pytest.skip('pexpect wont work on py3k')
+child = pexpect.spawn(sys.executable, ['-S'], timeout=10)
+child.logfile = sys.stdout
+child.sendline('from pyrepl.python_reader import main')
+child.sendline('main()')
+return child
+
+
+def test_basic(child):
+child.sendline('a = 3')
+child.sendline('a')
+child.expect('3')
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_keymap.py 
b/extra_tests/test_pyrepl/test_keymap.py
rename from pypy/module/test_lib_pypy/pyrepl/test_keymap.py
rename to extra_tests/test_pyrepl/test_keymap.py
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_reader.py 
b/extra_tests/test_pyrepl/test_reader.py
rename from pypy/module/test_lib_pypy/pyrepl/test_reader.py
rename to extra_tests/test_pyrepl/test_reader.py
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py 
b/extra_tests/test_pyrepl/test_readline.py
rename from pypy/module/test_lib_pypy/pyrepl/test_readline.py
rename to extra_tests/test_pyrepl/test_readline.py
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_wishes.py 
b/extra_tests/test_pyrepl/test_wishes.py
rename from pypy/module/test_lib_pypy/pyrepl/test_wishes.py
rename to extra_tests/test_pyrepl/test_wishes.py
diff --git a/get_externals.py b/get_externals.py
new file mode 100644
--- /dev/null
+++ b/get_externals.py
@@ -0,0 +1,69 @@
+'''Get external dependencies for building PyPy
+they will end up in the platform.host().basepath, something like 
repo-root/external
+'''
+
+from __future__ import print_function
+
+import argparse
+import os
+import zipfile
+from subprocess import Popen, PIPE
+from rpython.translator.platform import host
+
+def runcmd(cmd, verbose):
+stdout = stderr = ''
+report = False
+try:
+p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+stdout, stderr = p.communicate()
+if p.wait() != 0 or verbose:
+report = True
+except Exception as e:
+stderr = str(e) + '\n' + stderr
+report = True
+if report:
+print('running "%s" returned\n%s\n%s' % (' '.join(cmd), stdout, 
stderr))
+if stderr:
+raise RuntimeError(stderr)
+
+def checkout_repo(dest='externals', org='pypy', branch='default', 
verbose=False):
+url = 'https://bitbucket.org/{}/externals'.format(org)
+if not os.path.exists(dest):
+cmd = ['hg','clone',url,dest]
+runcmd(cmd, verbose)
+cmd = ['hg','-R', dest, 'update',branch]
+runcmd(cmd, verbose)
+
+def extract_zip(externals_dir, zip_path):
+with zipfile.ZipFile(os.fspath(zip_path)) as zf:
+zf.extractall(os.fspath(externals_dir))
+return externals_dir / zf.namelist()[0].split('/')[0]
+
+def parse_args():
+p = argparse.ArgumentParser()
+p.add_argument('-v', '--verbose', action='store_true')
+p.add_argument('-O', '--organization',
+   help='Organization owning the deps repos', default='pypy')
+p.add_argument('-e', '--externals', default=host.externals,
+   help='directory in which to store dependencies',
+   )
+p.add_argument('-b', '--branch', default=host.externals_branch,
+   

[pypy-commit] pypy default: merge msvc14 which allows using Visual Studio 17 compiler suite

2018-02-12 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r93811:496d05d4758e
Date: 2018-02-12 14:06 -0500
http://bitbucket.org/pypy/pypy/changeset/496d05d4758e/

Log:merge msvc14 which allows using Visual Studio 17 compiler suite

diff too long, truncating to 2000 out of 2842 lines

diff --git a/get_externals.py b/get_externals.py
new file mode 100644
--- /dev/null
+++ b/get_externals.py
@@ -0,0 +1,69 @@
+'''Get external dependencies for building PyPy
+they will end up in the platform.host().basepath, something like 
repo-root/external
+'''
+
+from __future__ import print_function
+
+import argparse
+import os
+import zipfile
+from subprocess import Popen, PIPE
+from rpython.translator.platform import host
+
+def runcmd(cmd, verbose):
+stdout = stderr = ''
+report = False
+try:
+p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+stdout, stderr = p.communicate()
+if p.wait() != 0 or verbose:
+report = True
+except Exception as e:
+stderr = str(e) + '\n' + stderr
+report = True
+if report:
+print('running "%s" returned\n%s\n%s' % (' '.join(cmd), stdout, 
stderr))
+if stderr:
+raise RuntimeError(stderr)
+
+def checkout_repo(dest='externals', org='pypy', branch='default', 
verbose=False):
+url = 'https://bitbucket.org/{}/externals'.format(org)
+if not os.path.exists(dest):
+cmd = ['hg','clone',url,dest]
+runcmd(cmd, verbose)
+cmd = ['hg','-R', dest, 'update',branch]
+runcmd(cmd, verbose)
+
+def extract_zip(externals_dir, zip_path):
+with zipfile.ZipFile(os.fspath(zip_path)) as zf:
+zf.extractall(os.fspath(externals_dir))
+return externals_dir / zf.namelist()[0].split('/')[0]
+
+def parse_args():
+p = argparse.ArgumentParser()
+p.add_argument('-v', '--verbose', action='store_true')
+p.add_argument('-O', '--organization',
+   help='Organization owning the deps repos', default='pypy')
+p.add_argument('-e', '--externals', default=host.externals,
+   help='directory in which to store dependencies',
+   )
+p.add_argument('-b', '--branch', default=host.externals_branch,
+   help='branch to check out',
+   )
+p.add_argument('-p', '--platform', default=None,
+   help='someday support cross-compilation, ignore for now',
+   )
+return p.parse_args()
+
+
+def main():
+args = parse_args()
+checkout_repo(
+dest=args.externals,
+org=args.organization,
+branch=args.branch,
+verbose=args.verbose,
+)
+
+if __name__ == '__main__':
+main()
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -39,10 +39,24 @@
 
 .. _Microsoft Visual C++ Compiler for Python 2.7: 
https://www.microsoft.com/en-us/download/details.aspx?id=44266
 
+Installing "Build Tools for Visual Studio 2017" (for Python 3)
+--
+
+As documented in the CPython Wiki_, CPython now recommends Visual C++ version
+14.0. A compact version of the compiler suite can be obtained from Microsoft_
+downloads, search the page for "Build Tools for Visual Studio 2017".
+
+You will also need to install the `Windows SDK`_ in order to use the
+`mt.exe` manifest compiler.
+
+.. _Wiki: https://wiki.python.org/moin/WindowsCompilers
+.. _Microsoft: https://www.visualstudio.com/downloads
+.. _`Windows SDK`: 
https://developer.microsoft.com/en-us/windows/downloads/windows-10-sdk
+
 Translating PyPy with Visual Studio
 ---
 
-We routinely test translation using v9, also known as Visual Studio 2008.
+We routinely test translation of PyPy 2.7 using v9 and PyPy 3 with vc14.
 Other configurations may work as well.
 
 The translation scripts will set up the appropriate environment variables
@@ -82,8 +96,8 @@
 
 .. _build instructions: http://pypy.org/download.html#building-from-source
 
-Setting Up Visual Studio for building SSL in Python3
-
+Setting Up Visual Studio 9.0 for building SSL in Python3
+
 
 On Python3, the ``ssl`` module is based on ``cffi``, and requires a build step 
after
 translation. However ``distutils`` does not support the Micorosft-provided 
Visual C
@@ -132,243 +146,14 @@
 Installing external packages
 
 
-On Windows, there is no standard place where to download, build and
-install third-party libraries.  We recommend installing them in the parent
-directory of the pypy checkout.  For example, if you installed pypy in
-``d:\pypy\trunk\`` (This directory contains a README file), the base
-directory is ``d:\pypy``. You must then set the
-INCLUDE, LIB and PATH (for DLLs) environment variables appropriately.
+We use a `repository` parallel to pypy to hold 

[pypy-commit] pypy default: Merge memory-accounting which adds extra functions to gc that

2018-02-06 Thread fijal
Author: fijal
Branch: 
Changeset: r93768:f23eec5d0d6d
Date: 2018-02-06 10:49 +0100
http://bitbucket.org/pypy/pypy/changeset/f23eec5d0d6d/

Log:Merge memory-accounting which adds extra functions to gc that let
you describe the whole memory
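
A hedged app-level sketch of the extended call (PyPy-only; on other
interpreters it degrades to a no-op, as in the _sqlite3.py hunk below). The
wrapper class is hypothetical and stands in for any object owning a large
foreign allocation:

    try:
        from __pypy__ import add_memory_pressure
    except ImportError:
        add_memory_pressure = lambda estimate, obj=None: None

    class RawBufferOwner(object):
        def __init__(self, nbytes):
            self.nbytes = nbytes
            # tell the GC how much foreign memory this object keeps alive,
            # and (new in this branch) which object it should be attributed to
            add_memory_pressure(nbytes, self)

    owner = RawBufferOwner(100 * 1024)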

diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -35,7 +35,7 @@
 except ImportError:
 assert '__pypy__' not in sys.builtin_module_names
 newlist_hint = lambda sizehint: []
-add_memory_pressure = lambda size: None
+add_memory_pressure = lambda size, obj: None
 
 if sys.version_info[0] >= 3:
 StandardError = Exception
@@ -153,9 +153,10 @@
 factory = Connection if not factory else factory
 # an sqlite3 db seems to be around 100 KiB at least (doesn't matter if
 # backed by :memory: or a file)
-add_memory_pressure(100 * 1024)
-return factory(database, timeout, detect_types, isolation_level,
+res = factory(database, timeout, detect_types, isolation_level,
 check_same_thread, factory, cached_statements)
+add_memory_pressure(100 * 1024, res)
+return res
 
 
 def _unicode_text_factory(x):
diff --git a/pypy/module/__pypy__/interp_magic.py 
b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -142,11 +142,14 @@
   space.newbool(debug))
 
 @unwrap_spec(estimate=int)
-def add_memory_pressure(estimate):
+def add_memory_pressure(space, estimate, w_obj=None):
 """ Add memory pressure of estimate bytes. Useful when calling a C function
 that internally allocates a big chunk of memory. This instructs the GC to
 garbage collect sooner than it would otherwise."""
+#if space.is_none(w_obj):
 rgc.add_memory_pressure(estimate)
+#else:
+#rgc.add_memory_pressure(estimate, w_obj)
 
 @unwrap_spec(w_frame=PyFrame)
 def locals_to_fast(space, w_frame):
diff --git a/pypy/module/_cffi_backend/allocator.py 
b/pypy/module/_cffi_backend/allocator.py
--- a/pypy/module/_cffi_backend/allocator.py
+++ b/pypy/module/_cffi_backend/allocator.py
@@ -21,13 +21,13 @@
 if self.w_alloc is None:
 if self.should_clear_after_alloc:
 ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
-flavor='raw', zero=True,
-add_memory_pressure=True)
+flavor='raw', zero=True)
 else:
 ptr = lltype.malloc(rffi.CCHARP.TO, datasize,
-flavor='raw', zero=False,
-add_memory_pressure=True)
-return cdataobj.W_CDataNewStd(space, ptr, ctype, length)
+flavor='raw', zero=False)
+w_res = cdataobj.W_CDataNewStd(space, ptr, ctype, length)
+rgc.add_memory_pressure(datasize, w_res)
+return w_res
 else:
 w_raw_cdata = space.call_function(self.w_alloc,
   space.newint(datasize))
@@ -53,7 +53,7 @@
 if self.w_free is not None:
 res.w_free = self.w_free
 res.register_finalizer(space)
-rgc.add_memory_pressure(datasize)
+rgc.add_memory_pressure(datasize, res)
 return res
 
 @unwrap_spec(w_init=WrappedDefault(None))
diff --git a/pypy/module/_cffi_backend/cdataobj.py 
b/pypy/module/_cffi_backend/cdataobj.py
--- a/pypy/module/_cffi_backend/cdataobj.py
+++ b/pypy/module/_cffi_backend/cdataobj.py
@@ -447,7 +447,10 @@
 with self as ptr:
 w_res = W_CDataGCP(space, ptr, self.ctype, self, w_destructor)
 if size != 0:
-rgc.add_memory_pressure(size)
+if isinstance(w_res, W_CDataGCP):
+rgc.add_memory_pressure(size, w_res)
+else:
+rgc.add_memory_pressure(size, self)
 return w_res
 
 def unpack(self, length):
diff --git a/pypy/module/_hashlib/interp_hashlib.py 
b/pypy/module/_hashlib/interp_hashlib.py
--- a/pypy/module/_hashlib/interp_hashlib.py
+++ b/pypy/module/_hashlib/interp_hashlib.py
@@ -61,7 +61,8 @@
 ctx = ropenssl.EVP_MD_CTX_new()
 if ctx is None:
 raise MemoryError
-rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size)
+rgc.add_memory_pressure(ropenssl.HASH_MALLOC_SIZE + self.digest_size,
+self)
 try:
 if copy_from:
 if not ropenssl.EVP_MD_CTX_copy(ctx, copy_from):
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -1316,8 +1316,9 @@
 if not ctx:
 raise ssl_error(space, "failed to allocate SSL context")
 
-rgc.add_memory_pressure(10 * 1024 * 1024)
 self = 

[pypy-commit] pypy default: merge heads

2018-02-04 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93753:03e7d032c07f
Date: 2018-02-04 13:52 +0100
http://bitbucket.org/pypy/pypy/changeset/03e7d032c07f/

Log:merge heads

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -23,3 +23,16 @@
 added, then the performance using mapdict is linear in the number of
 attributes. This is now fixed (by switching to a regular dict after 80
 attributes).
+
+
+.. branch: cpyext-faster-arg-passing
+
+When using cpyext, improve the speed of passing certain objects from PyPy to C
+code, most notably None, True, False, types, all instances of C-defined types.
+Before, a dict lookup was needed every time such an object crossed over, now it
+is just a field read.
+
+
+.. branch: 2634_datetime_timedelta_performance
+
+Improve datetime + timedelta performance.
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -208,6 +208,21 @@
 def _set_mapdict_storage_and_map(self, storage, map):
 raise NotImplementedError
 
+
+# ---
+# cpyext support
+# these functions will only be seen by the annotator if we translate
+# with the cpyext module
+
+def _cpyext_as_pyobj(self, space):
+from pypy.module.cpyext.pyobject import w_root_as_pyobj
+return w_root_as_pyobj(self, space)
+
+def _cpyext_attach_pyobj(self, space, py_obj):
+from pypy.module.cpyext.pyobject import w_root_attach_pyobj
+return w_root_attach_pyobj(self, space, py_obj)
+
+
 # ---
 
 def is_w(self, space, w_other):
diff --git a/pypy/module/_io/test/test_interp_textio.py 
b/pypy/module/_io/test/test_interp_textio.py
--- a/pypy/module/_io/test/test_interp_textio.py
+++ b/pypy/module/_io/test/test_interp_textio.py
@@ -1,6 +1,6 @@
 import pytest
 try:
-from hypothesis import given, strategies as st
+from hypothesis import given, strategies as st, settings
 except ImportError:
 pytest.skip("hypothesis required")
 import os
@@ -29,6 +29,7 @@
 
 @given(data=st_readline(),
mode=st.sampled_from(['\r', '\n', '\r\n', '']))
+@settings(deadline=None)
 def test_readline(space, data, mode):
 txt, limits = data
 w_stream = W_BytesIO(space)
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -10,6 +10,8 @@
 PyVarObject, Py_ssize_t, init_function, cts)
 from pypy.module.cpyext.state import State
 from pypy.objspace.std.typeobject import W_TypeObject
+from pypy.objspace.std.noneobject import W_NoneObject
+from pypy.objspace.std.boolobject import W_BoolObject
 from pypy.objspace.std.objectobject import W_ObjectObject
 from rpython.rlib.objectmodel import specialize, we_are_translated
 from rpython.rlib.objectmodel import keepalive_until_here
@@ -21,6 +23,52 @@
 #
 # type description
 
+class W_BaseCPyObject(W_ObjectObject):
+""" A subclass of W_ObjectObject that has one field for directly storing
+the link from the w_obj to the cpy ref. This is only used for C-defined
+types. """
+
+
+def check_true(s_arg, bookeeper):
+assert s_arg.const is True
+
+def w_root_as_pyobj(w_obj, space):
+from rpython.rlib.debug import check_annotation
+# make sure that translation crashes if we see this while not translating
+# with cpyext
+check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+# default implementation of _cpyext_as_pyobj
+return rawrefcount.from_obj(PyObject, w_obj)
+
+def w_root_attach_pyobj(w_obj, space, py_obj):
+from rpython.rlib.debug import check_annotation
+check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+assert space.config.objspace.usemodules.cpyext
+# default implementation of _cpyext_attach_pyobj
+rawrefcount.create_link_pypy(w_obj, py_obj)
+
+
+def add_direct_pyobj_storage(cls):
+""" Add the necessary methods to a class to store a reference to the py_obj
+on its instances directly. """
+
+cls._cpy_ref = lltype.nullptr(PyObject.TO)
+
+def _cpyext_as_pyobj(self, space):
+return self._cpy_ref
+cls._cpyext_as_pyobj = _cpyext_as_pyobj
+
+def _cpyext_attach_pyobj(self, space, py_obj):
+self._cpy_ref = py_obj
+rawrefcount.create_link_pyobj(self, py_obj)
+cls._cpyext_attach_pyobj = _cpyext_attach_pyobj
+
+add_direct_pyobj_storage(W_BaseCPyObject)
+add_direct_pyobj_storage(W_TypeObject)
+add_direct_pyobj_storage(W_NoneObject)
+add_direct_pyobj_storage(W_BoolObject)
+
+
 class BaseCpyTypedescr(object):
 basestruct = PyObject.TO
 W_BaseObject = W_ObjectObject
@@ -66,8 +114,12 @@
 
 def 

[pypy-commit] pypy default: merge cpyext-faster-arg-passing

2018-02-02 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93739:6a370242b86c
Date: 2018-02-02 22:24 +0100
http://bitbucket.org/pypy/pypy/changeset/6a370242b86c/

Log:merge cpyext-faster-arg-passing

When using cpyext, improve the speed of passing certain objects from
PyPy to C code, most notably None, True, False, types, all instances
of C-defined types. Before, a dict lookup was needed every time such
an object crossed over, now it is just a field read.
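
A hedged sketch of the speed-up (the class and names below are invented for
illustration; the real cpyext code stores a raw PyObject* via rawrefcount, not
a Python attribute, but the access pattern is the same):

class W_FastCrossing(object):
    _cpy_ref = None                      # the C-level twin, if any

    def _cpyext_as_pyobj(self):
        return self._cpy_ref             # plain field read, no dict lookup

    def _cpyext_attach_pyobj(self, py_obj):
        self._cpy_ref = py_obj

# The pattern this replaces for the common objects: a global mapping that
# had to be consulted every time an object crossed the boundary.
W_TO_PYOBJ = {}

def slow_as_pyobj(w_obj):
    return W_TO_PYOBJ.get(id(w_obj))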

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -23,3 +23,11 @@
 added, then the performance using mapdict is linear in the number of
 attributes. This is now fixed (by switching to a regular dict after 80
 attributes).
+
+
+.. branch: cpyext-faster-arg-passing
+
+When using cpyext, improve the speed of passing certain objects from PyPy to C
+code, most notably None, True, False, types, all instances of C-defined types.
+Before, a dict lookup was needed every time such an object crossed over, now it
+is just a field read.
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -208,6 +208,21 @@
 def _set_mapdict_storage_and_map(self, storage, map):
 raise NotImplementedError
 
+
+# ---
+# cpyext support
+# these functions will only be seen by the annotator if we translate
+# with the cpyext module
+
+def _cpyext_as_pyobj(self, space):
+from pypy.module.cpyext.pyobject import w_root_as_pyobj
+return w_root_as_pyobj(self, space)
+
+def _cpyext_attach_pyobj(self, space, py_obj):
+from pypy.module.cpyext.pyobject import w_root_attach_pyobj
+return w_root_attach_pyobj(self, space, py_obj)
+
+
 # ---
 
 def is_w(self, space, w_other):
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -10,6 +10,8 @@
 PyVarObject, Py_ssize_t, init_function, cts)
 from pypy.module.cpyext.state import State
 from pypy.objspace.std.typeobject import W_TypeObject
+from pypy.objspace.std.noneobject import W_NoneObject
+from pypy.objspace.std.boolobject import W_BoolObject
 from pypy.objspace.std.objectobject import W_ObjectObject
 from rpython.rlib.objectmodel import specialize, we_are_translated
 from rpython.rlib.objectmodel import keepalive_until_here
@@ -21,6 +23,52 @@
 #
 # type description
 
+class W_BaseCPyObject(W_ObjectObject):
+""" A subclass of W_ObjectObject that has one field for directly storing
+the link from the w_obj to the cpy ref. This is only used for C-defined
+types. """
+
+
+def check_true(s_arg, bookeeper):
+assert s_arg.const is True
+
+def w_root_as_pyobj(w_obj, space):
+from rpython.rlib.debug import check_annotation
+# make sure that translation crashes if we see this while not translating
+# with cpyext
+check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+# default implementation of _cpyext_as_pyobj
+return rawrefcount.from_obj(PyObject, w_obj)
+
+def w_root_attach_pyobj(w_obj, space, py_obj):
+from rpython.rlib.debug import check_annotation
+check_annotation(space.config.objspace.usemodules.cpyext, check_true)
+assert space.config.objspace.usemodules.cpyext
+# default implementation of _cpyext_attach_pyobj
+rawrefcount.create_link_pypy(w_obj, py_obj)
+
+
+def add_direct_pyobj_storage(cls):
+""" Add the necessary methods to a class to store a reference to the py_obj
+on its instances directly. """
+
+cls._cpy_ref = lltype.nullptr(PyObject.TO)
+
+def _cpyext_as_pyobj(self, space):
+return self._cpy_ref
+cls._cpyext_as_pyobj = _cpyext_as_pyobj
+
+def _cpyext_attach_pyobj(self, space, py_obj):
+self._cpy_ref = py_obj
+rawrefcount.create_link_pyobj(self, py_obj)
+cls._cpyext_attach_pyobj = _cpyext_attach_pyobj
+
+add_direct_pyobj_storage(W_BaseCPyObject)
+add_direct_pyobj_storage(W_TypeObject)
+add_direct_pyobj_storage(W_NoneObject)
+add_direct_pyobj_storage(W_BoolObject)
+
+
 class BaseCpyTypedescr(object):
 basestruct = PyObject.TO
 W_BaseObject = W_ObjectObject
@@ -66,8 +114,12 @@
 
 def realize(self, space, obj):
 w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
+assert isinstance(w_type, W_TypeObject)
 try:
-w_obj = space.allocate_instance(self.W_BaseObject, w_type)
+if w_type.flag_cpytype:
+w_obj = space.allocate_instance(W_BaseCPyObject, w_type)
+else:
+w_obj = space.allocate_instance(self.W_BaseObject, 

[pypy-commit] pypy default: merge 2634_datetime_timedelta_performance

2018-02-02 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93738:2f5eb5410769
Date: 2018-02-02 13:36 +0100
http://bitbucket.org/pypy/pypy/changeset/2f5eb5410769/

Log:merge 2634_datetime_timedelta_performance

improve the performance of datetime + timedelta by skipping the
consistency checks of the datetime values (they are correct by
construction)

This fixes #2634

Thanks Barry Whart!
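
The shape of the change can be sketched in plain Python (a hedged
illustration, not the actual lib_pypy/datetime.py code): internal callers pass
an already-validated 7-tuple and skip the per-field consistency checks.

class FastDatetime(object):
    def __init__(self, year, month=None, day=None, hour=0, minute=0,
                 second=0, microsecond=0):
        if isinstance(year, tuple) and len(year) == 7:
            # Values come from internal arithmetic and are correct by
            # construction: unpack them without re-checking.
            year, month, day, hour, minute, second, microsecond = year
        else:
            # External callers still go through the validation path.
            if month is None or not 1 <= month <= 12:
                raise ValueError("month must be in 1..12")
            if day is None or not 1 <= day <= 31:
                raise ValueError("day must be in 1..31")
        self.year, self.month, self.day = year, month, day
        self.hour, self.minute, self.second = hour, minute, second
        self.microsecond = microsecond

# Internal code such as __add__ can then build results cheaply:
dt = FastDatetime((2018, 2, 2, 13, 36, 0, 0))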

diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -1415,9 +1415,14 @@
 self.__setstate(year, month)
 self._hashcode = -1
 return self
-year, month, day = _check_date_fields(year, month, day)
-hour, minute, second, microsecond = _check_time_fields(
-hour, minute, second, microsecond)
+elif isinstance(year, tuple) and len(year) == 7:
+# Used by internal functions where the arguments are guaranteed to
+# be valid.
+year, month, day, hour, minute, second, microsecond = year
+else:
+year, month, day = _check_date_fields(year, month, day)
+hour, minute, second, microsecond = _check_time_fields(
+hour, minute, second, microsecond)
 _check_tzinfo_arg(tzinfo)
 self = dateinterop.__new__(cls)
 self._year = year
@@ -1491,7 +1496,7 @@
 us = 0
 y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
 ss = min(ss, 59)# clamp out leap seconds if the platform has them
-return cls(y, m, d, hh, mm, ss, us, tzinfo)
+return cls((y, m, d, hh, mm, ss, us), tzinfo=tzinfo)
 
 @classmethod
 def now(cls, tz=None):
@@ -1800,7 +1805,7 @@
 return diff and 1 or 0
 
 def _add_timedelta(self, other, factor):
-y, m, d, hh, mm, ss, us = _normalize_datetime(
+result = _normalize_datetime(
 self._year,
 self._month,
 self._day + other.days * factor,
@@ -1808,7 +1813,7 @@
 self._minute,
 self._second + other.seconds * factor,
 self._microsecond + other.microseconds * factor)
-return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)
+return datetime(result, tzinfo=self._tzinfo)
 
 def __add__(self, other):
 "Add a datetime and a timedelta."


[pypy-commit] pypy default: merge mapdict-size-limit

2018-01-29 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93710:7add90a647dc
Date: 2018-01-29 10:43 +0100
http://bitbucket.org/pypy/pypy/changeset/7add90a647dc/

Log:merge mapdict-size-limit

stop using mapdict after 80 attributes to prevent linear lookups on
attributes when using getattr and setattr to make gigantic
instances. Instead, switch to using a (string) dictionary. This
fixes issue #2728
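
A hedged sketch of the policy (not the actual mapdict code; the constant
matches the one added below, everything else is illustrative):

LIMIT_MAP_ATTRIBUTES = 80

class MapBackedInstance(object):
    def __init__(self):
        self._map_storage = {}       # stand-in for the mapdict layout
        self._plain_dict = None      # used once we give up on maps

    def setattr(self, name, value):
        if self._plain_dict is not None:
            self._plain_dict[name] = value
            return
        self._map_storage[name] = value
        if len(self._map_storage) >= LIMIT_MAP_ATTRIBUTES:
            # Materialize a regular string-keyed dict: further lookups are
            # O(1) instead of linear in the number of attributes.
            self._plain_dict = dict(self._map_storage)
            self._map_storage = None

    def getattr(self, name):
        if self._plain_dict is not None:
            return self._plain_dict[name]
        return self._map_storage[name]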

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -14,3 +14,12 @@
 .. branch: cpyext-datetime2
 
 Support ``tzinfo`` field on C-API datetime objects, fixes latest pandas HEAD
+
+
+.. branch: mapdict-size-limit
+
+Fix a corner case of mapdict: When an instance is used like a dict (using
+``setattr`` and ``getattr``, or ``.__dict__``) and a lot of attributes are
+added, then the performance using mapdict is linear in the number of
+attributes. This is now fixed (by switching to a regular dict after 80
+attributes).
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -7,7 +7,7 @@
 from pypy.objspace.std.dictmultiobject import (
 W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator,
 BaseValueIterator, BaseItemIterator, _never_equal_to_string,
-W_DictObject,
+W_DictObject, BytesDictStrategy, UnicodeDictStrategy
 )
 from pypy.objspace.std.typeobject import MutableCell
 
@@ -25,6 +25,10 @@
 # note: we use "x * NUM_DIGITS_POW2" instead of "x << NUM_DIGITS" because
 # we want to propagate knowledge that the result cannot be negative
 
+# the maximum number of attributes stored in mapdict (afterwards just use a
+# dict)
+LIMIT_MAP_ATTRIBUTES = 80
+
 
 class AbstractAttribute(object):
 _immutable_fields_ = ['terminator']
@@ -252,6 +256,9 @@
 def materialize_r_dict(self, space, obj, dict_w):
 raise NotImplementedError("abstract base class")
 
+def materialize_str_dict(self, space, obj, str_dict):
+raise NotImplementedError("abstract base class")
+
 def remove_dict_entries(self, obj):
 raise NotImplementedError("abstract base class")
 
@@ -271,6 +278,13 @@
 
 def _write_terminator(self, obj, name, index, w_value):
 obj._get_mapdict_map().add_attr(obj, name, index, w_value)
+if index == DICT and obj._get_mapdict_map().length() >= 
LIMIT_MAP_ATTRIBUTES:
+space = self.space
+w_dict = obj.getdict(space)
+assert isinstance(w_dict, W_DictMultiObject)
+strategy = w_dict.get_strategy()
+assert isinstance(strategy, MapDictStrategy)
+strategy.switch_to_text_strategy(w_dict)
 return True
 
 def copy(self, obj):
@@ -301,6 +315,12 @@
 self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls)
 
 def materialize_r_dict(self, space, obj, dict_w):
+return self._make_devolved(space)
+
+def materialize_str_dict(self, space, obj, dict_w):
+return self._make_devolved(space)
+
+def _make_devolved(self, space):
 result = Object()
 result.space = space
 result._mapdict_init_empty(self.devolved_dict_terminator)
@@ -407,6 +427,14 @@
 self._copy_attr(obj, new_obj)
 return new_obj
 
+def materialize_str_dict(self, space, obj, str_dict):
+new_obj = self.back.materialize_str_dict(space, obj, str_dict)
+if self.index == DICT:
+str_dict[self.name] = obj._mapdict_read_storage(self.storageindex)
+else:
+self._copy_attr(obj, new_obj)
+return new_obj
+
 def remove_dict_entries(self, obj):
 new_obj = self.back.remove_dict_entries(obj)
 if self.index != DICT:
@@ -738,6 +766,15 @@
 assert w_obj.getdict(self.space) is w_dict or 
w_obj._get_mapdict_map().terminator.w_cls is None
 materialize_r_dict(self.space, w_obj, dict_w)
 
+def switch_to_text_strategy(self, w_dict):
+w_obj = self.unerase(w_dict.dstorage)
+strategy = self.space.fromcache(BytesDictStrategy)
+str_dict = strategy.unerase(strategy.get_empty_storage())
+w_dict.set_strategy(strategy)
+w_dict.dstorage = strategy.erase(str_dict)
+assert w_obj.getdict(self.space) is w_dict or 
w_obj._get_mapdict_map().terminator.w_cls is None
+materialize_str_dict(self.space, w_obj, str_dict)
+
 def getitem(self, w_dict, w_key):
 space = self.space
 w_lookup_type = space.type(w_key)
@@ -833,6 +870,11 @@
 new_obj = map.materialize_r_dict(space, obj, dict_w)
 obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
 
+def materialize_str_dict(space, obj, dict_w):
+map = obj._get_mapdict_map()
+new_obj = map.materialize_str_dict(space, obj, dict_w)
+obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
+
 
 class 

[pypy-commit] pypy default: merge cpyext-datetime2 which adds a tzinfo field to datetime C-API types

2018-01-19 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r93684:8a1cb5467f77
Date: 2018-01-19 11:10 +0200
http://bitbucket.org/pypy/pypy/changeset/8a1cb5467f77/

Log:merge cpyext-datetime2 which adds a tzinfo field to datetime C-API
types

diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -17,10 +17,13 @@
 """
 
 from __future__ import division
-import time as _time
+import time as _timemodule
 import math as _math
 import struct as _struct
 
+# for cpyext, use these as base classes
+from __pypy__._pypydatetime import dateinterop, deltainterop, timeinterop
+
 _SENTINEL = object()
 
 def _cmp(x, y):
@@ -179,7 +182,7 @@
 def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
 wday = (_ymd2ord(y, m, d) + 6) % 7
 dnum = _days_before_month(y, m) + d
-return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
+return _timemodule.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
 
 def _format_time(hh, mm, ss, us):
 # Skip trailing microseconds when us==0.
@@ -247,7 +250,7 @@
 else:
 push(ch)
 newformat = "".join(newformat)
-return _time.strftime(newformat, timetuple)
+return _timemodule.strftime(newformat, timetuple)
 
 # Just raise TypeError if the arg isn't None or a string.
 def _check_tzname(name):
@@ -433,7 +436,7 @@
 raise TypeError("unsupported type for timedelta %s component: %s" %
 (tag, type(num)))
 
-class timedelta(object):
+class timedelta(deltainterop):
 """Represent the difference between two datetime objects.
 
 Supported operators:
@@ -489,7 +492,7 @@
 if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
 raise OverflowError("days=%d; must have magnitude <= %d" % (d, 
_MAX_DELTA_DAYS))
 
-self = object.__new__(cls)
+self = deltainterop.__new__(cls)
 self._days = d
 self._seconds = s
 self._microseconds = us
@@ -667,7 +670,7 @@
 timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 100-1)
 timedelta.resolution = timedelta(microseconds=1)
 
-class date(object):
+class date(dateinterop):
 """Concrete date type.
 
 Constructors:
@@ -707,12 +710,12 @@
 if month is None and isinstance(year, bytes) and len(year) == 4 and \
 1 <= ord(year[2]) <= 12:
 # Pickle support
-self = object.__new__(cls)
+self = dateinterop.__new__(cls)
 self.__setstate(year)
 self._hashcode = -1
 return self
 year, month, day = _check_date_fields(year, month, day)
-self = object.__new__(cls)
+self = dateinterop.__new__(cls)
 self._year = year
 self._month = month
 self._day = day
@@ -724,13 +727,13 @@
 @classmethod
 def fromtimestamp(cls, t):
 "Construct a date from a POSIX timestamp (like time.time())."
-y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
+y, m, d, hh, mm, ss, weekday, jday, dst = _timemodule.localtime(t)
 return cls(y, m, d)
 
 @classmethod
 def today(cls):
 "Construct a date from time.time()."
-t = _time.time()
+t = _timemodule.time()
 return cls.fromtimestamp(t)
 
 @classmethod
@@ -1061,7 +1064,7 @@
 
 _tzinfo_class = tzinfo
 
-class time(object):
+class time(timeinterop):
 """Time with time zone.
 
 Constructors:
@@ -1097,14 +1100,14 @@
 """
 if isinstance(hour, bytes) and len(hour) == 6 and ord(hour[0]) < 24:
 # Pickle support
-self = object.__new__(cls)
+self = timeinterop.__new__(cls)
 self.__setstate(hour, minute or None)
 self._hashcode = -1
 return self
 hour, minute, second, microsecond = _check_time_fields(
 hour, minute, second, microsecond)
 _check_tzinfo_arg(tzinfo)
-self = object.__new__(cls)
+self = timeinterop.__new__(cls)
 self._hour = hour
 self._minute = minute
 self._second = second
@@ -1408,7 +1411,7 @@
 if isinstance(year, bytes) and len(year) == 10 and \
 1 <= ord(year[2]) <= 12:
 # Pickle support
-self = object.__new__(cls)
+self = dateinterop.__new__(cls)
 self.__setstate(year, month)
 self._hashcode = -1
 return self
@@ -1416,7 +1419,7 @@
 hour, minute, second, microsecond = _check_time_fields(
 hour, minute, second, microsecond)
 _check_tzinfo_arg(tzinfo)
-self = object.__new__(cls)
+self = dateinterop.__new__(cls)
 self._year = year
 self._month = month
 self._day = day
@@ -1461,7 +1464,7 @@
 A timezone info object may be passed in as well.
 """
 _check_tzinfo_arg(tz)
-converter = _time.localtime if tz is None else _time.gmtime
+converter = 

[pypy-commit] pypy default: merge heads

2017-12-25 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r93577:7eecbbca7bdc
Date: 2017-12-26 08:28 +0100
http://bitbucket.org/pypy/pypy/changeset/7eecbbca7bdc/

Log:merge heads

diff too long, truncating to 2000 out of 3533 lines

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,3 +5,6 @@
 .. this is a revision shortly after release-pypy2.7-v5.10.0
 .. startrev: 6b024edd9d12
 
+.. branch: cpyext-avoid-roundtrip
+
+Reduce conversions of c struct to rpython equivalent where possible
diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -682,8 +682,8 @@
 if hasattr(space, "fake"):
 raise NotImplementedError
 space.getbuiltinmodule("cpyext")
-from pypy.module.cpyext.pyobject import Py_DecRef, PyObject
-Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0]))
+from pypy.module.cpyext.pyobject import decref, PyObject
+decref(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0]))
 
 
 class MacroConverter(TypeConverter):
diff --git a/pypy/module/_cppyy/executor.py b/pypy/module/_cppyy/executor.py
--- a/pypy/module/_cppyy/executor.py
+++ b/pypy/module/_cppyy/executor.py
@@ -220,11 +220,11 @@
 
 def wrap_result(self, space, lresult):
 space.getbuiltinmodule("cpyext")
-from pypy.module.cpyext.pyobject import PyObject, from_ref, make_ref, 
Py_DecRef
+from pypy.module.cpyext.pyobject import PyObject, from_ref, make_ref, 
decref
 result = rffi.cast(PyObject, lresult)
 w_obj = from_ref(space, result)
 if result:
-Py_DecRef(space, result)
+decref(space, result)
 return w_obj
 
 def execute(self, space, cppmethod, cppthis, num_args, args):
diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py
--- a/pypy/module/cpyext/__init__.py
+++ b/pypy/module/cpyext/__init__.py
@@ -17,11 +17,12 @@
 space.fromcache(State).startup(space)
 method = pypy.module.cpyext.typeobject.get_new_method_def(space)
 # the w_self argument here is a dummy, the only thing done with w_obj
-# is call space.type on it
-w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space, 
method, space.w_None)
-space.appexec([space.type(w_obj)], """(methodtype):
+# is call type() on it
+w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space,
+   method, 
space.w_None)
+space.appexec([w_obj], """(meth):
 from pickle import Pickler
-Pickler.dispatch[methodtype] = Pickler.save_global
+Pickler.dispatch[type(meth)] = Pickler.save_global
 """)
 
 def register_atexit(self, function):
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -204,6 +204,9 @@
 # id.  Invariant: this variable always contain 0 when the PyPy GIL is
 # released.  It should also contain 0 when regular RPython code
 # executes.  In non-cpyext-related code, it will thus always be 0.
+# When cpyext-related C code runs, it contains the thread id (usually)
+# or the value -1 (only for state.C.PyXxx() functions which are short-
+# running and should not themselves release the GIL).
 #
 # **make_generic_cpy_call():** RPython to C, with the GIL held.  Before
 # the call, must assert that the global variable is 0 and set the
@@ -255,14 +258,73 @@
 
 cpyext_namespace = NameManager('cpyext_')
 
-class ApiFunction(object):
-def __init__(self, argtypes, restype, callable, error=CANNOT_FAIL,
- c_name=None, cdecl=None, gil=None,
- result_borrowed=False, result_is_ll=False):
+class BaseApiFunction(object):
+def __init__(self, argtypes, restype, callable):
 self.argtypes = argtypes
 self.restype = restype
 self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype))
 self.callable = callable
+self.cdecl = None# default
+#
+def get_llhelper(space):
+return llhelper(self.functype, self.get_wrapper(space))
+self.get_llhelper = get_llhelper
+
+def get_api_decl(self, name, c_writer):
+restype = self.get_c_restype(c_writer)
+args = self.get_c_args(c_writer)
+res = self.API_VISIBILITY % (restype,)
+return "{res} {name}({args});".format(**locals())
+
+def get_c_restype(self, c_writer):
+if self.cdecl:
+return self.cdecl.tp.result.get_c_name()
+return c_writer.gettype(self.restype).replace('@', '').strip()
+
+def get_c_args(self, c_writer):
+if self.cdecl:
+args = [tp.get_c_name('arg%d' % i) for i, tp in
+enumerate(self.cdecl.tp.args)]
+   

[pypy-commit] pypy default: merge cpyext-avoid-roundtrip which avoids some rpython-c conversions

2017-12-25 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r93562:7b550e9b3cee
Date: 2017-12-25 23:31 +0200
http://bitbucket.org/pypy/pypy/changeset/7b550e9b3cee/

Log:merge cpyext-avoid-roundtrip which avoids some rpython-c conversions

diff too long, truncating to 2000 out of 3523 lines

diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -682,8 +682,8 @@
 if hasattr(space, "fake"):
 raise NotImplementedError
 space.getbuiltinmodule("cpyext")
-from pypy.module.cpyext.pyobject import Py_DecRef, PyObject
-Py_DecRef(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0]))
+from pypy.module.cpyext.pyobject import decref, PyObject
+decref(space, rffi.cast(PyObject, rffi.cast(rffi.VOIDPP, arg)[0]))
 
 
 class MacroConverter(TypeConverter):
diff --git a/pypy/module/_cppyy/executor.py b/pypy/module/_cppyy/executor.py
--- a/pypy/module/_cppyy/executor.py
+++ b/pypy/module/_cppyy/executor.py
@@ -220,11 +220,11 @@
 
 def wrap_result(self, space, lresult):
 space.getbuiltinmodule("cpyext")
-from pypy.module.cpyext.pyobject import PyObject, from_ref, make_ref, 
Py_DecRef
+from pypy.module.cpyext.pyobject import PyObject, from_ref, make_ref, 
decref
 result = rffi.cast(PyObject, lresult)
 w_obj = from_ref(space, result)
 if result:
-Py_DecRef(space, result)
+decref(space, result)
 return w_obj
 
 def execute(self, space, cppmethod, cppthis, num_args, args):
diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py
--- a/pypy/module/cpyext/__init__.py
+++ b/pypy/module/cpyext/__init__.py
@@ -17,11 +17,12 @@
 space.fromcache(State).startup(space)
 method = pypy.module.cpyext.typeobject.get_new_method_def(space)
 # the w_self argument here is a dummy, the only thing done with w_obj
-# is call space.type on it
-w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space, 
method, space.w_None)
-space.appexec([space.type(w_obj)], """(methodtype):
+# is call type() on it
+w_obj = pypy.module.cpyext.methodobject.W_PyCFunctionObject(space,
+   method, 
space.w_None)
+space.appexec([w_obj], """(meth):
 from pickle import Pickler
-Pickler.dispatch[methodtype] = Pickler.save_global
+Pickler.dispatch[type(meth)] = Pickler.save_global
 """)
 
 def register_atexit(self, function):
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -204,6 +204,9 @@
 # id.  Invariant: this variable always contain 0 when the PyPy GIL is
 # released.  It should also contain 0 when regular RPython code
 # executes.  In non-cpyext-related code, it will thus always be 0.
+# When cpyext-related C code runs, it contains the thread id (usually)
+# or the value -1 (only for state.C.PyXxx() functions which are short-
+# running and should not themselves release the GIL).
 #
 # **make_generic_cpy_call():** RPython to C, with the GIL held.  Before
 # the call, must assert that the global variable is 0 and set the
@@ -255,14 +258,73 @@
 
 cpyext_namespace = NameManager('cpyext_')
 
-class ApiFunction(object):
-def __init__(self, argtypes, restype, callable, error=CANNOT_FAIL,
- c_name=None, cdecl=None, gil=None,
- result_borrowed=False, result_is_ll=False):
+class BaseApiFunction(object):
+def __init__(self, argtypes, restype, callable):
 self.argtypes = argtypes
 self.restype = restype
 self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype))
 self.callable = callable
+self.cdecl = None# default
+#
+def get_llhelper(space):
+return llhelper(self.functype, self.get_wrapper(space))
+self.get_llhelper = get_llhelper
+
+def get_api_decl(self, name, c_writer):
+restype = self.get_c_restype(c_writer)
+args = self.get_c_args(c_writer)
+res = self.API_VISIBILITY % (restype,)
+return "{res} {name}({args});".format(**locals())
+
+def get_c_restype(self, c_writer):
+if self.cdecl:
+return self.cdecl.tp.result.get_c_name()
+return c_writer.gettype(self.restype).replace('@', '').strip()
+
+def get_c_args(self, c_writer):
+if self.cdecl:
+args = [tp.get_c_name('arg%d' % i) for i, tp in
+enumerate(self.cdecl.tp.args)]
+return ', '.join(args) or "void"
+args = []
+for i, argtype in enumerate(self.argtypes):
+if argtype is CONST_STRING:
+arg = 'const char *@'
+elif argtype is CONST_STRINGP:
+arg = 'const char **@'
+

[pypy-commit] pypy default: merge again fix-vmprof-stacklet-switch-2: this should fix translation on platforms where vmprof is not supported, and it also refactor rvmprof.cintf to be slightly saner

2017-12-19 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r93500:e742e3594267
Date: 2017-12-19 19:04 +0100
http://bitbucket.org/pypy/pypy/changeset/e742e3594267/

Log:merge again fix-vmprof-stacklet-switch-2: this should fix
translation on platforms where vmprof is not supported, and it also
refactors rvmprof.cintf to be slightly saner
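
The refactoring pattern can be sketched as follows (illustrative only; the
real code builds an ExternalCompilationInfo from the vmprof C sources and uses
rpython's platform detection): the compilation info is constructed lazily and
only on supported platforms.

import platform

IS_SUPPORTED = platform.machine() in ('i686', 'x86_64')   # x86 only for now

def make_eci():
    if make_eci.called:
        raise ValueError("make_eci() should be called at most once")
    make_eci.called = True
    compile_extra = ['-DRPYTHON_VMPROF']
    # ... platform-specific flags and source files would be added here ...
    return {'compile_extra': compile_extra}
make_eci.called = False

def setup():
    if not IS_SUPPORTED:
        # stand-in for the real VMProfPlatformUnsupported error
        raise Exception("vmprof is not supported on this CPU")
    return make_eci()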

diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
--- a/rpython/rlib/rvmprof/cintf.py
+++ b/rpython/rlib/rvmprof/cintf.py
@@ -14,67 +14,75 @@
 class VMProfPlatformUnsupported(Exception):
 pass
 
+# vmprof works only on x86 for now
+IS_SUPPORTED = host_platform.machine() in ('i686', 'x86_64')
+
 ROOT = py.path.local(rpythonroot).join('rpython', 'rlib', 'rvmprof')
 SRC = ROOT.join('src')
 SHARED = SRC.join('shared')
 BACKTRACE = SHARED.join('libbacktrace')
 
-compile_extra = ['-DRPYTHON_VMPROF']
-separate_module_files = [
-SHARED.join('symboltable.c'),
-SHARED.join('vmprof_unix.c')
-]
-if sys.platform.startswith('linux'):
-separate_module_files += [
-   BACKTRACE.join('atomic.c'),
-   BACKTRACE.join('backtrace.c'),
-   BACKTRACE.join('state.c'),
-   BACKTRACE.join('elf.c'),
-   BACKTRACE.join('dwarf.c'),
-   BACKTRACE.join('fileline.c'),
-   BACKTRACE.join('mmap.c'),
-   BACKTRACE.join('mmapio.c'),
-   BACKTRACE.join('posix.c'),
-   BACKTRACE.join('sort.c'),
+def make_eci():
+if make_eci.called:
+raise ValueError("make_eci() should be called at most once")
+#
+compile_extra = ['-DRPYTHON_VMPROF']
+separate_module_files = [
+SHARED.join('symboltable.c'),
+SHARED.join('vmprof_unix.c')
 ]
-_libs = ['dl']
-compile_extra += ['-DVMPROF_UNIX']
-compile_extra += ['-DVMPROF_LINUX']
-elif sys.platform == 'win32':
-compile_extra += ['-DVMPROF_WINDOWS']
-separate_module_files = [SHARED.join('vmprof_win.c')]
-_libs = []
-else:
-# Guessing a BSD-like Unix platform
-compile_extra += ['-DVMPROF_UNIX']
-compile_extra += ['-DVMPROF_MAC']
-if sys.platform.startswith('freebsd'):
-_libs = ['unwind']
+if sys.platform.startswith('linux'):
+separate_module_files += [
+   BACKTRACE.join('atomic.c'),
+   BACKTRACE.join('backtrace.c'),
+   BACKTRACE.join('state.c'),
+   BACKTRACE.join('elf.c'),
+   BACKTRACE.join('dwarf.c'),
+   BACKTRACE.join('fileline.c'),
+   BACKTRACE.join('mmap.c'),
+   BACKTRACE.join('mmapio.c'),
+   BACKTRACE.join('posix.c'),
+   BACKTRACE.join('sort.c'),
+]
+_libs = ['dl']
+compile_extra += ['-DVMPROF_UNIX']
+compile_extra += ['-DVMPROF_LINUX']
+elif sys.platform == 'win32':
+compile_extra += ['-DVMPROF_WINDOWS']
+separate_module_files = [SHARED.join('vmprof_win.c')]
+_libs = []
 else:
-_libs = []
+# Guessing a BSD-like Unix platform
+compile_extra += ['-DVMPROF_UNIX']
+compile_extra += ['-DVMPROF_MAC']
+if sys.platform.startswith('freebsd'):
+_libs = ['unwind']
+else:
+_libs = []
 
-
-eci_kwds = dict(
-include_dirs = [SRC, SHARED, BACKTRACE],
-includes = ['rvmprof.h','vmprof_stack.h'],
-libraries = _libs,
-separate_module_files = [
-SRC.join('rvmprof.c'),
-SHARED.join('compat.c'),
-SHARED.join('machine.c'),
-SHARED.join('vmp_stack.c'),
-SHARED.join('vmprof_memory.c'),
-SHARED.join('vmprof_common.c'),
-# symbol table already in separate_module_files
-] + separate_module_files,
-post_include_bits=[],
-compile_extra=compile_extra
-)
-if sys.platform != 'win32':
-eci_kwds['separate_module_files'].append(
-SHARED.join('vmprof_mt.c'),
-)
-global_eci = ExternalCompilationInfo(**eci_kwds)
+eci_kwds = dict(
+include_dirs = [SRC, SHARED, BACKTRACE],
+includes = ['rvmprof.h','vmprof_stack.h'],
+libraries = _libs,
+separate_module_files = [
+SRC.join('rvmprof.c'),
+SHARED.join('compat.c'),
+SHARED.join('machine.c'),
+SHARED.join('vmp_stack.c'),
+SHARED.join('vmprof_memory.c'),
+SHARED.join('vmprof_common.c'),
+# symbol table already in separate_module_files
+] + separate_module_files,
+post_include_bits=[],
+compile_extra=compile_extra
+)
+if sys.platform != 'win32':
+eci_kwds['separate_module_files'].append(
+SHARED.join('vmprof_mt.c'),
+)
+make_eci.called = True
+return ExternalCompilationInfo(**eci_kwds), eci_kwds
+make_eci.called = False
 
 def configure_libbacktrace_linux():
 bits = 32 if sys.maxsize == 2**31-1 else 64
@@ -85,14 +93,17 @@
 shutil.copy(str(BACKTRACE.join(specific_config)), str(config))
 
 def setup():
+if not IS_SUPPORTED:
+raise 

[pypy-commit] pypy default: merge

2017-12-19 Thread fijal
Author: fijal
Branch: 
Changeset: r93488:2ac941dfb825
Date: 2017-12-19 15:58 +0200
http://bitbucket.org/pypy/pypy/changeset/2ac941dfb825/

Log:merge

diff --git a/pypy/doc/release-v5.10.0.rst b/pypy/doc/release-v5.10.0.rst
--- a/pypy/doc/release-v5.10.0.rst
+++ b/pypy/doc/release-v5.10.0.rst
@@ -17,10 +17,11 @@
 
 As always, this release is 100% compatible with the previous one and fixed
 several issues and bugs raised by the growing community of PyPy users.
-As always, wxe strongly recommend updating.
+As always, we strongly recommend updating.
 
 This release concludes the Mozilla Open Source `grant`_ for having a compatible
-PyPy 3.5 release and we're very grateful for that.
+PyPy 3.5 release and we're very grateful for that.  Of course, we will continue
+to improve PyPy 3.5 and probably move to 3.6 during the course of 2018.
 
 You can download the v5.10 releases here:
 


[pypy-commit] pypy default: merge heads

2017-12-18 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r93472:13a87780bd5a
Date: 2017-12-18 13:43 +0100
http://bitbucket.org/pypy/pypy/changeset/13a87780bd5a/

Log:merge heads

diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
--- a/extra_tests/requirements.txt
+++ b/extra_tests/requirements.txt
@@ -1,2 +1,3 @@
 pytest
 hypothesis
+vmprof
diff --git a/extra_tests/test_vmprof_greenlet.py 
b/extra_tests/test_vmprof_greenlet.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_vmprof_greenlet.py
@@ -0,0 +1,28 @@
+import time
+import pytest
+import greenlet
+import vmprof
+
+def count_samples(filename):
+stats = vmprof.read_profile(filename)
+return len(stats.profiles)
+
+def cpuburn(duration):
+end = time.time() + duration
+while time.time() < end:
+pass
+
+def test_sampling_inside_callback(tmpdir):
+# see also test_sampling_inside_callback inside
+# pypy/module/_continuation/test/test_stacklet.py
+#
+G = greenlet.greenlet(cpuburn)
+fname = tmpdir.join('log.vmprof')
+with fname.open('w+b') as f:
+vmprof.enable(f.fileno(), 1/250.0)
+G.switch(0.1)
+vmprof.disable()
+
+samples = count_samples(str(fname))
+# 0.1 seconds at 250Hz should be 25 samples
+assert 23 < samples < 27
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -31,7 +31,7 @@
 Upgrade the _vmprof backend to vmprof 0.4.10
 
 .. branch: fix-vmprof-stacklet-switch
-
+.. branch: fix-vmprof-stacklet-switch-2
 Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
 
 .. branch: win32-vcvars
@@ -39,3 +39,4 @@
 .. branch: rdict-fast-hash
 
 Make it possible to declare that the hash function of an r_dict is fast in 
RPython.
+
diff --git a/pypy/module/_continuation/interp_continuation.py 
b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -1,5 +1,6 @@
 from rpython.rlib.rstacklet import StackletThread
 from rpython.rlib import jit
+from rpython.rlib import rvmprof
 from pypy.interpreter.error import OperationError, get_cleared_operation_error
 from pypy.interpreter.executioncontext import ExecutionContext
 from pypy.interpreter.baseobjspace import W_Root
@@ -222,12 +223,15 @@
 self.h = h
 global_state.clear()
 try:
+rvmprof.start_sampling()
 frame = self.bottomframe
 w_result = frame.execute_frame()
 except Exception as e:
 global_state.propagate_exception = e
 else:
 global_state.w_value = w_result
+finally:
+rvmprof.stop_sampling()
 self.sthread.ec.topframeref = jit.vref_None
 global_state.origin = self
 global_state.destination = self
diff --git a/pypy/module/_continuation/test/test_stacklet.py 
b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -1,7 +1,10 @@
+import pytest
 import os
+from rpython.rlib.rvmprof.test.support import fakevmprof
+from pypy.interpreter.gateway import interp2app
 from pypy.module._continuation.test.support import BaseAppTest
 
-
+@pytest.mark.usefixtures('app_fakevmprof')
 class AppTestStacklet(BaseAppTest):
 def setup_class(cls):
 BaseAppTest.setup_class.im_func(cls)
@@ -34,10 +37,34 @@
 return res
 return stack
""")
+cls.w_appdirect = cls.space.wrap(cls.runappdirect)
 if cls.runappdirect:
 # make sure that "self.stack" does not pass the self
 cls.w_stack = staticmethod(cls.w_stack.im_func)
 
+
+@pytest.fixture
+def app_fakevmprof(self, fakevmprof):
+"""
+This is automaticaly re-initialized for every method: thanks to
+fakevmprof's finalizer, it checks that we called {start,stop}_sampling
+the in pairs
+"""
+w = self.space.wrap
+i2a = interp2app
+def is_sampling_enabled(space):
+return space.wrap(fakevmprof.is_sampling_enabled)
+self.w_is_sampling_enabled = w(i2a(is_sampling_enabled))
+#
+def start_sampling(space):
+fakevmprof.start_sampling()
+self.w_start_sampling = w(i2a(start_sampling))
+#
+def stop_sampling(space):
+fakevmprof.stop_sampling()
+self.w_stop_sampling = w(i2a(stop_sampling))
+
+
 def test_new_empty(self):
 from _continuation import continulet
 #
@@ -770,3 +797,25 @@
 
 continulet.switch(c1, to=c2)
 raises(error, continulet.switch, c1, to=c2)
+
+def test_sampling_inside_callback(self):
+if self.appdirect:
+# see also
+# extra_tests.test_vmprof_greenlet.test_sampling_inside_callback
+# for a "translated" version of this test
+skip("we can't run this 

[pypy-commit] pypy default: merge the fix-vmprof-stacklet-switch-2 branch, which fixes vmprof+greenlet:

2017-12-18 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r93465:c30916ebe15f
Date: 2017-12-18 11:03 +0100
http://bitbucket.org/pypy/pypy/changeset/c30916ebe15f/

Log:merge the fix-vmprof-stacklet-switch-2 branch, which fixes
vmprof+greenlet: before, vmprof did not take any samples inside
greenlets once you did a switch().
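
A hedged sketch of the invariant the branch enforces (rvmprof is passed in to
keep the sketch self-contained; the real calls live in rstacklet.py and
interp_continuation.py below): sampling is paused while a switch leaves the
shadow stack inconsistent and re-enabled afterwards.

from contextlib import contextmanager

@contextmanager
def sampling_paused(rvmprof):
    rvmprof.stop_sampling()
    try:
        yield
    finally:
        rvmprof.start_sampling()

# Hypothetical use around the low-level switch:
#   with sampling_paused(rvmprof):
#       h = gcrootfinder.switch(stacklet)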

diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
--- a/extra_tests/requirements.txt
+++ b/extra_tests/requirements.txt
@@ -1,2 +1,3 @@
 pytest
 hypothesis
+vmprof
diff --git a/extra_tests/test_vmprof_greenlet.py 
b/extra_tests/test_vmprof_greenlet.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_vmprof_greenlet.py
@@ -0,0 +1,28 @@
+import time
+import pytest
+import greenlet
+import vmprof
+
+def count_samples(filename):
+stats = vmprof.read_profile(filename)
+return len(stats.profiles)
+
+def cpuburn(duration):
+end = time.time() + duration
+while time.time() < end:
+pass
+
+def test_sampling_inside_callback(tmpdir):
+# see also test_sampling_inside_callback inside
+# pypy/module/_continuation/test/test_stacklet.py
+#
+G = greenlet.greenlet(cpuburn)
+fname = tmpdir.join('log.vmprof')
+with fname.open('w+b') as f:
+vmprof.enable(f.fileno(), 1/250.0)
+G.switch(0.1)
+vmprof.disable()
+
+samples = count_samples(str(fname))
+# 0.1 seconds at 250Hz should be 25 samples
+assert 23 < samples < 27
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -31,7 +31,7 @@
 Upgrade the _vmprof backend to vmprof 0.4.10
 
 .. branch: fix-vmprof-stacklet-switch
-
+.. branch: fix-vmprof-stacklet-switch-2
 Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
 
 .. branch: win32-vcvars
@@ -39,3 +39,4 @@
 .. branch: rdict-fast-hash
 
 Make it possible to declare that the hash function of an r_dict is fast in 
RPython.
+
diff --git a/pypy/module/_continuation/interp_continuation.py 
b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -1,5 +1,6 @@
 from rpython.rlib.rstacklet import StackletThread
 from rpython.rlib import jit
+from rpython.rlib import rvmprof
 from pypy.interpreter.error import OperationError, get_cleared_operation_error
 from pypy.interpreter.executioncontext import ExecutionContext
 from pypy.interpreter.baseobjspace import W_Root
@@ -222,12 +223,15 @@
 self.h = h
 global_state.clear()
 try:
+rvmprof.start_sampling()
 frame = self.bottomframe
 w_result = frame.execute_frame()
 except Exception as e:
 global_state.propagate_exception = e
 else:
 global_state.w_value = w_result
+finally:
+rvmprof.stop_sampling()
 self.sthread.ec.topframeref = jit.vref_None
 global_state.origin = self
 global_state.destination = self
diff --git a/pypy/module/_continuation/test/test_stacklet.py 
b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -1,7 +1,10 @@
+import pytest
 import os
+from rpython.rlib.rvmprof.test.support import fakevmprof
+from pypy.interpreter.gateway import interp2app
 from pypy.module._continuation.test.support import BaseAppTest
 
-
+@pytest.mark.usefixtures('app_fakevmprof')
 class AppTestStacklet(BaseAppTest):
 def setup_class(cls):
 BaseAppTest.setup_class.im_func(cls)
@@ -34,10 +37,34 @@
 return res
 return stack
""")
+cls.w_appdirect = cls.space.wrap(cls.runappdirect)
 if cls.runappdirect:
 # make sure that "self.stack" does not pass the self
 cls.w_stack = staticmethod(cls.w_stack.im_func)
 
+
+@pytest.fixture
+def app_fakevmprof(self, fakevmprof):
+"""
+This is automaticaly re-initialized for every method: thanks to
+fakevmprof's finalizer, it checks that we called {start,stop}_sampling
+the in pairs
+"""
+w = self.space.wrap
+i2a = interp2app
+def is_sampling_enabled(space):
+return space.wrap(fakevmprof.is_sampling_enabled)
+self.w_is_sampling_enabled = w(i2a(is_sampling_enabled))
+#
+def start_sampling(space):
+fakevmprof.start_sampling()
+self.w_start_sampling = w(i2a(start_sampling))
+#
+def stop_sampling(space):
+fakevmprof.stop_sampling()
+self.w_stop_sampling = w(i2a(stop_sampling))
+
+
 def test_new_empty(self):
 from _continuation import continulet
 #
@@ -770,3 +797,25 @@
 
 continulet.switch(c1, to=c2)
 raises(error, continulet.switch, c1, to=c2)
+
+def test_sampling_inside_callback(self):
+if self.appdirect:
+# see 

[pypy-commit] pypy default: merge rdict-fast-hash:

2017-12-12 Thread cfbolz
Author: Carl Friedrich Bolz-Tereick 
Branch: 
Changeset: r93386:a00ca61351ba
Date: 2017-12-12 18:27 +0100
http://bitbucket.org/pypy/pypy/changeset/a00ca61351ba/

Log:merge rdict-fast-hash:

make it possible to declare that the hash and eq functions used in
an objectmodel.r_dict are "simple", which means that they will not
change the dict, and that the hash function is fast enough so that
caching the hash is not necessary.
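
The new flag is used like this (a minimal sketch assuming an importable
rpython package; the key functions are placeholders for whatever cheap,
non-mutating eq/hash pair the caller already has):

from rpython.rlib.objectmodel import r_dict

def key_eq(k1, k2):
    return k1 == k2          # must not mutate the dict

def key_hash(k):
    return hash(k)           # cheap enough that caching it is not worth it

# simple_hash_eq=True tells RPython it may skip caching the hash values.
cache = r_dict(key_eq, key_hash, simple_hash_eq=True)
cache["chunk"] = 42
assert cache["chunk"] == 42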

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -36,3 +36,6 @@
 
 .. branch: win32-vcvars
 
+.. branch rdict-fast-hash
+
+Make it possible to declare that the hash function of an r_dict is fast in 
RPython.
diff --git a/pypy/module/_pypyjson/interp_decoder.py 
b/pypy/module/_pypyjson/interp_decoder.py
--- a/pypy/module/_pypyjson/interp_decoder.py
+++ b/pypy/module/_pypyjson/interp_decoder.py
@@ -71,7 +71,7 @@
 self.ll_chars = rffi.str2charp(s)
 self.end_ptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
 self.pos = 0
-self.cache = r_dict(slice_eq, slice_hash)
+self.cache = r_dict(slice_eq, slice_hash, simple_hash_eq=True)
 
 def close(self):
 rffi.free_charp(self.ll_chars)
diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py
--- a/rpython/annotator/bookkeeper.py
+++ b/rpython/annotator/bookkeeper.py
@@ -194,13 +194,14 @@
 listdef.generalize_range_step(flags['range_step'])
 return SomeList(listdef)
 
-def getdictdef(self, is_r_dict=False, force_non_null=False):
+def getdictdef(self, is_r_dict=False, force_non_null=False, 
simple_hash_eq=False):
 """Get the DictDef associated with the current position."""
 try:
 dictdef = self.dictdefs[self.position_key]
 except KeyError:
 dictdef = DictDef(self, is_r_dict=is_r_dict,
-  force_non_null=force_non_null)
+  force_non_null=force_non_null,
+  simple_hash_eq=simple_hash_eq)
 self.dictdefs[self.position_key] = dictdef
 return dictdef
 
diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py
--- a/rpython/annotator/builtin.py
+++ b/rpython/annotator/builtin.py
@@ -237,22 +237,30 @@
 return SomeInstance(clsdef)
 
 @analyzer_for(rpython.rlib.objectmodel.r_dict)
-def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None):
+def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None, 
s_simple_hash_eq=None):
+return _r_dict_helper(SomeDict, s_eqfn, s_hashfn, s_force_non_null, 
s_simple_hash_eq)
+
+@analyzer_for(rpython.rlib.objectmodel.r_ordereddict)
+def robjmodel_r_ordereddict(s_eqfn, s_hashfn, s_force_non_null=None, 
s_simple_hash_eq=None):
+return _r_dict_helper(SomeOrderedDict, s_eqfn, s_hashfn,
+  s_force_non_null, s_simple_hash_eq)
+
+def _r_dict_helper(cls, s_eqfn, s_hashfn, s_force_non_null, s_simple_hash_eq):
 if s_force_non_null is None:
 force_non_null = False
 else:
 assert s_force_non_null.is_constant()
 force_non_null = s_force_non_null.const
+if s_simple_hash_eq is None:
+simple_hash_eq = False
+else:
+assert s_simple_hash_eq.is_constant()
+simple_hash_eq = s_simple_hash_eq.const
 dictdef = getbookkeeper().getdictdef(is_r_dict=True,
- force_non_null=force_non_null)
+ force_non_null=force_non_null,
+ simple_hash_eq=simple_hash_eq)
 dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn)
-return SomeDict(dictdef)
-
-@analyzer_for(rpython.rlib.objectmodel.r_ordereddict)
-def robjmodel_r_ordereddict(s_eqfn, s_hashfn):
-dictdef = getbookkeeper().getdictdef(is_r_dict=True)
-dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn)
-return SomeOrderedDict(dictdef)
+return cls(dictdef)
 
 @analyzer_for(rpython.rlib.objectmodel.hlinvoke)
 def robjmodel_hlinvoke(s_repr, s_llcallable, *args_s):
diff --git a/rpython/annotator/dictdef.py b/rpython/annotator/dictdef.py
--- a/rpython/annotator/dictdef.py
+++ b/rpython/annotator/dictdef.py
@@ -81,12 +81,14 @@
 def __init__(self, bookkeeper, s_key = s_ImpossibleValue,
  s_value = s_ImpossibleValue,
is_r_dict = False,
-   force_non_null = False):
+   force_non_null = False,
+   simple_hash_eq = False):
 self.dictkey = DictKey(bookkeeper, s_key, is_r_dict)
 self.dictkey.itemof[self] = True
 self.dictvalue = DictValue(bookkeeper, s_value)
 self.dictvalue.itemof[self] = True
 self.force_non_null = force_non_null
+self.simple_hash_eq = simple_hash_eq
 
 def 

[pypy-commit] pypy default: merge win32-vcvars, log more and try vsvars32 before vcvars32, go figure

2017-11-29 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r93209:d1aaa6aca19d
Date: 2017-11-29 10:41 +0200
http://bitbucket.org/pypy/pypy/changeset/d1aaa6aca19d/

Log:merge win32-vcvars, log more and try vsvars32 before vcvars32, go
figure
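
The lookup order implemented below can be sketched like this (illustrative
helper, not part of the real windows.py): prefer the MSVC version that
compiled the running Python, then fall back to the known versions.

import sys

KNOWN_VSVERS = [140, 100, 90, 80, 71, 70]

def candidate_versions():
    vers = list(KNOWN_VSVERS)
    msc_pos = sys.version.find('MSC v.')
    if msc_pos != -1:
        msc_ver = int(sys.version[msc_pos + 6:msc_pos + 10])
        vers.insert(0, msc_ver // 10 - 60)   # e.g. MSC v.1900 -> 140
    return vers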

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -26,3 +26,6 @@
 
 .. branch: fix-vmprof-stacklet-switch
 Fix a vmprof+continulets (i.e. greenelts, eventlet, gevent, ...)
+
+.. branch: win32-vcvars
+
diff --git a/rpython/translator/platform/windows.py 
b/rpython/translator/platform/windows.py
--- a/rpython/translator/platform/windows.py
+++ b/rpython/translator/platform/windows.py
@@ -65,12 +65,12 @@
 vcbindir = os.path.join(vcinstalldir, 'BIN')
 vcvars = os.path.join(vcbindir, 'amd64', 'vcvarsamd64.bat')
 else:
-vcvars = os.path.join(toolsdir, 'vcvars32.bat')
+vcvars = os.path.join(toolsdir, 'vsvars32.bat')
 if not os.path.exists(vcvars):
 # even msdn does not know which to run
 # see 
https://msdn.microsoft.com/en-us/library/1700bbwd(v=vs.90).aspx
 # wich names both
-vcvars = os.path.join(toolsdir, 'vsvars32.bat') 
+vcvars = os.path.join(toolsdir, 'vcvars32.bat') 
 
 import subprocess
 try:
@@ -92,25 +92,21 @@
 key, value = line.split('=', 1)
 if key.upper() in ['PATH', 'INCLUDE', 'LIB']:
 env[key.upper()] = value
-## log.msg("Updated environment with %s" % (vcvars,))
+log.msg("Updated environment with %s" % (vcvars,))
 return env
 
 def find_msvc_env(x64flag=False):
+vcvers = [140, 100, 90, 80, 71, 70]
 # First, try to get the compiler which served to compile python
 msc_pos = sys.version.find('MSC v.')
 if msc_pos != -1:
 msc_ver = int(sys.version[msc_pos+6:msc_pos+10])
-# 1300 -> 70, 1310 -> 71, 1400 -> 80, 1500 -> 90
+# 1500 -> 90, 1900 -> 140
 vsver = (msc_ver / 10) - 60
+vcvers.insert(0, vsver)
+errs = []
+for vsver in vcvers: 
 env = _get_msvc_env(vsver, x64flag)
-
-if env is not None:
-return env
-
-# Then, try any other version
-for vsver in (100, 90, 80, 71, 70): # All the versions I know
-env = _get_msvc_env(vsver, x64flag)
-
 if env is not None:
 return env
 log.error("Could not find a Microsoft Compiler")


[pypy-commit] pypy default: merge

2017-11-20 Thread fijal
Author: fijal
Branch: 
Changeset: r93100:21fd35c44d66
Date: 2017-11-20 16:42 +0100
http://bitbucket.org/pypy/pypy/changeset/21fd35c44d66/

Log:merge

diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py
--- a/rpython/annotator/annrpython.py
+++ b/rpython/annotator/annrpython.py
@@ -15,34 +15,10 @@
 typeof, s_ImpossibleValue, SomeInstance, intersection, difference)
 from rpython.annotator.bookkeeper import Bookkeeper
 from rpython.rtyper.normalizecalls import perform_normalizations
-from collections import deque
 
 log = AnsiLogger("annrpython")
 
 
-class ShuffleDict(object):
-def __init__(self):
-self._d = {}
-self.keys = deque()
-
-def __setitem__(self, k, v):
-if k in self._d:
-self._d[k] = v
-else:
-self._d[k] = v
-self.keys.append(k)
-
-def __getitem__(self, k):
-return self._d[k]
-
-def popitem(self):
-key = self.keys.popleft()
-item = self._d.pop(key)
-return (key, item)
-
-def __nonzero__(self):
-return bool(self._d)
-
 class RPythonAnnotator(object):
 """Block annotator for RPython.
 See description in doc/translation.txt."""
@@ -57,7 +33,7 @@
 translator = TranslationContext()
 translator.annotator = self
 self.translator = translator
-self.pendingblocks = ShuffleDict()  # map {block: graph-containing-it}
+self.genpendingblocks=[{}] # [{block: graph-containing-it}] * 
generation
 self.annotated = {}  # set of blocks already seen
 self.added_blocks = None # see processblock() below
 self.links_followed = {} # set of links that have ever been followed
@@ -81,7 +57,7 @@
 self.errors = []
 
 def __getstate__(self):
-attrs = """translator pendingblocks annotated links_followed
+attrs = """translator genpendingblocks annotated links_followed
 notify bookkeeper frozen policy added_blocks""".split()
 ret = self.__dict__.copy()
 for key, value in ret.items():
@@ -212,19 +188,47 @@
 else:
 self.mergeinputargs(graph, block, cells)
 if not self.annotated[block]:
-self.pendingblocks[block] = graph
+self.schedulependingblock(graph, block)
+
+def schedulependingblock(self, graph, block):
+# 'self.genpendingblocks' is a list of dictionaries which is
+# logically equivalent to just one dictionary.  But we keep a
+# 'generation' number on each block (=key), and whenever we
+# process a block, we increase its generation number.  The
+# block is added to the 'genpendingblocks' indexed by its
+# generation number.  See complete_pending_blocks() below.
+generation = getattr(block, 'generation', 0)
+self.genpendingblocks[generation][block] = graph
 
 def complete_pending_blocks(self):
-while self.pendingblocks:
-block, graph = self.pendingblocks.popitem()
-self.processblock(graph, block)
+while True:
+# Find the first of the dictionaries in 'self.genpendingblocks'
+# which is not empty
+gen = 0
+for pendingblocks in self.genpendingblocks:
+if pendingblocks:
+break
+gen += 1
+else:
+return# all empty => done
+
+gen += 1   # next generation number
+if len(self.genpendingblocks) == gen:
+self.genpendingblocks.append({})
+
+# Process all blocks at this level
+# (if any gets re-inserted, it will be into the next level)
+while pendingblocks:
+block, graph = pendingblocks.popitem()
+block.generation = gen
+self.processblock(graph, block)
 
 def complete(self):
 """Process pending blocks until none is left."""
 while True:
 self.complete_pending_blocks()
 self.policy.no_more_blocks_to_annotate(self)
-if not self.pendingblocks:
+if not any(self.genpendingblocks):
 break   # finished
 # make sure that the return variables of all graphs is annotated
 if self.added_blocks is not None:
@@ -410,7 +414,7 @@
 def reflowpendingblock(self, graph, block):
 assert not self.frozen
 assert graph not in self.fixed_graphs
-self.pendingblocks[block] = graph
+self.schedulependingblock(graph, block)
 assert block in self.annotated
 self.annotated[block] = False  # must re-flow
 self.blocked_blocks[block] = (graph, None)
diff --git a/rpython/flowspace/model.py b/rpython/flowspace/model.py
--- a/rpython/flowspace/model.py
+++ b/rpython/flowspace/model.py
@@ -170,7 +170,7 @@
 
 class Block(object):
 __slots__ = """inputargs operations exitswitch
-

[pypy-commit] pypy default: merge the fix-vmprof-stacklet-switch: make sure that vmprof does not segfault in presence of continuation.switch (and thus with greenlets, eventlet, etc.)

2017-11-15 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r93039:927cc69f4d52
Date: 2017-11-15 16:06 +0100
http://bitbucket.org/pypy/pypy/changeset/927cc69f4d52/

Log:merge the fix-vmprof-stacklet-switch: make sure that vmprof does not
segfault in the presence of continuation.switch (and thus with
greenlets, eventlet, etc.)
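
A hedged sketch of the crash scenario the branch guards against (adapted from
the test added below; it only runs on a translated PyPy that ships the _vmprof
and _continuation modules):

def switch_forever(c):
    while True:
        c.switch()

def stress(profile_path, iterations=10**6):
    import _vmprof, _continuation        # PyPy-only modules
    f = open(profile_path, 'w+b')
    _vmprof.enable(f.fileno(), 1 / 250.0, False, False, False, False)
    c = _continuation.continulet(switch_forever)
    for _ in range(iterations):
        c.switch()                       # must never be sampled mid-switch
    _vmprof.disable()
    f.close()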

diff --git a/pypy/module/_continuation/test/test_translated.py 
b/pypy/module/_continuation/test/test_translated.py
--- a/pypy/module/_continuation/test/test_translated.py
+++ b/pypy/module/_continuation/test/test_translated.py
@@ -5,6 +5,7 @@
 py.test.skip("to run on top of a translated pypy-c")
 
 import sys, random
+from rpython.tool.udir import udir
 
 # 
 
@@ -92,6 +93,33 @@
 from pypy.conftest import option
 if not option.runappdirect:
 py.test.skip("meant only for -A run")
+cls.w_vmprof_file = cls.space.wrap(str(udir.join('profile.vmprof')))
+
+def test_vmprof(self):
+"""
+The point of this test is to check that we do NOT segfault.  In
+particular, we need to ensure that vmprof does not sample the stack in
+the middle of a switch, else we read nonsense.
+"""
+try:
+import _vmprof
+except ImportError:
+py.test.skip("no _vmprof")
+#
+def switch_forever(c):
+while True:
+c.switch()
+#
+f = open(self.vmprof_file, 'w+b')
+_vmprof.enable(f.fileno(), 1/250.0, False, False, False, False)
+c = _continuation.continulet(switch_forever)
+for i in range(10**7):
+if i % 10 == 0:
+print i
+c.switch()
+_vmprof.disable()
+f.close()
+
 
 def _setup():
 for _i in range(20):
diff --git a/pypy/module/_vmprof/interp_vmprof.py 
b/pypy/module/_vmprof/interp_vmprof.py
--- a/pypy/module/_vmprof/interp_vmprof.py
+++ b/pypy/module/_vmprof/interp_vmprof.py
@@ -93,8 +93,8 @@
 return space.newtext(path)
 
 def stop_sampling(space):
-return space.newint(rvmprof.stop_sampling(space))
+return space.newint(rvmprof.stop_sampling())
 
 def start_sampling(space):
-rvmprof.start_sampling(space)
+rvmprof.start_sampling()
 return space.w_None
diff --git a/rpython/rlib/rstacklet.py b/rpython/rlib/rstacklet.py
--- a/rpython/rlib/rstacklet.py
+++ b/rpython/rlib/rstacklet.py
@@ -3,6 +3,7 @@
 from rpython.rlib import jit
 from rpython.rlib.objectmodel import fetch_translated_config
 from rpython.rtyper.lltypesystem import lltype, llmemory
+from rpython.rlib import rvmprof
 from rpython.rlib.rvmprof import cintf
 
 DEBUG = False
@@ -40,11 +41,13 @@
 def switch(self, stacklet):
 if DEBUG:
 debug.remove(stacklet)
+rvmprof.stop_sampling()
 x = cintf.save_rvmprof_stack()
 try:
 h = self._gcrootfinder.switch(stacklet)
 finally:
 cintf.restore_rvmprof_stack(x)
+rvmprof.start_sampling()
 if DEBUG:
 debug.add(h)
 return h
diff --git a/rpython/rlib/rvmprof/__init__.py b/rpython/rlib/rvmprof/__init__.py
--- a/rpython/rlib/rvmprof/__init__.py
+++ b/rpython/rlib/rvmprof/__init__.py
@@ -55,9 +55,9 @@
 
 return None
 
-def stop_sampling(space):
+def stop_sampling():
 fd = _get_vmprof().cintf.vmprof_stop_sampling()
 return rffi.cast(lltype.Signed, fd)
 
-def start_sampling(space):
+def start_sampling():
 _get_vmprof().cintf.vmprof_start_sampling()
diff --git a/rpython/rlib/test/test_rstacklet.py b/rpython/rlib/test/test_rstacklet.py
--- a/rpython/rlib/test/test_rstacklet.py
+++ b/rpython/rlib/test/test_rstacklet.py
@@ -10,6 +10,8 @@
 from rpython.config.translationoption import DEFL_ROOTFINDER_WITHJIT
 from rpython.rlib import rrandom, rgc
 from rpython.rlib.rarithmetic import intmask
+from rpython.rlib.nonconst import NonConstant
+from rpython.rlib import rvmprof
 from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
 from rpython.translator.c.test.test_standalone import StandaloneTests
 
@@ -273,7 +275,23 @@
 llmemory.raw_free(raw)
 
 
+# 
+# bah, we need to make sure that vmprof_execute_code is annotated, else
+# rvmprof.c does not compile correctly
+class FakeVMProfCode(object):
+pass
+rvmprof.register_code_object_class(FakeVMProfCode, lambda code: 'name')
+@rvmprof.vmprof_execute_code("xcode1", lambda code, num: code)
+def fake_vmprof_main(code, num):
+return 42
+# 
+
 def entry_point(argv):
+# 
+if NonConstant(False):
+fake_vmprof_main(FakeVMProfCode(), 42)
+# 
+#
 seed = 0
 if len(argv) > 1:
 seed = int(argv[1])
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
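
A note on the pattern (not part of the archived changeset): the switch() change above brackets the raw stack switch with rvmprof.stop_sampling()/start_sampling(), so that no profiling signal can arrive while the C stack is in flux. The sketch below only illustrates that pause-the-sampler-around-an-unsafe-region idea, with plain Python stand-ins for the rvmprof calls.

    from contextlib import contextmanager

    def stop_sampling():       # stand-in for rvmprof.stop_sampling()
        print("sampler paused")

    def start_sampling():      # stand-in for rvmprof.start_sampling()
        print("sampler resumed")

    @contextmanager
    def sampling_paused():
        stop_sampling()
        try:
            yield
        finally:
            start_sampling()

    def switch(do_switch):
        # the stack switch itself must never be interrupted by a profiling signal
        with sampling_paused():
            return do_switch()

    print(switch(lambda: "switched"))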


[pypy-commit] pypy default: merge heads

2017-11-14 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r93033:821e59360f37
Date: 2017-11-15 02:13 +0100
http://bitbucket.org/pypy/pypy/changeset/821e59360f37/

Log:merge heads

diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -340,16 +340,41 @@
 import sys
 from _continuation import continulet
 #
+def stack(f=None):
+"""
+get the call-stack of the caller or the specified frame
+"""
+if f is None:
+f = sys._getframe(1)
+res = []
+seen = set()
+while f:
+if f in seen:
+# frame loop
+res.append('...')
+break
+seen.add(f)
+res.append(f.f_code.co_name)
+f = f.f_back
+#print res
+return res
+
 def bar(c):
+assert stack() == ['bar', 'foo', 'test_f_back']
 c.switch(sys._getframe(0))
 c.switch(sys._getframe(0).f_back)
 c.switch(sys._getframe(1))
+#
+assert stack() == ['bar', 'foo', 'main', 'test_f_back']
 c.switch(sys._getframe(1).f_back)
+#
+assert stack() == ['bar', 'foo', 'main2', 'test_f_back']
 assert sys._getframe(2) is f3_foo.f_back
 c.switch(sys._getframe(2))
 def foo(c):
 bar(c)
 #
+assert stack() == ['test_f_back']
 c = continulet(foo)
 f1_bar = c.switch()
 assert f1_bar.f_code.co_name == 'bar'
@@ -358,14 +383,20 @@
 f3_foo = c.switch()
 assert f3_foo is f2_foo
 assert f1_bar.f_back is f3_foo
+#
 def main():
 f4_main = c.switch()
 assert f4_main.f_code.co_name == 'main'
 assert f3_foo.f_back is f1_bar# not running, so a loop
+assert stack() == ['main', 'test_f_back']
+assert stack(f1_bar) == ['bar', 'foo', '...']
+#
 def main2():
 f5_main2 = c.switch()
 assert f5_main2.f_code.co_name == 'main2'
 assert f3_foo.f_back is f1_bar# not running, so a loop
+assert stack(f1_bar) == ['bar', 'foo', '...']
+#
 main()
 main2()
 res = c.switch()
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
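
The stack() helper added to the test above is handy on its own when poking at how continulets rewire f_back. Here is a standalone copy that runs on plain CPython; only the inner()/outer() driver functions are made up for the example.

    import sys

    def stack(f=None):
        """Names of the frames from f (or the caller) upwards; '...' marks a
        frame loop such as the one a suspended continulet produces."""
        if f is None:
            f = sys._getframe(1)
        res, seen = [], set()
        while f:
            if f in seen:
                res.append('...')   # frame loop detected
                break
            seen.add(f)
            res.append(f.f_code.co_name)
            f = f.f_back
        return res

    def inner():
        return stack()

    def outer():
        return inner()

    print(outer())   # something like ['inner', 'outer', '<module>']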


[pypy-commit] pypy default: merge the vmprof-0.4.10 branch:

2017-11-08 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r92973:4b7ad9d4be0d
Date: 2017-11-08 17:47 +0100
http://bitbucket.org/pypy/pypy/changeset/4b7ad9d4be0d/

Log:merge the vmprof-0.4.10 branch:

- copy the recent changes to the C part of vmprof from github

- make sure that the tests are actually testing something: so far,
most of them were just silently skipped on the nightly buildbot :(

- test_native is broken: it has been broken since the merge of
vmprof-0.4.8, but we didn't notice

- I expect some tests to fail on weird architectures. Once we know
which, we can explicitly skip them

diff --git a/requirements.txt b/requirements.txt
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,5 @@
 cffi>=1.4.0
+vmprof>=0.4.10  # required to parse log files in rvmprof tests
 
 # hypothesis is used for test generation on untranslated tests
 hypothesis
diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_common.c b/rpython/rlib/rvmprof/src/shared/vmprof_common.c
--- a/rpython/rlib/rvmprof/src/shared/vmprof_common.c
+++ b/rpython/rlib/rvmprof/src/shared/vmprof_common.c
@@ -32,12 +32,21 @@
 static size_t threads_size = 0;
 static size_t thread_count = 0;
 static size_t threads_size_step = 8;
-#endif
 
 int vmprof_get_itimer_type(void) {
 return itimer_type;
 }
 
+int vmprof_get_signal_type(void) {
+return signal_type;
+}
+#endif
+
+#ifdef VMPROF_WINDOWS
+#include "vmprof_win.h"
+#endif
+
+
 int vmprof_is_enabled(void) {
 return is_enabled;
 }
@@ -62,10 +71,6 @@
 profile_interval_usec = value;
 }
 
-int vmprof_get_signal_type(void) {
-return signal_type;
-}
-
 char *vmprof_init(int fd, double interval, int memory,
   int proflines, const char *interp_name, int native, int 
real_time)
 {
diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_common.h b/rpython/rlib/rvmprof/src/shared/vmprof_common.h
--- a/rpython/rlib/rvmprof/src/shared/vmprof_common.h
+++ b/rpython/rlib/rvmprof/src/shared/vmprof_common.h
@@ -15,7 +15,9 @@
 #include 
 #endif
 
+#ifdef VMPROF_UNIX
 #include "vmprof_getpc.h"
+#endif
 
 #ifdef VMPROF_LINUX
 #include 
diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_memory.c b/rpython/rlib/rvmprof/src/shared/vmprof_memory.c
--- a/rpython/rlib/rvmprof/src/shared/vmprof_memory.c
+++ b/rpython/rlib/rvmprof/src/shared/vmprof_memory.c
@@ -8,7 +8,7 @@
 #include 
 
 static mach_port_t mach_task;
-#else
+#elif defined(VMPROF_UNIX)
 #include 
 #include 
 #include 
diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_unix.c b/rpython/rlib/rvmprof/src/shared/vmprof_unix.c
--- a/rpython/rlib/rvmprof/src/shared/vmprof_unix.c
+++ b/rpython/rlib/rvmprof/src/shared/vmprof_unix.c
@@ -41,8 +41,6 @@
 void vmprof_ignore_signals(int ignored)
 {
 if (ignored) {
-/* set the last bit, and wait until concurrently-running signal
-   handlers finish */
__sync_add_and_fetch(&signal_handler_ignore, 1L);
 while (signal_handler_entries != 0L) {
 usleep(1);
@@ -370,7 +368,7 @@
 goto error;
 if (install_sigprof_timer() == -1)
 goto error;
-vmprof_ignore_signals(0);
+signal_handler_ignore = 0;
 return 0;
 
  error:
@@ -394,7 +392,7 @@
 
 int vmprof_disable(void)
 {
-vmprof_ignore_signals(1);
+signal_handler_ignore = 1;
 vmprof_set_profile_interval_usec(0);
 #ifdef VMP_SUPPORTS_NATIVE_PROFILING
 disable_cpyprof();
diff --git a/rpython/rlib/rvmprof/src/shared/vmprof_win.c b/rpython/rlib/rvmprof/src/shared/vmprof_win.c
--- a/rpython/rlib/rvmprof/src/shared/vmprof_win.c
+++ b/rpython/rlib/rvmprof/src/shared/vmprof_win.c
@@ -1,7 +1,7 @@
-// cannot include this header because it also has definitions
-#include "windows.h"
-#include "compat.h"
-#include "vmp_stack.h"
+#include "vmprof_win.h"
+
+volatile int thread_started = 0;
+volatile int enabled = 0;
 
 HANDLE write_mutex;
 
@@ -12,7 +12,20 @@
 return 0;
 }
 
-#include 
+int vmprof_register_virtual_function(char *code_name, intptr_t code_uid,
+ int auto_retry)
+{
+char buf[2048];
+long namelen;
+
+namelen = (long)strnlen(code_name, 1023);
+buf[0] = MARKER_VIRTUAL_IP;
+*(intptr_t*)(buf + 1) = code_uid;
+*(long*)(buf + 1 + sizeof(intptr_t)) = namelen;
+memcpy(buf + 1 + sizeof(intptr_t) + sizeof(long), code_name, namelen);
+vmp_write_all(buf, 1 + sizeof(intptr_t) + sizeof(long) + namelen);
+return 0;
+}
 
 int vmp_write_all(const char *buf, size_t bufsize)
 {
@@ -40,3 +53,168 @@
 return 0;
 }
 
+HANDLE write_mutex;
+
+#include "vmprof_common.h"
+
+int vmprof_snapshot_thread(DWORD thread_id, PY_WIN_THREAD_STATE *tstate, prof_stacktrace_s *stack)
+{
+HRESULT result;
+HANDLE hThread;
+int depth;
+CONTEXT ctx;
+#ifdef RPYTHON_LL2CTYPES
+return 0; // not much we can do
+#else
+#if !defined(RPY_TLOFS_thread_ident) && defined(RPYTHON_VMPROF)
+return 0; // we can't freeze threads, unsafe
+#else
+
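
For orientation (an editor's sketch, not code from the branch): vmprof_register_virtual_function above emits a record made of a one-byte marker, the code uid, the name length and the name bytes. The Python below mimics that layout; the marker value and the exact field widths are assumptions here, the authoritative definitions live in the shared vmprof headers.

    import struct

    MARKER_VIRTUAL_IP = b"\x02"     # assumed marker value, see vmprof's headers

    def virtual_ip_record(code_uid, code_name):
        name = code_name.encode("ascii")[:1023]
        # 'q' stands in for intptr_t and 'l' for long (native sizes)
        return MARKER_VIRTUAL_IP + struct.pack("ql", code_uid, len(name)) + name

    print(len(virtual_ip_record(0x1234, "py:main")))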

[pypy-commit] pypy default: merge branch run-extra-tests

2017-11-03 Thread rlamy
Author: Ronan Lamy 
Branch: 
Changeset: r92922:a88ed18e1a6a
Date: 2017-11-03 16:51 +
http://bitbucket.org/pypy/pypy/changeset/a88ed18e1a6a/

Log:merge branch run-extra-tests

diff --git a/extra_tests/requirements.txt b/extra_tests/requirements.txt
new file mode 100644
--- /dev/null
+++ b/extra_tests/requirements.txt
@@ -0,0 +1,2 @@
+pytest
+hypothesis
diff --git a/extra_tests/test_failing.py b/extra_tests/test_failing.py
new file mode 100644
--- /dev/null
+++ b/extra_tests/test_failing.py
@@ -0,0 +1,8 @@
+from hypothesis import given, strategies
+
+def mean(a, b):
+return (a + b)/2.
+
+@given(strategies.integers(), strategies.integers())
+def test_mean_failing(a, b):
+assert mean(a, b) >= min(a, b)
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -17,3 +17,6 @@
 .. branch: bsd-patches
 Fix failures on FreeBSD, contributed by David Naylor as patches on the issue
 tracker (issues 2694, 2695, 2696, 2697)
+
+.. branch: run-extra-tests
+Run extra_tests/ in buildbot
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit
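
test_mean_failing in the new extra_tests/test_failing.py fails by design, which is exactly what makes it useful for checking that the buildbot really runs this directory. The concrete failure mode (an illustration, not part of the commit): float division loses precision for large integers, so the computed "mean" can end up below both inputs.

    def mean(a, b):
        return (a + b) / 2.

    a = b = 2**53 + 1               # 9007199254740993
    assert mean(a, b) == 2.0**53    # 9007199254740992.0, rounded down
    assert mean(a, b) < min(a, b)   # the property hypothesis checks is violated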


[pypy-commit] pypy default: merge bsd-patches which fixes failures on FreeBSD (David Naylor)

2017-11-02 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r92908:959da02fe2dc
Date: 2017-11-02 16:59 +0200
http://bitbucket.org/pypy/pypy/changeset/959da02fe2dc/

Log:merge bsd-patches which fixes failures on FreeBSD (David Naylor)

diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py
--- a/lib-python/2.7/ctypes/__init__.py
+++ b/lib-python/2.7/ctypes/__init__.py
@@ -360,14 +360,15 @@
 self._FuncPtr = _FuncPtr
 
 if handle is None:
-if flags & _FUNCFLAG_CDECL:
-pypy_dll = _ffi.CDLL(name, mode)
-else:
-pypy_dll = _ffi.WinDLL(name, mode)
-self.__pypy_dll__ = pypy_dll
-handle = int(pypy_dll)
-if _sys.maxint > 2 ** 32:
-handle = int(handle)   # long -> int
+handle = 0
+if flags & _FUNCFLAG_CDECL:
+pypy_dll = _ffi.CDLL(name, mode, handle)
+else:
+pypy_dll = _ffi.WinDLL(name, mode, handle)
+self.__pypy_dll__ = pypy_dll
+handle = int(pypy_dll)
+if _sys.maxint > 2 ** 32:
+handle = int(handle)   # long -> int
 self._handle = handle
 
 def __repr__(self):
diff --git a/pypy/module/_rawffi/alt/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py
--- a/pypy/module/_rawffi/alt/interp_funcptr.py
+++ b/pypy/module/_rawffi/alt/interp_funcptr.py
@@ -314,7 +314,7 @@
 # 
 
 class W_CDLL(W_Root):
-def __init__(self, space, name, mode):
+def __init__(self, space, name, mode, handle):
 self.flags = libffi.FUNCFLAG_CDECL
 self.space = space
 if name is None:
@@ -322,7 +322,7 @@
 else:
 self.name = name
 try:
-self.cdll = libffi.CDLL(name, mode)
+self.cdll = libffi.CDLL(name, mode, handle)
 except DLOpenError as e:
 raise wrap_dlopenerror(space, e, self.name)
 except OSError as e:
@@ -344,9 +344,9 @@
 def getidentifier(self, space):
 return space.newint(self.cdll.getidentifier())
 
-@unwrap_spec(name='fsencode_or_none', mode=int)
-def descr_new_cdll(space, w_type, name, mode=-1):
-return W_CDLL(space, name, mode)
+@unwrap_spec(name='fsencode_or_none', mode=int, handle=int)
+def descr_new_cdll(space, w_type, name, mode=-1, handle=0):
+return W_CDLL(space, name, mode, handle)
 
 
 W_CDLL.typedef = TypeDef(
@@ -359,13 +359,13 @@
 )
 
 class W_WinDLL(W_CDLL):
-def __init__(self, space, name, mode):
-W_CDLL.__init__(self, space, name, mode)
+def __init__(self, space, name, mode, handle):
+W_CDLL.__init__(self, space, name, mode, handle)
 self.flags = libffi.FUNCFLAG_STDCALL
 
-@unwrap_spec(name='fsencode_or_none', mode=int)
-def descr_new_windll(space, w_type, name, mode=-1):
-return W_WinDLL(space, name, mode)
+@unwrap_spec(name='fsencode_or_none', mode=int, handle=int)
+def descr_new_windll(space, w_type, name, mode=-1, handle=0):
+return W_WinDLL(space, name, mode, handle)
 
 
 W_WinDLL.typedef = TypeDef(
@@ -380,4 +380,4 @@
 # 
 
 def get_libc(space):
-return W_CDLL(space, get_libc_name(), -1)
+return W_CDLL(space, get_libc_name(), -1, 0)
diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py
--- a/pypy/module/_vmprof/test/test__vmprof.py
+++ b/pypy/module/_vmprof/test/test__vmprof.py
@@ -1,3 +1,4 @@
+import py
 import sys
 from rpython.tool.udir import udir
 from pypy.tool.pytest.objspace import gettestobjspace
@@ -107,6 +108,7 @@
 _vmprof.disable()
 assert _vmprof.is_enabled() is False
 
+@py.test.mark.xfail(sys.platform.startswith('freebsd'), reason = "not implemented")
 def test_get_profile_path(self):
 import _vmprof
 tmpfile = open(self.tmpfilename, 'wb')
diff --git a/pypy/module/termios/test/test_termios.py b/pypy/module/termios/test/test_termios.py
--- a/pypy/module/termios/test/test_termios.py
+++ b/pypy/module/termios/test/test_termios.py
@@ -7,9 +7,6 @@
 if os.name != 'posix':
 py.test.skip('termios module only available on unix')
 
-if sys.platform.startswith('freebsd'):
-raise Exception('XXX seems to hangs on FreeBSD9')
-
 class TestTermios(object):
 def setup_class(cls):
 try:
diff --git a/pypy/module/test_lib_pypy/pyrepl/__init__.py b/pypy/module/test_lib_pypy/pyrepl/__init__.py
--- a/pypy/module/test_lib_pypy/pyrepl/__init__.py
+++ b/pypy/module/test_lib_pypy/pyrepl/__init__.py
@@ -1,6 +1,3 @@
 import sys
 import lib_pypy.pyrepl
 sys.modules['pyrepl'] = sys.modules['lib_pypy.pyrepl']
-
-if sys.platform.startswith('freebsd'):
-raise Exception('XXX seems to hangs on FreeBSD9')
diff --git a/pypy/module/test_lib_pypy/pyrepl/test_readline.py b/pypy/module/test_lib_pypy/pyrepl/test_readline.py
--- 

[pypy-commit] pypy default: merge the branch keep-debug-symbols:

2017-11-01 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r92895:77fff565d382
Date: 2017-11-01 10:51 +0100
http://bitbucket.org/pypy/pypy/changeset/77fff565d382/

Log:merge the branch keep-debug-symbols:

- symbols are stripped from the executable and placed in a file
libpypy-c.so.debug

- we add a gnu-debug-link section to libpypy-c.so which points to
.debug, so that it works transparently in gdb

- this generates immensely more useful stack traces inside gdb;
moreover, it is also potentially usable by vmprof

- the .debug file is ~18MB. The tarball size goes from 22MB to 25MB.
I claim that disk space and bandwidth are cheap, so we should just
not care, especially for nightly builds

- if we REALLY care about the tarball size of official releases, we
can simply remove the .debug from the tarball

diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -21,6 +21,7 @@
 import fnmatch
 import subprocess
 import glob
+from pypy.tool.release.smartstrip import smartstrip
 
 if sys.version_info < (2,6): py.test.skip("requires 2.6 so far")
 
@@ -212,15 +213,8 @@
 old_dir = os.getcwd()
 try:
 os.chdir(str(builddir))
-if not options.nostrip:
-for source, target in binaries:
-if sys.platform == 'win32':
-pass
-elif sys.platform == 'darwin':
-# 'strip' fun: see issue #587 for why -x
-os.system("strip -x " + str(bindir.join(target)))# ignore errors
-else:
-os.system("strip " + str(bindir.join(target)))# ignore errors
+for source, target in binaries:
+smartstrip(bindir.join(target), keep_debug=options.keep_debug)
 #
 if USE_ZIPFILE_MODULE:
 import zipfile
@@ -281,8 +275,8 @@
help='do not build and package the %r cffi module' % (key,))
 parser.add_argument('--without-cffi', dest='no_cffi', action='store_true',
 help='skip building *all* the cffi modules listed above')
-parser.add_argument('--nostrip', dest='nostrip', action='store_true',
-help='do not strip the exe, making it ~10MB larger')
+parser.add_argument('--no-keep-debug', dest='keep_debug',
+action='store_false', help='do not keep debug symbols')
parser.add_argument('--rename_pypy_c', dest='pypy_c', type=str, default=pypy_exe,
 help='target executable name, defaults to "pypy"')
 parser.add_argument('--archive-name', dest='name', type=str, default='',
@@ -295,8 +289,8 @@
 help='use as pypy exe instead of pypy/goal/pypy-c')
 options = parser.parse_args(args)
 
-if os.environ.has_key("PYPY_PACKAGE_NOSTRIP"):
-options.nostrip = True
+if os.environ.has_key("PYPY_PACKAGE_NOKEEPDEBUG"):
+options.keep_debug = False
 if os.environ.has_key("PYPY_PACKAGE_WITHOUTTK"):
 options.no_tk = True
 if not options.builddir:
diff --git a/pypy/tool/release/smartstrip.py b/pypy/tool/release/smartstrip.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/release/smartstrip.py
@@ -0,0 +1,32 @@
+"""
+Strip symbols from an executable, but keep them in a .debug file
+"""
+
+import sys
+import os
+import py
+
+def _strip(exe):
+if sys.platform == 'win32':
+pass
+elif sys.platform == 'darwin':
+# 'strip' fun: see issue #587 for why -x
+os.system("strip -x " + str(exe))# ignore errors
+else:
+os.system("strip " + str(exe))   # ignore errors
+
+def _extract_debug_symbols(exe, debug):
+if sys.platform == 'linux2':
+os.system("objcopy --only-keep-debug %s %s" % (exe, debug))
+os.system("objcopy --add-gnu-debuglink=%s %s" % (debug, exe))
+
+def smartstrip(exe, keep_debug=True):
+exe = py.path.local(exe)
+debug = py.path.local(str(exe) + '.debug')
+if keep_debug:
+_extract_debug_symbols(exe, debug)
+_strip(exe)
+
+
+if __name__ == '__main__':
+smartstrip(sys.argv[1])
diff --git a/pypy/tool/release/test/test_smartstrip.py b/pypy/tool/release/test/test_smartstrip.py
new file mode 100644
--- /dev/null
+++ b/pypy/tool/release/test/test_smartstrip.py
@@ -0,0 +1,50 @@
+import pytest
+import sys
+import os
+from commands import getoutput
+from pypy.tool.release.smartstrip import smartstrip
+
+@pytest.fixture
+def exe(tmpdir):
+src = tmpdir.join("myprog.c")
+src.write("""
+int foo(int a, int b) {
+return a+b;
+}
+int main(void) { }
+""")
+exe = tmpdir.join("myprog")
+ret = os.system("gcc -o %s %s" % (exe, src))
+assert ret == 0
+return exe
+
+def info_symbol(exe, symbol):
+out = getoutput("gdb %s -ex 'info symbol %s' -ex 'quit'" % (exe, symbol))
+lines = out.splitlines()
+return lines[-1]
+
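
The smartstrip.py module above shells out to strip and objcopy. A self-contained sketch of the same two-step dance, runnable on a Linux box with binutils installed (the command names and flags come from the diff; the rest is illustrative):

    import subprocess
    import sys

    def split_debug(exe):
        """Move the symbols of exe into exe.debug and link them back via
        .gnu_debuglink, mirroring _extract_debug_symbols() plus _strip() above."""
        debug = exe + ".debug"
        subprocess.check_call(["objcopy", "--only-keep-debug", exe, debug])
        subprocess.check_call(["objcopy", "--add-gnu-debuglink=%s" % debug, exe])
        subprocess.check_call(["strip", exe])
        return debug

    if __name__ == "__main__":
        print(split_debug(sys.argv[1]))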

[pypy-commit] pypy default: merge cppyy-packaging with improved consistency for cppyy CPython <-> PyPy

2017-10-28 Thread wlav
Author: Wim Lavrijsen 
Branch: 
Changeset: r92872:0b8528722439
Date: 2017-10-28 15:55 -0700
http://bitbucket.org/pypy/pypy/changeset/0b8528722439/

Log:merge cppyy-packaging with improved consistency for cppyy CPython
<-> PyPy

diff too long, truncating to 2000 out of 3147 lines

diff --git a/pypy/module/_cppyy/__init__.py b/pypy/module/_cppyy/__init__.py
--- a/pypy/module/_cppyy/__init__.py
+++ b/pypy/module/_cppyy/__init__.py
@@ -7,7 +7,7 @@
 interpleveldefs = {
 '_resolve_name'  : 'interp_cppyy.resolve_name',
 '_scope_byname'  : 'interp_cppyy.scope_byname',
-'_template_byname'   : 'interp_cppyy.template_byname',
+'_is_template'   : 'interp_cppyy.is_template',
 '_std_string_name'   : 'interp_cppyy.std_string_name',
 '_set_class_generator'   : 'interp_cppyy.set_class_generator',
 '_set_function_generator': 'interp_cppyy.set_function_generator',
@@ -15,7 +15,9 @@
 '_get_nullptr'   : 'interp_cppyy.get_nullptr',
 'CPPClassBase'   : 'interp_cppyy.W_CPPClass',
 'addressof'  : 'interp_cppyy.addressof',
+'_bind_object'   : 'interp_cppyy._bind_object',
 'bind_object': 'interp_cppyy.bind_object',
+'move'   : 'interp_cppyy.move',
 }
 
 appleveldefs = {
diff --git a/pypy/module/_cppyy/capi/loadable_capi.py b/pypy/module/_cppyy/capi/loadable_capi.py
--- a/pypy/module/_cppyy/capi/loadable_capi.py
+++ b/pypy/module/_cppyy/capi/loadable_capi.py
@@ -217,7 +217,8 @@
 'method_req_args'  : ([c_scope, c_index], c_int),
 'method_arg_type'  : ([c_scope, c_index, c_int],  c_ccharp),
'method_arg_default'   : ([c_scope, c_index, c_int],  c_ccharp),
-'method_signature' : ([c_scope, c_index], c_ccharp),
+'method_signature' : ([c_scope, c_index, c_int],  c_ccharp),
+'method_prototype' : ([c_scope, c_index, c_int],  c_ccharp),
 
 'method_is_template'   : ([c_scope, c_index], c_int),
 'method_num_template_args' : ([c_scope, c_index], c_int),
@@ -498,9 +499,12 @@
 def c_method_arg_default(space, cppscope, index, arg_index):
 args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(arg_index)]
 return charp2str_free(space, call_capi(space, 'method_arg_default', args))
-def c_method_signature(space, cppscope, index):
-args = [_ArgH(cppscope.handle), _ArgL(index)]
+def c_method_signature(space, cppscope, index, show_formalargs=True):
+args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(show_formalargs)]
 return charp2str_free(space, call_capi(space, 'method_signature', args))
+def c_method_prototype(space, cppscope, index, show_formalargs=True):
+args = [_ArgH(cppscope.handle), _ArgL(index), _ArgL(show_formalargs)]
+return charp2str_free(space, call_capi(space, 'method_prototype', args))
 
 def c_method_is_template(space, cppscope, index):
 args = [_ArgH(cppscope.handle), _ArgL(index)]
diff --git a/pypy/module/_cppyy/converter.py b/pypy/module/_cppyy/converter.py
--- a/pypy/module/_cppyy/converter.py
+++ b/pypy/module/_cppyy/converter.py
@@ -4,7 +4,7 @@
 
 from rpython.rtyper.lltypesystem import rffi, lltype
 from rpython.rlib.rarithmetic import r_singlefloat, r_longfloat
-from rpython.rlib import rfloat
+from rpython.rlib import rfloat, rawrefcount
 
 from pypy.module._rawffi.interp_rawffi import letter2tp
 from pypy.module._rawffi.array import W_Array, W_ArrayInstance
@@ -21,9 +21,9 @@
 # match for the qualified type.
 
 
-def get_rawobject(space, w_obj):
+def get_rawobject(space, w_obj, can_be_None=True):
 from pypy.module._cppyy.interp_cppyy import W_CPPClass
-cppinstance = space.interp_w(W_CPPClass, w_obj, can_be_None=True)
+cppinstance = space.interp_w(W_CPPClass, w_obj, can_be_None=can_be_None)
 if cppinstance:
 rawobject = cppinstance.get_rawobject()
 assert lltype.typeOf(rawobject) == capi.C_OBJECT
@@ -48,17 +48,16 @@
 return capi.C_NULL_OBJECT
 
 def is_nullpointer_specialcase(space, w_obj):
-# 0, None, and nullptr may serve as "NULL", check for any of them
+# 0 and nullptr may serve as "NULL"
 
 # integer 0
 try:
 return space.int_w(w_obj) == 0
 except Exception:
 pass
-# None or nullptr
+# C++-style nullptr
 from pypy.module._cppyy import interp_cppyy
-return space.is_true(space.is_(w_obj, space.w_None)) or \
-space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space)))
+return space.is_true(space.is_(w_obj, interp_cppyy.get_nullptr(space)))
 
 def get_rawbuffer(space, w_obj):
 # raw buffer
@@ -74,7 +73,7 @@
 return rffi.cast(rffi.VOIDP, space.uint_w(arr.getbuffer(space)))
 except Exception:
 pass
-# pre-defined NULL
+# pre-defined nullptr

[pypy-commit] pypy default: merge heads

2017-10-09 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92685:386fcca6c58f
Date: 2017-10-09 16:54 +0200
http://bitbucket.org/pypy/pypy/changeset/386fcca6c58f/

Log:merge heads

diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py
--- a/pypy/module/cpyext/dictobject.py
+++ b/pypy/module/cpyext/dictobject.py
@@ -258,7 +258,7 @@
 if w_dict is None:
 return 0
 if not space.isinstance_w(w_dict, space.w_dict):
-return 0 
+return 0
 pos = ppos[0]
 py_obj = as_pyobj(space, w_dict)
 py_dict = rffi.cast(PyDictObject, py_obj)
@@ -266,6 +266,8 @@
 # Store the current keys in the PyDictObject.
 decref(space, py_dict.c__tmpkeys)
 w_keys = space.call_method(space.w_dict, "keys", w_dict)
+# w_keys must use the object strategy in order to keep the keys alive
+w_keys.switch_to_object_strategy()
 py_dict.c__tmpkeys = create_ref(space, w_keys)
 Py_IncRef(space, py_dict.c__tmpkeys)
 else:
@@ -278,10 +280,10 @@
 decref(space, py_dict.c__tmpkeys)
 py_dict.c__tmpkeys = lltype.nullptr(PyObject.TO)
 return 0
-w_key = space.listview(w_keys)[pos]
+w_key = space.listview(w_keys)[pos]  # fast iff w_keys uses object strat
 w_value = space.getitem(w_dict, w_key)
 if pkey:
-pkey[0]   = as_pyobj(space, w_key)
+pkey[0] = as_pyobj(space, w_key)
 if pvalue:
 pvalue[0] = as_pyobj(space, w_value)
 return 1
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -15,7 +15,7 @@
 from rpython.rlib.objectmodel import keepalive_until_here
 from rpython.rtyper.annlowlevel import llhelper
 from rpython.rlib import rawrefcount, jit
-from rpython.rlib.debug import fatalerror
+from rpython.rlib.debug import ll_assert, fatalerror
 
 
 #
@@ -243,6 +243,11 @@
 py_obj = rawrefcount.from_obj(PyObject, w_obj)
 if not py_obj:
 py_obj = create_ref(space, w_obj, w_userdata, immortal=immortal)
+#
+# Try to crash here, instead of randomly, if we don't keep w_obj alive
+ll_assert(py_obj.c_ob_refcnt >= rawrefcount.REFCNT_FROM_PYPY,
+  "Bug in cpyext: The W_Root object was garbage-collected "
+  "while being converted to PyObject.")
 return py_obj
 else:
 return lltype.nullptr(PyObject.TO)
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2017-10-08 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92649:aedd9ba91c12
Date: 2017-10-08 08:22 +0200
http://bitbucket.org/pypy/pypy/changeset/aedd9ba91c12/

Log:merge heads

diff --git a/pypy/module/_cppyy/test/conftest.py b/pypy/module/_cppyy/test/conftest.py
--- a/pypy/module/_cppyy/test/conftest.py
+++ b/pypy/module/_cppyy/test/conftest.py
@@ -4,7 +4,7 @@
 def pytest_runtest_setup(item):
 if py.path.local.sysfind('genreflex') is None:
 import pypy.module._cppyy.capi.loadable_capi as lcapi
-if 'dummy' in lcapi.reflection_library:
+if 'dummy' in lcapi.backend_library:
 # run only tests that are covered by the dummy backend and tests
 # that do not rely on reflex
 import os
@@ -33,7 +33,7 @@
 import pypy.module._cppyy.capi.loadable_capi as lcapi
 try:
 import ctypes
-ctypes.CDLL(lcapi.reflection_library)
+ctypes.CDLL(lcapi.backend_library)
 except Exception as e:
 if config.option.runappdirect:
 return   # "can't run dummy tests in -A"
@@ -71,4 +71,4 @@
 return
 raise
 
-lcapi.reflection_library = str(soname)
+lcapi.backend_library = str(soname)
diff --git a/rpython/doc/jit/optimizer.rst b/rpython/doc/jit/optimizer.rst
--- a/rpython/doc/jit/optimizer.rst
+++ b/rpython/doc/jit/optimizer.rst
@@ -42,10 +42,9 @@
There are better ways to compute the sum from ``[0..100]``, but it gives a better intuition on how
 traces are constructed than ``sum(range(101))``.
 Note that the trace syntax is the one used in the test suite. It is also very
-similar to traces printed at runtime by PYPYLOG_. The first line gives the input variables, the
second line is a ``label`` operation, the last one is the backwards ``jump`` operation.
-
-.. _PYPYLOG: logging.html
+similar to traces printed at runtime by :doc:`PYPYLOG <../logging>`. The first
+line gives the input variables, the second line is a ``label`` operation, the
+last one is the backwards ``jump`` operation.
 
 These instructions mentioned earlier are special:
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge the cpyext-jit; this makes cpyext calls up to 7x faster in very simple cases, see whatsnew for details

2017-10-01 Thread antocuni
Author: Antonio Cuni 
Branch: 
Changeset: r92538:9f9989a1ffe6
Date: 2017-10-01 22:25 +0200
http://bitbucket.org/pypy/pypy/changeset/9f9989a1ffe6/

Log:merge the cpyext-jit; this makes cpyext calls up to 7x faster in
very simple cases, see whatsnew for details

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -4,3 +4,12 @@
 
 .. this is a revision shortly after release-pypy2.7-v5.9.0
 .. startrev:899e5245de1e
+
+.. branch: cpyext-jit
+
+Differentiate the code to call METH_NOARGS, METH_O and METH_VARARGS in cpyext:
+this allows to write specialized code which is much faster than previous
+completely generic version. Moreover, let the JIT to look inside the cpyext
+module: the net result is that cpyext calls are up to 7x faster. However, this
+is true only for very simple situations: in all real life code, we are still
+much slower than CPython (more optimizations to come)
diff --git a/pypy/module/_cffi_backend/cffi1_module.py b/pypy/module/_cffi_backend/cffi1_module.py
--- a/pypy/module/_cffi_backend/cffi1_module.py
+++ b/pypy/module/_cffi_backend/cffi1_module.py
@@ -1,4 +1,5 @@
 from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rlib import jit
 
 from pypy.interpreter.error import oefmt
 from pypy.interpreter.module import Module
@@ -15,7 +16,7 @@
 
 INITFUNCPTR = lltype.Ptr(lltype.FuncType([rffi.VOIDPP], lltype.Void))
 
-
+@jit.dont_look_inside
 def load_cffi1_module(space, name, path, initptr):
 # This is called from pypy.module.cpyext.api.load_extension_module()
 from pypy.module._cffi_backend.call_python import get_ll_cffi_call_python
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -449,6 +449,11 @@
 if func.__name__ in FUNCTIONS_BY_HEADER[header]:
 raise ValueError("%s already registered" % func.__name__)
 func._always_inline_ = 'try'
+#
+# XXX: should we @jit.dont_look_inside all the @cpython_api functions,
+# or we should only disable some of them?
+func._jit_look_inside_ = False
+#
 api_function = ApiFunction(
 argtypes, restype, func,
 error=_compute_error(error, restype), gil=gil,
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -1,4 +1,5 @@
 from rpython.rtyper.lltypesystem import lltype, rffi
+from rpython.rlib import jit
 
 from pypy.interpreter.baseobjspace import W_Root
 from pypy.interpreter.error import OperationError, oefmt
@@ -42,8 +43,8 @@
 from pypy.module.cpyext.object import _dealloc
 _dealloc(space, py_obj)
 
-
 class W_PyCFunctionObject(W_Root):
+# TODO create a slightly different class depending on the c_ml_flags
 def __init__(self, space, ml, w_self, w_module=None):
 self.ml = ml
 self.name = rffi.charp2str(rffi.cast(rffi.CCHARP,self.ml.c_ml_name))
@@ -56,7 +57,7 @@
 w_self = self.w_self
 flags = rffi.cast(lltype.Signed, self.ml.c_ml_flags)
 flags &= ~(METH_CLASS | METH_STATIC | METH_COEXIST)
-if space.is_true(w_kw) and not flags & METH_KEYWORDS:
+if not flags & METH_KEYWORDS and space.is_true(w_kw):
 raise oefmt(space.w_TypeError,
 "%s() takes no keyword arguments", self.name)
 
@@ -96,6 +97,20 @@
 else:
 return space.w_None
 
+class W_PyCFunctionObjectNoArgs(W_PyCFunctionObject):
+def call(self, space, w_self, w_args, w_kw):
+# Call the C function
+if w_self is None:
+w_self = self.w_self
+func = self.ml.c_ml_meth
+return generic_cpy_call(space, func, w_self, None)
+
+class W_PyCFunctionObjectSingleObject(W_PyCFunctionObject):
+def call(self, space, w_self, w_o, w_kw):
+if w_self is None:
+w_self = self.w_self
+func = self.ml.c_ml_meth
+return generic_cpy_call(space, func, w_self, w_o)
 
 class W_PyCMethodObject(W_PyCFunctionObject):
 w_self = None
@@ -215,6 +230,7 @@
   (self.method_name,
self.w_objclass.name))
 
+@jit.dont_look_inside
 def cwrapper_descr_call(space, w_self, __args__):
 self = space.interp_w(W_PyCWrapperObject, w_self)
 args_w, kw_w = __args__.unpack()
@@ -225,10 +241,22 @@
 space.setitem(w_kw, space.newtext(key), w_obj)
 return self.call(space, w_self, w_args, w_kw)
 
+def cfunction_descr_call_noargs(space, w_self):
+# special case for calling with flags METH_NOARGS
+self = space.interp_w(W_PyCFunctionObjectNoArgs, w_self)
+return self.call(space, None, None, None)
 
+def cfunction_descr_call_single_object(space, w_self, w_o):
+# special case for calling with flags METH_O
+self = 
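
The two new wrapper classes above carry the main idea of the cpyext-jit branch: look at the METH_* flags once, when the function object is created, and pick a specialised call path, instead of re-checking the flags on every call. A rough pure-Python picture of that dispatch (the flag values are CPython's; the classes are only stand-ins for the W_PyCFunctionObject variants):

    METH_VARARGS, METH_NOARGS, METH_O = 0x0001, 0x0004, 0x0008

    class GenericCall(object):
        def call(self, func, args):
            return func(*args)

    class NoArgsCall(object):
        def call(self, func, args):
            return func()            # no argument tuple to unpack at call time

    class SingleObjectCall(object):
        def call(self, func, args):
            return func(args[0])     # exactly one argument, passed directly

    def make_wrapper(flags):
        # the specialised class is chosen once, based on the method flags
        if flags & METH_NOARGS:
            return NoArgsCall()
        if flags & METH_O:
            return SingleObjectCall()
        return GenericCall()

    print(make_wrapper(METH_O).call(len, (["a", "b"],)))   # prints 2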

[pypy-commit] pypy default: merge heads

2017-09-29 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92508:c4d6cc4a81fe
Date: 2017-09-29 17:52 +0200
http://bitbucket.org/pypy/pypy/changeset/c4d6cc4a81fe/

Log:merge heads

diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -10,7 +10,8 @@
 from pypy.module.cpyext.api import (
 CONST_STRING, METH_CLASS, METH_COEXIST, METH_KEYWORDS, METH_NOARGS, METH_O,
 METH_STATIC, METH_VARARGS, PyObject, bootstrap_function,
-cpython_api, generic_cpy_call, CANNOT_FAIL, slot_function, cts)
+cpython_api, generic_cpy_call, CANNOT_FAIL, slot_function, cts,
+build_type_checkers)
 from pypy.module.cpyext.pyobject import (
 Py_DecRef, from_ref, make_ref, as_pyobj, make_typedescr)
 
@@ -136,6 +137,10 @@
 ret = self.call(space, w_instance, w_args, w_kw)
 return ret
 
+# PyPy addition, for Cython
+_, _ = build_type_checkers("MethodDescr", W_PyCMethodObject)
+
+
 @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
 def PyCFunction_Check(space, w_obj):
 from pypy.interpreter.function import BuiltinFunction
@@ -162,6 +167,7 @@
 (self.name, self.w_objclass.getname(self.space)))
 
 
+
 class W_PyCWrapperObject(W_Root):
 def __init__(self, space, pto, method_name, wrapper_func,
  wrapper_func_kwds, doc, func, offset=None):
diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c
--- a/pypy/module/cpyext/test/foo.c
+++ b/pypy/module/cpyext/test/foo.c
@@ -83,19 +83,32 @@
 return cls;
 }
 
+// for CPython
+#ifndef PyMethodDescr_Check
+int PyMethodDescr_Check(PyObject* method)
+{
+PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append");
+if (!meth) return 0;
+int res = PyObject_TypeCheck(method, meth->ob_type);
+Py_DECREF(meth);
+return res;
+}
+#endif
+
 PyObject* make_classmethod(PyObject* method)
 {
 // adapted from __Pyx_Method_ClassMethod
-if (PyObject_TypeCheck(method, _Type)) {
-return PyClassMethod_New(method);
+if (PyMethodDescr_Check(method)) {
+PyMethodDescrObject *descr = (PyMethodDescrObject *)method;
+PyTypeObject *d_type = descr->d_type;
+return PyDescr_NewClassMethod(d_type, descr->d_method);
 }
 else if (PyMethod_Check(method)) {
 return PyClassMethod_New(PyMethod_GET_FUNCTION(method));
 }
 else {
-PyMethodDescrObject *descr = (PyMethodDescrObject *)method;
-PyTypeObject *d_type = descr->d_type;
-return PyDescr_NewClassMethod(d_type, descr->d_method);
+PyErr_SetString(PyExc_TypeError, "unknown method kind");
+return NULL;
 }
 }
 
@@ -825,6 +838,8 @@
 
fake_classmeth = PyDict_GetItemString((PyObject *)fooType.tp_dict, "fake_classmeth");
 classmeth = make_classmethod(fake_classmeth);
+if (classmeth == NULL)
+INITERROR;
if (PyDict_SetItemString((PyObject *)fooType.tp_dict, "fake_classmeth", classmeth) < 0)
 INITERROR;
 
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2017-09-19 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92418:a9eafbdf3c28
Date: 2017-09-19 12:13 +0200
http://bitbucket.org/pypy/pypy/changeset/a9eafbdf3c28/

Log:merge heads

diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -120,7 +120,7 @@
 Download the versions of all the external packages from
 https://bitbucket.org/pypy/pypy/downloads/local_59.zip
 (for post-5.8 builds) with sha256 checksum
-``0f96c045db1f5f73ad0fae7857caa69c261324bd8e51f6d2ad1fa842c4a5f26f``
+``6344230e90ab7a9cb84efbae1ba22051cdeeb40a31823e0808545b705aba8911``
 https://bitbucket.org/pypy/pypy/downloads/local_5.8.zip
 (to reproduce 5.8 builds) with sha256 checksum 
 ``fbe769bf3a4ab6f5a8b0a05b61930fc7f37da2a9a85a8f609cf5a9bad06e2554`` or
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -109,21 +109,27 @@
 return self.space.unwrap(self.descr_method_repr())
 
 def descr_method_repr(self):
+w_objclass = self.w_objclass 
+assert isinstance(w_objclass, W_TypeObject)
 return self.space.newtext("" % (
-self.name, self.w_objclass.getname(self.space)))
+self.name, w_objclass.name))
 
 def descr_call(self, space, __args__):
 args_w, kw_w = __args__.unpack()
 if len(args_w) < 1:
+w_objclass = self.w_objclass 
+assert isinstance(w_objclass, W_TypeObject)
 raise oefmt(space.w_TypeError,
 "descriptor '%s' of '%s' object needs an argument",
-self.name, self.w_objclass.getname(self.space))
+self.name, w_objclass.name)
 w_instance = args_w[0]
 # XXX: needs a stricter test
 if not space.isinstance_w(w_instance, self.w_objclass):
+w_objclass = self.w_objclass 
+assert isinstance(w_objclass, W_TypeObject)
 raise oefmt(space.w_TypeError,
 "descriptor '%s' requires a '%s' object but received a '%T'",
-self.name, self.w_objclass.getname(self.space), w_instance)
+self.name, w_objclass.name, w_instance)
 w_args = space.newtuple(args_w[1:])
 w_kw = space.newdict()
 for key, w_obj in kw_w.items():
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -91,13 +91,14 @@
 'float_info': 'system.get_float_info(space)',
 'long_info' : 'system.get_long_info(space)',
 'float_repr_style'  : 'system.get_float_repr_style(space)',
-'getdlopenflags': 'system.getdlopenflags',
-'setdlopenflags': 'system.setdlopenflags',
 }
 
 if sys.platform == 'win32':
 interpleveldefs['winver'] = 'version.get_winver(space)'
 interpleveldefs['getwindowsversion'] = 'vm.getwindowsversion'
+else:
+interpleveldefs['getdlopenflags'] = 'system.getdlopenflags'
+interpleveldefs['setdlopenflags'] = 'system.setdlopenflags'
 
 appleveldefs = {
 'excepthook': 'app.excepthook',
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2017-09-13 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92379:d8cf1127fabb
Date: 2017-09-13 22:41 +0200
http://bitbucket.org/pypy/pypy/changeset/d8cf1127fabb/

Log:merge heads

diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -209,17 +209,85 @@
 The expat XML parser
 
 
-Download the source code of expat on sourceforge:
-https://github.com/libexpat/libexpat/releases and extract it in the base directory.
-Version 2.1.1 is known to pass tests. Then open the project file ``expat.dsw``
-with Visual Studio; follow the instruction for converting the project files,
-switch to the "Release" configuration, use the ``expat_static`` project,
-reconfigure the runtime for Multi-threaded DLL (/MD) and build. Do the same for
-the ``expat`` project to build the ``expat.dll`` (for tests via ll2ctypes)
+CPython compiles expat from source as part of the build. PyPy uses the same
+code base, but expects to link to a static lib of expat. Here are instructions
+to reproduce the static lib in version 2.2.4.
 
-Then, copy the file ``win32\bin\release\libexpat.lib`` into
-LIB, and both ``lib\expat.h`` and ``lib\expat_external.h`` in
-INCLUDE, and ``win32\bin\release\libexpat.dll`` into PATH.
+Download the source code of expat: https://github.com/libexpat/libexpat. 
+``git checkout`` the proper tag, in this case ``R_2_2_4``. Run
+``vcvars.bat`` to set up the visual compiler tools, and CD into the source
+directory. Create a file ``stdbool.h`` with the content
+
+.. code-block:: c
+
+#pragma once
+
+#define false   0
+#define true1
+
+#define bool int
+
+and put it in a place on the ``INCLUDE`` path, or create it in the local
+directory and add ``.`` to the ``INCLUDE`` path::
+
+SET INCLUDE=%INCLUDE%;.
+
+Then compile all the ``*.c`` file into ``*.obj``::
+
+cl.exe /nologo /MD  /O2 *c /c
+rem for debug
+cl.exe /nologo /MD  /O0 /Ob0 /Zi *c /c
+
+You may need to move some variable declarations to the beginning of the
+function, to be compliant with C89 standard. Here is the diff for version 2.2.4
+
+.. code-block:: diff
+
+diff --git a/expat/lib/xmltok.c b/expat/lib/xmltok.c
+index 007aed0..a2dcaad 100644
+--- a/expat/lib/xmltok.c
++++ b/expat/lib/xmltok.c
+@@ -399,19 +399,21 @@ utf8_toUtf8(const ENCODING *UNUSED_P(enc),
+   /* Avoid copying partial characters (due to limited space). */
+   const ptrdiff_t bytesAvailable = fromLim - *fromP;
+   const ptrdiff_t bytesStorable = toLim - *toP;
++  const char * fromLimBefore;
++  ptrdiff_t bytesToCopy;
+   if (bytesAvailable > bytesStorable) {
+ fromLim = *fromP + bytesStorable;
+ output_exhausted = true;
+   }
+
+   /* Avoid copying partial characters (from incomplete input). */
+-  const char * const fromLimBefore = fromLim;
++  fromLimBefore = fromLim;
+   align_limit_to_full_utf8_characters(*fromP, );
+   if (fromLim < fromLimBefore) {
+ input_incomplete = true;
+   }
+
+-  const ptrdiff_t bytesToCopy = fromLim - *fromP;
++  bytesToCopy = fromLim - *fromP;
+   memcpy((void *)*toP, (const void *)*fromP, (size_t)bytesToCopy);
+   *fromP += bytesToCopy;
+   *toP += bytesToCopy;
+
+
+Create ``libexpat.lib`` (for translation) and ``libexpat.dll`` (for tests)::
+
+cl /LD *.obj libexpat.def /Felibexpat.dll 
+rem for debug
+rem cl /LDd /Zi *.obj libexpat.def /Felibexpat.dll
+
+rem this will override the export library created in the step above
+rem but tests do not need the export library, they load the dll dynamically
+lib *.obj /out:libexpat.lib
+
+Then, copy 
+
+- ``libexpat.lib`` into LIB
+- both ``lib\expat.h`` and ``lib\expat_external.h`` in INCLUDE
+- ``libexpat.dll`` into PATH
 
 
 The OpenSSL library
@@ -363,7 +431,7 @@
 It is probably not too much work if the goal is only to get a translated
 PyPy executable, and to run all tests before translation.  But you need
 to start somewhere, and you should start with some tests in
-rpython/translator/c/test/, like ``test_standalone.py`` and
+``rpython/translator/c/test/``, like ``test_standalone.py`` and
 ``test_newgc.py``: try to have them pass on top of CPython64/64.
 
 Keep in mind that this runs small translations, and some details may go
@@ -373,7 +441,7 @@
 should be something like ``long long``.
 
 What is more generally needed is to review all the C files in
-rpython/translator/c/src for the word ``long``, because this means a
+``rpython/translator/c/src`` for the word ``long``, because this means a
 32-bit integer even on Win64.  Replace it with ``Signed`` most of the
 times.  You can replace one with the other without breaking anything on
 any other platform, so feel free to.
___
pypy-commit mailing list
pypy-commit@python.org
https://mail.python.org/mailman/listinfo/pypy-commit


[pypy-commit] pypy default: merge heads

2017-09-08 Thread arigo
Author: Armin Rigo 
Branch: 
Changeset: r92359:a1608b11c5da
Date: 2017-09-08 17:22 +0200
http://bitbucket.org/pypy/pypy/changeset/a1608b11c5da/

Log:merge heads

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -77,3 +77,7 @@
 .. branch: pypy_swappedbytes
 
 Added ``_swappedbytes_`` support for ``ctypes.Structure``
+
+.. branch: pycheck-macros
+
+Convert many Py*_Check cpyext functions into macros, like CPython.
diff --git a/pypy/module/_csv/interp_csv.py b/pypy/module/_csv/interp_csv.py
--- a/pypy/module/_csv/interp_csv.py
+++ b/pypy/module/_csv/interp_csv.py
@@ -29,10 +29,15 @@
 return default
 return space.is_true(w_src)
 
-def _get_int(space, w_src, default):
+def _get_int(space, w_src, default, attrname):
 if w_src is None:
 return default
-return space.int_w(w_src)
+try:
+return space.int_w(w_src)
+except OperationError as e:
+if e.match(space, space.w_TypeError):
+raise oefmt(space.w_TypeError, '"%s" must be a string', attrname)
+raise
 
 def _get_str(space, w_src, default, attrname):
 if w_src is None:
@@ -100,7 +105,7 @@
 dialect.escapechar = _get_char(space, w_escapechar, '\0', 'escapechar')
dialect.lineterminator = _get_str(space, w_lineterminator, '\r\n', 'lineterminator')
 dialect.quotechar = _get_char(space, w_quotechar, '"', 'quotechar')
-tmp_quoting = _get_int(space, w_quoting, QUOTE_MINIMAL)
+tmp_quoting = _get_int(space, w_quoting, QUOTE_MINIMAL, 'quoting')
 dialect.skipinitialspace = _get_bool(space, w_skipinitialspace, False)
 dialect.strict = _get_bool(space, w_strict, False)
 
diff --git a/pypy/module/_csv/test/test_dialect.py b/pypy/module/_csv/test/test_dialect.py
--- a/pypy/module/_csv/test/test_dialect.py
+++ b/pypy/module/_csv/test/test_dialect.py
@@ -65,7 +65,8 @@
 name = attempt[0]
 for value in attempt[1:]:
 kwargs = {name: value}
-raises(TypeError, _csv.register_dialect, 'foo1', **kwargs)
+exc_info = raises(TypeError, _csv.register_dialect, 'foo1', **kwargs)
+assert name in exc_info.value.args[0]
 
exc_info = raises(TypeError, _csv.register_dialect, 'foo1', lineterminator=4)
 assert exc_info.value.args[0] == '"lineterminator" must be a string'
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -129,6 +129,11 @@
 Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS
 PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_WRITABLE
 """.split()
+
+for name in ('INT', 'LONG', 'LIST', 'TUPLE', 'UNICODE', 'DICT', 'BASE_EXC',
+ 'TYPE', 'STRING'): # 'STRING' -> 'BYTES' in py3
+constant_names.append('Py_TPFLAGS_%s_SUBCLASS' % name)
+
 for name in constant_names:
 setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
 globals().update(rffi_platform.configure(CConfig_constants))
@@ -749,6 +754,45 @@
 
 return check, check_exact
 
+def build_type_checkers_flags(type_name, cls=None, flagsubstr=None):
+"""
+Builds two api functions: Py_XxxCheck() and Py_XxxCheckExact()
+Does not export the functions, assumes they are macros in the *. files
+check will try a fast path via pto flags
+"""
+if cls is None:
+attrname = "w_" + type_name.lower()
+def get_w_type(space):
+return getattr(space, attrname)
+else:
+def get_w_type(space):
+return getattr(space, cls)
+if flagsubstr is None:
+   tp_flag_str = 'Py_TPFLAGS_%s_SUBCLASS' % type_name.upper()
+else:
+   tp_flag_str = 'Py_TPFLAGS_%s_SUBCLASS' % flagsubstr
+check_name = "Py" + type_name + "_Check"
+tp_flag = globals()[tp_flag_str]
+
+@specialize.argtype(1)
+def check(space, pto):
+from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj
+"Implements the Py_Xxx_Check function"
+if is_pyobj(pto):
+return (pto.c_ob_type.c_tp_flags & tp_flag) == tp_flag
+w_obj_type = space.type(pto)
+w_type = get_w_type(space)
+return (space.is_w(w_obj_type, w_type) or
+space.issubtype_w(w_obj_type, w_type))
+
+def check_exact(space, w_obj):
+"Implements the Py_Xxx_CheckExact function"
+w_obj_type = space.type(w_obj)
+w_type = get_w_type(space)
+return space.is_w(w_obj_type, w_type)
+
+return check, check_exact
+
 pypy_debug_catch_fatal_exception = 
rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void)
 
 
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -1,7 +1,7 @@
 from pypy.interpreter.error import oefmt
 from rpython.rtyper.lltypesystem import rffi, lltype
 from 
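
build_type_checkers_flags above gives the Py*_Check functions a fast path that only tests a bit in tp_flags instead of asking the object space about subtyping. Stripped of the cpyext machinery, that fast path is a plain bitmask test, roughly like this (the flag value is the one CPython's object.h uses for Py_TPFLAGS_DICT_SUBCLASS; everything else is illustrative):

    Py_TPFLAGS_DICT_SUBCLASS = 1 << 29   # as in CPython's object.h

    def fast_dict_check(tp_flags):
        # a dict or dict subclass has the bit set in its type's tp_flags
        return (tp_flags & Py_TPFLAGS_DICT_SUBCLASS) == Py_TPFLAGS_DICT_SUBCLASS

    assert fast_dict_check(Py_TPFLAGS_DICT_SUBCLASS | 0x200)   # dict subclass
    assert not fast_dict_check(0x200)                          # unrelated type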

[pypy-commit] pypy default: merge pycheck-macros, which avoids cpyext for many Py*_Check function calls

2017-09-08 Thread mattip
Author: Matti Picus 
Branch: 
Changeset: r92354:2782c2c4ec0b
Date: 2017-09-08 17:25 +0300
http://bitbucket.org/pypy/pypy/changeset/2782c2c4ec0b/

Log:merge pycheck-macros, which avoids cpyext for many Py*_Check
function calls

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -77,3 +77,7 @@
 .. branch: pypy_swappedbytes
 
 Added ``_swappedbytes_`` support for ``ctypes.Structure``
+
+.. branch: pycheck-macros
+
+Convert many Py*_Check cpyext functions into macros, like CPython.
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -129,6 +129,11 @@
 Py_LT Py_LE Py_EQ Py_NE Py_GT Py_GE Py_TPFLAGS_CHECKTYPES Py_MAX_NDIMS
 PyBUF_FORMAT PyBUF_ND PyBUF_STRIDES PyBUF_WRITABLE
 """.split()
+
+for name in ('INT', 'LONG', 'LIST', 'TUPLE', 'UNICODE', 'DICT', 'BASE_EXC',
+ 'TYPE', 'STRING'): # 'STRING' -> 'BYTES' in py3
+constant_names.append('Py_TPFLAGS_%s_SUBCLASS' % name)
+
 for name in constant_names:
 setattr(CConfig_constants, name, rffi_platform.ConstantInteger(name))
 globals().update(rffi_platform.configure(CConfig_constants))
@@ -749,6 +754,45 @@
 
 return check, check_exact
 
+def build_type_checkers_flags(type_name, cls=None, flagsubstr=None):
+"""
+Builds two api functions: Py_XxxCheck() and Py_XxxCheckExact()
+Does not export the functions, assumes they are macros in the *. files
+check will try a fast path via pto flags
+"""
+if cls is None:
+attrname = "w_" + type_name.lower()
+def get_w_type(space):
+return getattr(space, attrname)
+else:
+def get_w_type(space):
+return getattr(space, cls)
+if flagsubstr is None:
+   tp_flag_str = 'Py_TPFLAGS_%s_SUBCLASS' % type_name.upper()
+else:
+   tp_flag_str = 'Py_TPFLAGS_%s_SUBCLASS' % flagsubstr
+check_name = "Py" + type_name + "_Check"
+tp_flag = globals()[tp_flag_str]
+
+@specialize.argtype(1)
+def check(space, pto):
+from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj
+"Implements the Py_Xxx_Check function"
+if is_pyobj(pto):
+return (pto.c_ob_type.c_tp_flags & tp_flag) == tp_flag
+w_obj_type = space.type(pto)
+w_type = get_w_type(space)
+return (space.is_w(w_obj_type, w_type) or
+space.issubtype_w(w_obj_type, w_type))
+
+def check_exact(space, w_obj):
+"Implements the Py_Xxx_CheckExact function"
+w_obj_type = space.type(w_obj)
+w_type = get_w_type(space)
+return space.is_w(w_obj_type, w_type)
+
+return check, check_exact
+
 pypy_debug_catch_fatal_exception = 
rffi.llexternal('pypy_debug_catch_fatal_exception', [], lltype.Void)
 
 
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -1,7 +1,7 @@
 from pypy.interpreter.error import oefmt
 from rpython.rtyper.lltypesystem import rffi, lltype
 from pypy.module.cpyext.api import (
-cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
+cpython_api, cpython_struct, bootstrap_function, build_type_checkers_flags,
 PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL, slot_function)
 from pypy.module.cpyext.pyerrors import PyErr_BadArgument
 from pypy.module.cpyext.pyobject import (
@@ -58,7 +58,7 @@
dealloc=bytes_dealloc,
realize=bytes_realize)
 
-PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_bytes")
+PyString_Check, PyString_CheckExact = build_type_checkers_flags("String", "w_bytes")
 
 def new_empty_str(space, length):
 """
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py
--- a/pypy/module/cpyext/dictobject.py
+++ b/pypy/module/cpyext/dictobject.py
@@ -4,7 +4,7 @@
 from pypy.objspace.std.classdict import ClassDictStrategy
 from pypy.interpreter.typedef import GetSetProperty
 from pypy.module.cpyext.api import (
-cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t,
+cpython_api, CANNOT_FAIL, build_type_checkers_flags, Py_ssize_t,
 Py_ssize_tP, CONST_STRING, PyObjectFields, cpython_struct,
 bootstrap_function, slot_function)
 from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, as_pyobj,
@@ -66,7 +66,7 @@
 def PyDict_New(space):
 return space.newdict()
 
-PyDict_Check, PyDict_CheckExact = build_type_checkers("Dict")
+PyDict_Check, PyDict_CheckExact = build_type_checkers_flags("Dict")
 
 @cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL,
  result_borrowed=True)
diff --git a/pypy/module/cpyext/include/dictobject.h b/pypy/module/cpyext/include/dictobject.h
--- a/pypy/module/cpyext/include/dictobject.h
+++ b/pypy/module/cpyext/include/dictobject.h
@@ -12,6 +12,10 
