Hello community,

here is the log from the commit of package python-python-rapidjson for 
openSUSE:Factory checked in at 2019-08-28 18:38:03
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-python-rapidjson (Old)
 and      /work/SRC/openSUSE:Factory/.python-python-rapidjson.new.7948 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-python-rapidjson"

Wed Aug 28 18:38:03 2019 rev:4 rq:726709 version:0.8.0

Changes:
--------
--- 
/work/SRC/openSUSE:Factory/python-python-rapidjson/python-python-rapidjson.changes
  2019-07-22 17:20:11.137897830 +0200
+++ 
/work/SRC/openSUSE:Factory/.python-python-rapidjson.new.7948/python-python-rapidjson.changes
        2019-08-28 18:38:06.617256284 +0200
@@ -1,0 +2,6 @@
+Wed Aug 28 08:34:01 UTC 2019 - Marketa Calabkova <[email protected]>
+
+- Update to 0.8.0
+  * New serialization option bytes_mode to control how bytes instances get encoded
+
+-------------------------------------------------------------------
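
For a quick idea of what the new option does, here is a minimal sketch against
0.8.0, mirroring the upstream docs quoted further below (the escape formatting
in the output is taken from those docs):

    >>> from rapidjson import dumps, loads
    >>> dumps(b'cio\xc3\xa8')            # bytes are assumed UTF-8 by default
    '"cio\\u00E8"'
    >>> loads(dumps(b'cio\xc3\xa8')) == 'cioè'
    True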

Old:
----
  python-rapidjson-0.7.2.tar.gz

New:
----
  python-rapidjson-0.8.0.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-python-rapidjson.spec ++++++
--- /var/tmp/diff_new_pack.A8rcbk/_old  2019-08-28 18:38:07.797256080 +0200
+++ /var/tmp/diff_new_pack.A8rcbk/_new  2019-08-28 18:38:07.801256079 +0200
@@ -19,7 +19,7 @@
 %define skip_python2 1
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-python-rapidjson
-Version:        0.7.2
+Version:        0.8.0
 Release:        0
 Summary:        Python wrapper around rapidjson
 License:        MIT
@@ -52,6 +52,7 @@
 
 %install
 %python_install
+%python_expand %fdupes %{buildroot}%{$python_sitelib}
 
 %check
 export LANG=en_US.UTF-8

++++++ python-rapidjson-0.7.2.tar.gz -> python-rapidjson-0.8.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/CHANGES.rst 
new/python-rapidjson-0.8.0/CHANGES.rst
--- old/python-rapidjson-0.7.2/CHANGES.rst      2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/CHANGES.rst      2019-08-09 12:57:38.000000000 
+0200
@@ -1,6 +1,15 @@
 Changes
 -------
 
+0.8.0 (2019-08-09)
+~~~~~~~~~~~~~~~~~~
+
+* New serialization option ``bytes_mode`` to control how bytes instances get encoded
+  (`issue #122`__)
+
+  __ https://github.com/python-rapidjson/python-rapidjson/issues/122
+
+
 0.7.2 (2019-06-09)
 ~~~~~~~~~~~~~~~~~~
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/docs/api.rst 
new/python-rapidjson-0.8.0/docs/api.rst
--- old/python-rapidjson-0.7.2/docs/api.rst     2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/docs/api.rst     2019-08-09 12:57:38.000000000 
+0200
@@ -146,6 +146,20 @@
    *arrays* and *objects*.
 
 
+.. rubric:: `bytes_mode` related constants
+
+.. data:: BM_NONE
+
+   This disables the default handling mode (:data:`BM_UTF8`) of :class:`bytes` instances:
+   they won't be treated in any special way and will raise a ``TypeError`` exception when
+   encountered. On the other hand, in this mode they can be managed by a `default`
+   handler.
+
+.. data:: BM_UTF8
+
+   This is the default setting for `bytes_mode`: any :class:`bytes` instance will be
+   assumed to be a ``UTF-8`` encoded string, and decoded accordingly.
+
 .. rubric:: Exceptions
 
 .. exception:: JSONDecodeError
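
The two constants documented above end up as plain integer module attributes
(the C enum added further down defines BM_NONE = 0 and BM_UTF8 = 1), and
passing bytes_mode=None is treated like BM_NONE. A small illustrative session,
assuming 0.8.0 is installed:

    >>> import rapidjson
    >>> rapidjson.BM_NONE, rapidjson.BM_UTF8
    (0, 1)
    >>> rapidjson.dumps(b'raw', bytes_mode=None)   # same effect as BM_NONE
    Traceback (most recent call last):
      ...
    TypeError: b'raw' is not JSON serializable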
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/docs/dump.rst 
new/python-rapidjson-0.8.0/docs/dump.rst
--- old/python-rapidjson-0.7.2/docs/dump.rst    2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/docs/dump.rst    2019-08-09 12:57:38.000000000 
+0200
@@ -18,7 +18,7 @@
 
 .. function:: dump(obj, stream, *, skipkeys=False, ensure_ascii=True, 
indent=None, \
                    default=None, sort_keys=False, number_mode=None, 
datetime_mode=None, \
-                   uuid_mode=None, chunk_size=65536, allow_nan=True)
+                   uuid_mode=None, bytes_mode=BM_UTF8, chunk_size=65536, 
allow_nan=True)
 
    Encode given Python `obj` instance into a ``JSON`` stream.
 
@@ -36,6 +36,7 @@
    :param int datetime_mode: how should :class:`datetime`, :class:`time` and
                              :class:`date` instances be handled
    :param int uuid_mode: how should :class:`UUID` instances be handled
+   :param int bytes_mode: how should :class:`bytes` instances be handled
    :param int chunk_size: write the stream in chunks of this size at a time
    :param bool allow_nan: *compatibility* flag equivalent to 
``number_mode=NM_NAN``
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/docs/dumps.rst 
new/python-rapidjson-0.8.0/docs/dumps.rst
--- old/python-rapidjson-0.7.2/docs/dumps.rst   2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/docs/dumps.rst   2019-08-09 12:57:38.000000000 
+0200
@@ -2,7 +2,7 @@
 .. :Project:   python-rapidjson -- dumps function documentation
 .. :Author:    Lele Gaifax <[email protected]>
 .. :License:   MIT License
-.. :Copyright: © 2016, 2017, 2018 Lele Gaifax
+.. :Copyright: © 2016, 2017, 2018, 2019 Lele Gaifax
 ..
 
 ==================
@@ -13,14 +13,14 @@
 
 .. testsetup::
 
-   from rapidjson import (dumps, loads, DM_NONE, DM_ISO8601, DM_UNIX_TIME,
-                          DM_ONLY_SECONDS, DM_IGNORE_TZ, DM_NAIVE_IS_UTC, 
DM_SHIFT_TO_UTC,
-                          UM_NONE, UM_CANONICAL, UM_HEX, NM_NATIVE, 
NM_DECIMAL, NM_NAN,
-                          PM_NONE, PM_COMMENTS, PM_TRAILING_COMMAS)
+   from rapidjson import (dumps, loads, BM_NONE, BM_UTF8, DM_NONE, DM_ISO8601,
+                          DM_UNIX_TIME, DM_ONLY_SECONDS, DM_IGNORE_TZ, 
DM_NAIVE_IS_UTC,
+                          DM_SHIFT_TO_UTC, UM_NONE, UM_CANONICAL, UM_HEX, 
NM_NATIVE,
+                          NM_DECIMAL, NM_NAN, PM_NONE, PM_COMMENTS, 
PM_TRAILING_COMMAS)
 
 .. function:: dumps(obj, *, skipkeys=False, ensure_ascii=True, indent=None, \
                     default=None, sort_keys=False, number_mode=None, 
datetime_mode=None, \
-                    uuid_mode=None, allow_nan=True)
+                    uuid_mode=None, bytes_mode=BM_UTF8, allow_nan=True)
 
    Encode given Python `obj` instance into a ``JSON`` string.
 
@@ -37,6 +37,7 @@
    :param int datetime_mode: how should :class:`datetime`, :class:`time` and
                              :class:`date` instances be handled
    :param int uuid_mode: how should :class:`UUID` instances be handled
+   :param int bytes_mode: how should :class:`bytes` instances be handled
    :param bool allow_nan: *compatibility* flag equivalent to 
``number_mode=NM_NAN``
    :returns: A Python :class:`str` instance.
 
@@ -349,6 +350,36 @@
       '"be57634565b54fc292c594e2f82e38fd"'
 
 
+   .. _dumps-bytes-mode:
+   .. rubric:: `bytes_mode`
+
+   By default all :class:`bytes` instances are assumed to be ``UTF-8`` encoded strings,
+   and acted on accordingly:
+
+   .. doctest::
+
+      >>> ascii_string = 'ciao'
+      >>> bytes_string = b'cio\xc3\xa8'
+      >>> unicode_string = 'cioè'
+      >>> dumps([ascii_string, bytes_string, unicode_string])
+      '["ciao","cio\\u00E8","cio\\u00E8"]'
+
+   Sometimes you may prefer a different approach, explicitly disabling that behavior using
+   the :data:`BM_NONE` mode:
+
+   .. doctest::
+
+      >>> dumps([ascii_string, bytes_string, unicode_string],
+      ...       bytes_mode=BM_NONE)
+      Traceback (most recent call last):
+        File "<stdin>", line 1, in <module>
+      TypeError: b'cio\xc3\xa8' is not JSON serializable
+      >>> my_bytes_handler = lambda b: b.decode('UTF-8').upper()
+      >>> dumps([ascii_string, bytes_string, unicode_string],
+      ...       bytes_mode=BM_NONE, default=my_bytes_handler)
+      '["ciao","CIO\\u00C8","cio\\u00E8"]'
+
+
 .. _ISO 8601: https://en.wikipedia.org/wiki/ISO_8601
 .. _RapidJSON: http://rapidjson.org/
 .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
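
Building on the ``default`` handler shown in the doctest above, a more typical
fallback (hypothetical, not taken from the upstream docs) is to base64-encode
raw bytes when BM_NONE is in effect:

    >>> import base64
    >>> from rapidjson import dumps, BM_NONE
    >>> as_b64 = lambda b: base64.b64encode(b).decode('ascii')
    >>> dumps([b'\x00\x01\x02'], bytes_mode=BM_NONE, default=as_b64)
    '["AAEC"]'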
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/docs/encoder.rst 
new/python-rapidjson-0.8.0/docs/encoder.rst
--- old/python-rapidjson-0.7.2/docs/encoder.rst 2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/docs/encoder.rst 2019-08-09 12:57:38.000000000 
+0200
@@ -2,7 +2,7 @@
 .. :Project:   python-rapidjson -- Encoder class documentation
 .. :Author:    Lele Gaifax <[email protected]>
 .. :License:   MIT License
-.. :Copyright: © 2017, 2018 Lele Gaifax
+.. :Copyright: © 2017, 2018, 2019 Lele Gaifax
 ..
 
 ===============
@@ -13,13 +13,14 @@
 
 .. testsetup::
 
-   from rapidjson import (Decoder, Encoder, DM_NONE, DM_ISO8601, DM_UNIX_TIME,
-                          DM_ONLY_SECONDS, DM_IGNORE_TZ, DM_NAIVE_IS_UTC, 
DM_SHIFT_TO_UTC,
-                          UM_NONE, UM_CANONICAL, UM_HEX, NM_NATIVE, 
NM_DECIMAL, NM_NAN,
-                          PM_NONE, PM_COMMENTS, PM_TRAILING_COMMAS)
+   from rapidjson import (Decoder, Encoder, BM_NONE, BM_UTF8, DM_NONE, 
DM_ISO8601,
+                          DM_UNIX_TIME, DM_ONLY_SECONDS, DM_IGNORE_TZ, 
DM_NAIVE_IS_UTC,
+                          DM_SHIFT_TO_UTC, UM_NONE, UM_CANONICAL, UM_HEX, 
NM_NATIVE,
+                          NM_DECIMAL, NM_NAN, PM_NONE, PM_COMMENTS, 
PM_TRAILING_COMMAS)
 
 .. class:: Encoder(skip_invalid_keys=False, ensure_ascii=True, indent=None, \
-                   sort_keys=False, number_mode=None, datetime_mode=None, 
uuid_mode=None)
+                   sort_keys=False, number_mode=None, datetime_mode=None, \
+                   uuid_mode=None, bytes_mode=BM_UTF8)
 
    Class-based :func:`dumps`\ -like functionality.
 
@@ -36,6 +37,7 @@
    :param int datetime_mode: how should :ref:`datetime, time and date 
instances be handled
                              <dumps-datetime-mode>`
    :param int uuid_mode: how should :ref:`UUID instances be handled 
<dumps-uuid-mode>`
+   :param int bytes_mode: how should :ref:`bytes instances be handled 
<dumps-bytes-mode>`
 
 
    .. method:: __call__(obj, stream=None, *, chunk_size=65536)
@@ -59,7 +61,7 @@
 
       .. doctest::
 
-         >>> class Point(object):
+         >>> class Point:
          ...   def __init__(self, x, y):
          ...     self.x = x
          ...     self.y = y
@@ -79,3 +81,20 @@
          >>> pe = PointEncoder(sort_keys=True)
          >>> pe(point)
          '{"x":1,"y":2}'
+
+      When you want to treat your :class:`bytes` instances in a special way, you can use
+      :data:`BM_NONE` together with this method:
+
+      .. doctest::
+
+         >>> class HexifyBytes(Encoder):
+         ...   def default(self, obj):
+         ...     if isinstance(obj, bytes):
+         ...       return obj.hex()
+         ...     else:
+         ...       return obj
+         ...
+         >>> small_numbers = bytes([1, 2, 3])
+         >>> hb = HexifyBytes(bytes_mode=BM_NONE)
+         >>> hb(small_numbers)
+         '"010203"'
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/rapidjson.cpp 
new/python-rapidjson-0.8.0/rapidjson.cpp
--- old/python-rapidjson-0.7.2/rapidjson.cpp    2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/rapidjson.cpp    2019-08-09 12:57:38.000000000 
+0200
@@ -159,6 +159,12 @@
 };
 
 
+enum BytesMode {
+    BM_NONE = 0,
+    BM_UTF8 = 1<<0     // try to convert to UTF-8
+};
+
+
 enum ParseMode {
     PM_NONE = 0,
     PM_COMMENTS = 1<<0,       // Allow one-line // ... and multi-line /* ... 
*/ comments
@@ -184,12 +190,14 @@
 static PyObject* do_encode(PyObject* value, bool skipInvalidKeys, PyObject* 
defaultFn,
                            bool sortKeys, bool ensureAscii, bool prettyPrint,
                            unsigned indent, NumberMode numberMode,
-                           DatetimeMode datetimeMode, UuidMode uuidMode);
+                           DatetimeMode datetimeMode, UuidMode uuidMode,
+                           BytesMode bytesMode);
 static PyObject* do_stream_encode(PyObject* value, PyObject* stream, size_t 
chunkSize,
                                   bool skipInvalidKeys, PyObject* defaultFn,
                                   bool sortKeys, bool ensureAscii, bool 
prettyPrint,
                                   unsigned indent, NumberMode numberMode,
-                                  DatetimeMode datetimeMode, UuidMode 
uuidMode);
+                                  DatetimeMode datetimeMode, UuidMode uuidMode,
+                                  BytesMode bytesMode);
 static PyObject* encoder_call(PyObject* self, PyObject* args, PyObject* 
kwargs);
 static PyObject* encoder_new(PyTypeObject* type, PyObject* args, PyObject* 
kwargs);
 
@@ -314,7 +322,7 @@
     PyWriteStreamWrapper(PyObject* stream, size_t size)
         : stream(stream) {
         Py_INCREF(stream);
-        buffer = (char*) malloc(size);
+        buffer = (char*) PyMem_Malloc(size);
         assert(buffer);
         bufferEnd = buffer + size;
         cursor = buffer;
@@ -324,7 +332,7 @@
 
     ~PyWriteStreamWrapper() {
         Py_CLEAR(stream);
-        free(buffer);
+        PyMem_Free(buffer);
     }
 
     Ch Peek() {
@@ -593,7 +601,7 @@
         while (!stack.empty()) {
             const HandlerContext& ctx = stack.back();
             if (ctx.copiedKey)
-                free((void*) ctx.key);
+                PyMem_Free((void*) ctx.key);
             stack.pop_back();
         }
         Py_CLEAR(decoderStartObject);
@@ -646,12 +654,12 @@
         // the context gets reused for the next dictionary key
 
         if (current.key && current.copiedKey) {
-            free((void*) current.key);
+            PyMem_Free((void*) current.key);
             current.key = NULL;
         }
 
         if (copy) {
-            char* copied_str = (char*) malloc(length+1);
+            char* copied_str = (char*) PyMem_Malloc(length+1);
             if (copied_str == NULL)
                 return false;
             memcpy(copied_str, str, length+1);
@@ -706,7 +714,7 @@
         const HandlerContext& ctx = stack.back();
 
         if (ctx.copiedKey)
-            free((void*) ctx.key);
+            PyMem_Free((void*) ctx.key);
 
         PyObject* mapping = ctx.object;
         stack.pop_back();
@@ -800,7 +808,7 @@
         const HandlerContext& ctx = stack.back();
 
         if (ctx.copiedKey)
-            free((void*) ctx.key);
+            PyMem_Free((void*) ctx.key);
 
         PyObject* sequence = ctx.object;
         stack.pop_back();
@@ -1882,7 +1890,7 @@
     Reader reader;
 
     if (jsonStr != NULL) {
-        char* jsonStrCopy = (char*) malloc(sizeof(char) * (jsonStrLen+1));
+        char* jsonStrCopy = (char*) PyMem_Malloc(sizeof(char) * 
(jsonStrLen+1));
 
         if (jsonStrCopy == NULL)
             return PyErr_NoMemory();
@@ -1893,7 +1901,7 @@
 
         DECODE(reader, kParseInsituFlag, ss, handler);
 
-        free(jsonStrCopy);
+        PyMem_Free(jsonStrCopy);
     }
     else {
         PyReadStreamWrapper sw(jsonStream, chunkSize);
@@ -2173,12 +2181,13 @@
     bool sortKeys,
     NumberMode numberMode,
     DatetimeMode datetimeMode,
-    UuidMode uuidMode)
+    UuidMode uuidMode,
+    BytesMode bytesMode)
 {
     int is_decimal;
 
 #define RECURSE(v) dumps_internal(writer, v, skipKeys, defaultFn, sortKeys, \
-                                  numberMode, datetimeMode, uuidMode)
+                                  numberMode, datetimeMode, uuidMode, 
bytesMode)
 
 #define ASSERT_VALID_SIZE(l) do {                                       \
     if (l < 0 || l > UINT_MAX) {                                        \
@@ -2329,7 +2338,8 @@
         ASSERT_VALID_SIZE(l);
         writer->String(s, (SizeType) l);
     }
-    else if (PyBytes_Check(object) || PyByteArray_Check(object)) {
+    else if (bytesMode == BM_UTF8
+             && (PyBytes_Check(object) || PyByteArray_Check(object))) {
         PyObject* unicodeObj = PyUnicode_FromEncodedObject(object, "utf-8", 
NULL);
 
         if (unicodeObj == NULL)
@@ -2809,13 +2819,14 @@
     DatetimeMode datetimeMode;
     UuidMode uuidMode;
     NumberMode numberMode;
+    BytesMode bytesMode;
 } EncoderObject;
 
 
 PyDoc_STRVAR(dumps_docstring,
              "dumps(obj, *, skipkeys=False, ensure_ascii=True, indent=None, 
default=None,"
              " sort_keys=False, number_mode=None, datetime_mode=None, 
uuid_mode=None,"
-             " allow_nan=True)\n"
+             " bytes_mode=BM_UTF8, allow_nan=True)\n"
              "\n"
              "Encode a Python object into a JSON string.");
 
@@ -2837,6 +2848,8 @@
     DatetimeMode datetimeMode = DM_NONE;
     PyObject* uuidModeObj = NULL;
     UuidMode uuidMode = UM_NONE;
+    PyObject* bytesModeObj = NULL;
+    BytesMode bytesMode = BM_UTF8;
     bool prettyPrint = false;
     unsigned indentCharCount = 4;
     int allowNan = -1;
@@ -2850,6 +2863,7 @@
         "number_mode",
         "datetime_mode",
         "uuid_mode",
+        "bytes_mode",
 
         /* compatibility with stdlib json */
         "allow_nan",
@@ -2857,7 +2871,7 @@
         NULL
     };
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"O|$ppOOpOOOp:rapidjson.dumps",
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"O|$ppOOpOOOOp:rapidjson.dumps",
                                      (char**) kwlist,
                                      &value,
                                      &skipKeys,
@@ -2868,6 +2882,7 @@
                                      &numberModeObj,
                                      &datetimeModeObj,
                                      &uuidModeObj,
+                                     &bytesModeObj,
                                      &allowNan))
         return NULL;
 
@@ -2945,16 +2960,32 @@
         }
     }
 
+    if (bytesModeObj) {
+        if (bytesModeObj == Py_None)
+            bytesMode = BM_NONE;
+        else if (PyLong_Check(bytesModeObj)) {
+            bytesMode = (BytesMode) PyLong_AsLong(bytesModeObj);
+            if (bytesMode < BM_NONE || bytesMode > BM_UTF8) {
+                PyErr_SetString(PyExc_ValueError, "Invalid bytes_mode");
+                return NULL;
+            }
+        }
+        else {
+            PyErr_SetString(PyExc_TypeError, "bytes_mode must be an integer 
value");
+            return NULL;
+        }
+    }
+
     return do_encode(value, skipKeys ? true : false, defaultFn, sortKeys ? 
true : false,
                      ensureAscii ? true : false, prettyPrint ? true : false,
-                     indentCharCount, numberMode, datetimeMode, uuidMode);
+                     indentCharCount, numberMode, datetimeMode, uuidMode, 
bytesMode);
 }
 
 
 PyDoc_STRVAR(dump_docstring,
              "dump(obj, stream, *, skipkeys=False, ensure_ascii=True, 
indent=None,"
              " default=None, sort_keys=False, number_mode=None, 
datetime_mode=None,"
-             " uuid_mode=None, chunk_size=65536, allow_nan=True)\n"
+             " uuid_mode=None, bytes_mode=BM_UTF8, chunk_size=65536, 
allow_nan=True)\n"
              "\n"
              "Encode a Python object into a JSON stream.");
 
@@ -2977,6 +3008,8 @@
     DatetimeMode datetimeMode = DM_NONE;
     PyObject* uuidModeObj = NULL;
     UuidMode uuidMode = UM_NONE;
+    PyObject* bytesModeObj = NULL;
+    BytesMode bytesMode = BM_UTF8;
     bool prettyPrint = false;
     unsigned indentCharCount = 4;
     PyObject* chunkSizeObj = NULL;
@@ -2993,6 +3026,7 @@
         "number_mode",
         "datetime_mode",
         "uuid_mode",
+        "bytes_mode",
         "chunk_size",
 
         /* compatibility with stdlib json */
@@ -3001,7 +3035,7 @@
         NULL
     };
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"OO|$ppOOpOOOOp:rapidjson.dump",
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"OO|$ppOOpOOOOOp:rapidjson.dump",
                                      (char**) kwlist,
                                      &value,
                                      &stream,
@@ -3013,6 +3047,7 @@
                                      &numberModeObj,
                                      &datetimeModeObj,
                                      &uuidModeObj,
+                                     &bytesModeObj,
                                      &chunkSizeObj,
                                      &allowNan))
         return NULL;
@@ -3091,6 +3126,22 @@
         }
     }
 
+    if (bytesModeObj) {
+        if (bytesModeObj == Py_None)
+            bytesMode = BM_NONE;
+        else if (PyLong_Check(bytesModeObj)) {
+            bytesMode = (BytesMode) PyLong_AsLong(bytesModeObj);
+            if (bytesMode < BM_NONE || bytesMode > BM_UTF8) {
+                PyErr_SetString(PyExc_ValueError, "Invalid bytes_mode");
+                return NULL;
+            }
+        }
+        else {
+            PyErr_SetString(PyExc_TypeError, "bytes_mode must be an integer 
value");
+            return NULL;
+        }
+    }
+
     if (chunkSizeObj && chunkSizeObj != Py_None) {
         if (PyLong_Check(chunkSizeObj)) {
             Py_ssize_t size = PyNumber_AsSsize_t(chunkSizeObj, 
PyExc_ValueError);
@@ -3112,14 +3163,15 @@
     return do_stream_encode(value, stream, chunkSize, skipKeys ? true : false,
                             defaultFn, sortKeys ? true : false,
                             ensureAscii ? true : false, prettyPrint ? true : 
false,
-                            indentCharCount, numberMode, datetimeMode, 
uuidMode);
+                            indentCharCount, numberMode, datetimeMode, 
uuidMode,
+                            bytesMode);
 }
 
 
 PyDoc_STRVAR(encoder_doc,
              "Encoder(skip_invalid_keys=False, ensure_ascii=True, indent=None,"
-             " sort_keys=False, number_mode=None, datetime_mode=None, 
uuid_mode=None)\n"
-             "\n"
+             " sort_keys=False, number_mode=None, datetime_mode=None, 
uuid_mode=None,"
+             " bytes_mode=None)\n\n"
              "Create and return a new Encoder instance.");
 
 
@@ -3138,6 +3190,8 @@
      T_UINT, offsetof(EncoderObject, uuidMode), READONLY, "uuid_mode"},
     {"number_mode",
      T_UINT, offsetof(EncoderObject, numberMode), READONLY, "number_mode"},
+    {"bytes_mode",
+     T_UINT, offsetof(EncoderObject, bytesMode), READONLY, "bytes_mode"},
     {NULL}
 };
 
@@ -3197,14 +3251,16 @@
                     sortKeys,                           \
                     numberMode,                         \
                     datetimeMode,                       \
-                    uuidMode)                           \
+                    uuidMode,                           \
+                    bytesMode)                          \
      ? PyUnicode_FromString(buf.GetString()) : NULL)
 
 
 static PyObject*
 do_encode(PyObject* value, bool skipInvalidKeys, PyObject* defaultFn, bool 
sortKeys,
           bool ensureAscii, bool prettyPrint, unsigned indent,
-          NumberMode numberMode, DatetimeMode datetimeMode, UuidMode uuidMode)
+          NumberMode numberMode, DatetimeMode datetimeMode, UuidMode uuidMode,
+          BytesMode bytesMode)
 {
     if (!prettyPrint) {
         if (ensureAscii) {
@@ -3241,7 +3297,8 @@
                     sortKeys,                           \
                     numberMode,                         \
                     datetimeMode,                       \
-                    uuidMode)                           \
+                    uuidMode,                           \
+                    bytesMode)                          \
      ? Py_INCREF(Py_None), Py_None : NULL)
 
 
@@ -3249,7 +3306,8 @@
 do_stream_encode(PyObject* value, PyObject* stream, size_t chunkSize,
                  bool skipInvalidKeys, PyObject* defaultFn, bool sortKeys,
                  bool ensureAscii, bool prettyPrint, unsigned indent,
-                 NumberMode numberMode, DatetimeMode datetimeMode, UuidMode 
uuidMode)
+                 NumberMode numberMode, DatetimeMode datetimeMode, UuidMode 
uuidMode,
+                 BytesMode bytesMode)
 {
     PyWriteStreamWrapper os(stream, chunkSize);
 
@@ -3329,12 +3387,13 @@
         }
         result = do_stream_encode(value, stream, chunkSize, 
e->skipInvalidKeys, defaultFn,
                                   e->sortKeys, e->ensureAscii, e->prettyPrint, 
e->indent,
-                                  e->numberMode, e->datetimeMode, e->uuidMode);
+                                  e->numberMode, e->datetimeMode, e->uuidMode,
+                                  e->bytesMode);
     }
     else {
         result = do_encode(value, e->skipInvalidKeys, defaultFn, e->sortKeys,
                            e->ensureAscii, e->prettyPrint, e->indent,
-                           e->numberMode, e->datetimeMode, e->uuidMode);
+                           e->numberMode, e->datetimeMode, e->uuidMode, 
e->bytesMode);
     }
 
     if (defaultFn != NULL)
@@ -3358,6 +3417,8 @@
     DatetimeMode datetimeMode = DM_NONE;
     PyObject* uuidModeObj = NULL;
     UuidMode uuidMode = UM_NONE;
+    PyObject* bytesModeObj = NULL;
+    BytesMode bytesMode = BM_UTF8;
     unsigned indentCharCount = 4;
     bool prettyPrint = false;
 
@@ -3369,10 +3430,11 @@
         "number_mode",
         "datetime_mode",
         "uuid_mode",
+        "bytes_mode",
         NULL
     };
 
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ppOpOOO:Encoder",
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ppOpOOOO:Encoder",
                                      (char**) kwlist,
                                      &skipInvalidKeys,
                                      &ensureAscii,
@@ -3380,7 +3442,8 @@
                                      &sortKeys,
                                      &numberModeObj,
                                      &datetimeModeObj,
-                                     &uuidModeObj))
+                                     &uuidModeObj,
+                                     &bytesModeObj))
         return NULL;
 
     if (indent && indent != Py_None) {
@@ -3442,6 +3505,22 @@
         }
     }
 
+    if (bytesModeObj) {
+        if (bytesModeObj == Py_None)
+            bytesMode = BM_NONE;
+        else if (PyLong_Check(bytesModeObj)) {
+            bytesMode = (BytesMode) PyLong_AsLong(bytesModeObj);
+            if (bytesMode < BM_NONE || bytesMode > BM_UTF8) {
+                PyErr_SetString(PyExc_ValueError, "Invalid bytes_mode");
+                return NULL;
+            }
+        }
+        else {
+            PyErr_SetString(PyExc_TypeError, "bytes_mode must be an integer 
value");
+            return NULL;
+        }
+    }
+
     e = (EncoderObject*) type->tp_alloc(type, 0);
     if (e == NULL)
         return NULL;
@@ -3454,6 +3533,7 @@
     e->datetimeMode = datetimeMode;
     e->uuidMode = uuidMode;
     e->numberMode = numberMode;
+    e->bytesMode = bytesMode;
 
     return (PyObject*) e;
 }
@@ -3839,6 +3919,9 @@
         || PyModule_AddIntConstant(m, "PM_COMMENTS", PM_COMMENTS)
         || PyModule_AddIntConstant(m, "PM_TRAILING_COMMAS", PM_TRAILING_COMMAS)
 
+        || PyModule_AddIntConstant(m, "BM_NONE", BM_NONE)
+        || PyModule_AddIntConstant(m, "BM_UTF8", BM_UTF8)
+
         || PyModule_AddStringConstant(m, "__version__", 
STRINGIFY(PYTHON_RAPIDJSON_VERSION))
         || PyModule_AddStringConstant(m, "__author__", "Ken Robbins 
<[email protected]>")
         || PyModule_AddStringConstant(m, "__rapidjson_version__", 
RAPIDJSON_VERSION_STRING))
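
The three identical validation blocks added above translate directly into
Python-level errors, with the messages taken from the PyErr_SetString calls
in the diff:

    >>> from rapidjson import dumps
    >>> dumps('ok', bytes_mode=2)          # only BM_NONE (0) and BM_UTF8 (1) are valid
    Traceback (most recent call last):
      ...
    ValueError: Invalid bytes_mode
    >>> dumps('ok', bytes_mode='utf-8')    # must be an integer (or None)
    Traceback (most recent call last):
      ...
    TypeError: bytes_mode must be an integer value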
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/tests/test_memory_leaks.py 
new/python-rapidjson-0.8.0/tests/test_memory_leaks.py
--- old/python-rapidjson-0.7.2/tests/test_memory_leaks.py       2019-06-09 
11:15:04.000000000 +0200
+++ new/python-rapidjson-0.8.0/tests/test_memory_leaks.py       2019-08-09 
12:57:38.000000000 +0200
@@ -62,7 +62,8 @@
         tracemalloc.Filter(True, __file__),))
 
     for _ in range(10):
-        content = io.StringIO('[' + ','.join('{"foo": "bar"}' for _ in 
range(100)) + ']')
+        dct = '{' + ','.join('"foo%d":"bar%d"' % (i, i) for i in range(100)) + 
'}'
+        content = io.StringIO('[' + ','.join(dct for _ in range(100)) + ']')
         rj.load(content, chunk_size=50)
 
     del content
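
The test change above swaps the tiny per-iteration payload for one large
enough that load() really goes through many chunk_size-sized reads. Outside
of pytest, the same pattern can be checked with tracemalloc along these lines
(a sketch, not the actual test code):

    import io
    import tracemalloc
    import rapidjson as rj

    # Payload shaped like the updated test: 100 objects of 100 keys each.
    dct = '{' + ','.join('"foo%d":"bar%d"' % (i, i) for i in range(100)) + '}'
    payload = '[' + ','.join(dct for _ in range(100)) + ']'

    tracemalloc.start()
    before = tracemalloc.take_snapshot()
    for _ in range(10):
        rj.load(io.StringIO(payload), chunk_size=50)
    after = tracemalloc.take_snapshot()
    for stat in after.compare_to(before, 'lineno')[:3]:
        print(stat)         # steady growth here would hint at a leak in load()
    tracemalloc.stop()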
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/python-rapidjson-0.7.2/version.txt 
new/python-rapidjson-0.8.0/version.txt
--- old/python-rapidjson-0.7.2/version.txt      2019-06-09 11:15:04.000000000 
+0200
+++ new/python-rapidjson-0.8.0/version.txt      2019-08-09 12:57:38.000000000 
+0200
@@ -1 +1 @@
-0.7.2
\ No newline at end of file
+0.8.0
\ No newline at end of file

