Hello community,

here is the log from the commit of package python-eventlet for openSUSE:Factory 
checked in at 2020-05-01 11:06:49
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-eventlet (Old)
 and      /work/SRC/openSUSE:Factory/.python-eventlet.new.2738 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "python-eventlet"

Fri May  1 11:06:49 2020 rev:34 rq:798871 version:0.25.2

Changes:
--------
--- /work/SRC/openSUSE:Factory/python-eventlet/python-eventlet.changes  
2019-09-23 12:05:16.989926444 +0200
+++ 
/work/SRC/openSUSE:Factory/.python-eventlet.new.2738/python-eventlet.changes    
    2020-05-01 11:06:54.179005795 +0200
@@ -1,0 +2,6 @@
+Wed Apr 29 10:49:14 UTC 2020 - Dirk Mueller <[email protected]>
+
+- update to 0.25.2:
+  * green.ssl: redundant set_nonblocking() caused SSLWantReadError
+
+-------------------------------------------------------------------

Old:
----
  eventlet-0.25.1.tar.gz

New:
----
  eventlet-0.25.2.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ python-eventlet.spec ++++++
--- /var/tmp/diff_new_pack.GprWTE/_old  2020-05-01 11:06:55.459008579 +0200
+++ /var/tmp/diff_new_pack.GprWTE/_new  2020-05-01 11:06:55.459008579 +0200
@@ -1,7 +1,7 @@
 #
 # spec file for package python-eventlet
 #
-# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2020 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -18,7 +18,7 @@
 
 %{?!python_module:%define python_module() python-%{**} python3-%{**}}
 Name:           python-eventlet
-Version:        0.25.1
+Version:        0.25.2
 Release:        0
 Summary:        Concurrent networking library for Python
 License:        MIT

++++++ eventlet-0.25.1.tar.gz -> eventlet-0.25.2.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/NEWS new/eventlet-0.25.2/NEWS
--- old/eventlet-0.25.1/NEWS    2019-08-21 23:29:28.000000000 +0200
+++ new/eventlet-0.25.2/NEWS    2020-04-09 14:43:16.000000000 +0200
@@ -1,4 +1,9 @@
+0.25.2
+======
+* green.ssl: redundant set_nonblocking() caused SSLWantReadError
+
 0.25.1
+======
 * wsgi (tests): Stop using deprecated cgi.parse_qs() to support Python 3.8; 
Thanks to Miro Hrončok
 * os: Add workaround to `open` for pathlib on py 3.7; Thanks to David Szotten
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/PKG-INFO new/eventlet-0.25.2/PKG-INFO
--- old/eventlet-0.25.1/PKG-INFO        2019-08-21 23:35:05.000000000 +0200
+++ new/eventlet-0.25.2/PKG-INFO        2020-04-09 14:44:00.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: eventlet
-Version: 0.25.1
+Version: 0.25.2
 Summary: Highly concurrent networking library
 Home-page: http://eventlet.net
 Author: Linden Lab
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/benchmarks/__init__.py 
new/eventlet-0.25.2/benchmarks/__init__.py
--- old/eventlet-0.25.1/benchmarks/__init__.py  1970-01-01 01:00:00.000000000 
+0100
+++ new/eventlet-0.25.2/benchmarks/__init__.py  2020-03-02 12:07:13.000000000 
+0100
@@ -0,0 +1,207 @@
+from __future__ import print_function
+import argparse
+import gc
+import importlib
+import inspect
+import math
+import random
+import re
+import sys
+import timeit
+
+import eventlet
+import six
+
+
+# legacy, TODO convert context/localhost_socket benchmarks to new way
+def measure_best(repeat, iters,
+                 common_setup='pass',
+                 common_cleanup='pass',
+                 *funcs):
+    funcs = list(funcs)
+    results = dict((f, []) for f in funcs)
+
+    for _ in range(repeat):
+        random.shuffle(funcs)
+        for func in funcs:
+            gc.collect()
+            t = timeit.Timer(func, setup=common_setup)
+            results[func].append(t.timeit(iters))
+            common_cleanup()
+
+    best_results = {}
+    for func, times in six.iteritems(results):
+        best_results[func] = min(times)
+    return best_results
+
+
+class Benchmark:
+    func = None
+    name = ''
+    iters = 0
+    ns_per_op = 0
+    allocs_per_op = 0
+    mb_per_s = 0
+
+    def __init__(self, **kwargs):
+        for k, v in six.iteritems(kwargs):
+            if not hasattr(self, k):
+                raise AttributeError(k)
+            setattr(self, k, v)
+
+    def __str__(self):
+        kvs = ', '.join('{}={}'.format(k, v) for k, v in 
six.iteritems(self.__dict__) if not k.startswith('_'))
+        return 'Benchmark<{}>'.format(kvs)
+
+    __repr__ = __str__
+
+    def format_result(self, name_pad_to=64):
+        # format compatible with golang.org/x/tools/cmd/benchcmp
+        return "Benchmark_{b.name}{pad}\t{b.iters}\t{b.ns_per_op} 
ns/op".format(
+            b=self, pad=' ' * (name_pad_to + 1 - len(self.name)))
+
+    def run(self, repeat=5):
+        wrapper_time = _run_timeit(self.func, 0)
+        times = []
+        for _ in range(repeat):
+            t = _run_timeit(self.func, self.iters)
+            if t == 0.0:
+                raise Exception('{} time=0'.format(repr(self)))
+            times.append(t)
+        best_time = min(times) - wrapper_time
+        self.ns_per_op = int((best_time * 1e9) / self.iters)
+
+
+def _run_timeit(func, number):
+    # common setup
+    gc.collect()
+    manager = getattr(func, '_benchmark_manager', None)
+    try:
+        # TODO collect allocations count, memory usage
+        # TODO collect custom MB/sec metric reported by benchmark
+        if manager is not None:
+            with manager(number) as ctx:
+                return timeit.Timer(lambda: func(ctx)).timeit(number=number)
+        else:
+            return timeit.Timer(func).timeit(number=number)
+    finally:
+        # common cleanup
+        eventlet.sleep(0.01)
+
+
+def optimal_iters(func, target_time):
+    '''Find optimal number of iterations to run func closely >= target_time.
+    '''
+    iters = 1
+    target_time = float(target_time)
+    max_iters = int(getattr(func, '_benchmark_max_iters', 0))
+    # TODO automatically detect non-linear time growth
+    scale_factor = getattr(func, '_benchmark_scale_factor', 0.0)
+    for _ in range(10):
+        if max_iters and iters > max_iters:
+            return max_iters
+        # print('try iters={iters}'.format(**locals()))
+        t = _run_timeit(func, number=iters)
+        # print('... t={t}'.format(**locals()))
+        if t >= target_time:
+            return iters
+
+        if scale_factor:
+            iters *= scale_factor
+            continue
+
+        # following assumes and works well for linear complexity target 
functions
+        if t < (target_time / 2):
+            # roughly target half optimal time, ensure iterations keep 
increasing
+            iters = iters * (target_time / t / 2) + 1
+            # round up to nearest power of 10
+            iters = int(10 ** math.ceil(math.log10(iters)))
+        elif t < target_time:
+            # half/double dance is less prone to overshooting iterations
+            iters *= 2
+    raise Exception('could not find optimal iterations for time={} 
func={}'.format(target_time, repr(func)))
+
+
+def collect(filter_fun):
+    # running `python benchmarks/__init__.py` or `python -m benchmarks`
+    # puts .../eventlet/benchmarks at top of sys.path, fix it to project root
+    if sys.path[0].endswith('/benchmarks'):
+        path = sys.path.pop(0)
+        correct = path.rsplit('/', 1)[0]
+        sys.path.insert(0, correct)
+
+    common_prefix = 'benchmark_'
+    result = []
+    # TODO step 1: put all toplevel benchmarking code under `if __name__ == 
'__main__'`
+    # TODO step 2: auto import benchmarks/*.py, remove whitelist below
+    # TODO step 3: convert existing benchmarks
+    for name in ('hub_timers', 'spawn'):
+        mod = importlib.import_module('benchmarks.' + name)
+        for name, obj in inspect.getmembers(mod):
+            if name.startswith(common_prefix) and inspect.isfunction(obj):
+                useful_name = name[len(common_prefix):]
+                if filter_fun(useful_name):
+                    result.append(Benchmark(name=useful_name, func=obj))
+
+    return result
+
+
+def noop(*a, **kw):
+    pass
+
+
+def configure(manager=None, scale_factor=0.0, max_iters=0):
+    def wrapper(func):
+        func._benchmark_manager = manager
+        func._benchmark_scale_factor = scale_factor
+        func._benchmark_max_iters = max_iters
+        return func
+    return wrapper
+
+
+def main():
+    cmdline = argparse.ArgumentParser(description='Run benchmarks')
+    cmdline.add_argument('-autotime', default=3.0, type=float, 
metavar='seconds',
+                         help='''autoscale iterations close to this time per 
benchmark,
+                         in seconds (default: %(default).1f)''')
+    cmdline.add_argument('-collect', default=False, action='store_true',
+                         help='stop after collecting, useful for debugging 
this tool')
+    cmdline.add_argument('-filter', default='', metavar='regex',
+                         help='process benchmarks matching regex (default: 
all)')
+    cmdline.add_argument('-iters', default=None, type=int, metavar='int',
+                         help='force this number of iterations (default: 
auto)')
+    cmdline.add_argument('-repeat', default=5, type=int, metavar='int',
+                         help='repeat each benchmark, report best result 
(default: %(default)d)')
+    args = cmdline.parse_args()
+    filter_re = re.compile(args.filter)
+
+    bs = collect(filter_re.search)
+    if args.filter and not bs:
+        # TODO stderr
+        print('error: no benchmarks matched by filter 
"{}"'.format(args.filter))
+        sys.exit(1)
+    if args.collect:
+        bs.sort(key=lambda b: b.name)
+        print('\n'.join(b.name for b in bs))
+        return
+    if not bs:
+        raise Exception('no benchmarks to run')
+
+    # execute in random order
+    random.shuffle(bs)
+    for b in bs:
+        b.iters = args.iters or optimal_iters(b.func, 
target_time=args.autotime)
+        b.run()
+
+    # print results in alphabetic order
+    max_name_len = max(len(b.name) for b in bs)
+    bs.sort(key=lambda b: b.name)
+    for b in bs:
+        print(b.format_result(name_pad_to=max_name_len))
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        sys.exit(1)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/benchmarks/localhost_socket.py 
new/eventlet-0.25.2/benchmarks/localhost_socket.py
--- old/eventlet-0.25.1/benchmarks/localhost_socket.py  1970-01-01 
01:00:00.000000000 +0100
+++ new/eventlet-0.25.2/benchmarks/localhost_socket.py  2018-08-06 
18:17:47.000000000 +0200
@@ -0,0 +1,117 @@
+"""Benchmark evaluating eventlet's performance at speaking to itself over a 
localhost socket."""
+from __future__ import print_function
+
+import time
+
+import benchmarks
+import six
+
+
+BYTES = 1000
+SIZE = 1
+CONCURRENCY = 50
+TRIES = 5
+
+
+def reader(sock):
+    expect = BYTES
+    while expect > 0:
+        d = sock.recv(min(expect, SIZE))
+        expect -= len(d)
+
+
+def writer(addr, socket_impl):
+    sock = socket_impl(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect(addr)
+    sent = 0
+    while sent < BYTES:
+        d = 'xy' * (max(min(SIZE / 2, BYTES - sent), 1))
+        sock.sendall(d)
+        sent += len(d)
+
+
+def green_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        pool.spawn_n(reader, sock)
+
+
+def heavy_accepter(server_sock, pool):
+    for i in six.moves.range(CONCURRENCY):
+        sock, addr = server_sock.accept()
+        t = threading.Thread(None, reader, "reader thread", (sock,))
+        t.start()
+        pool.append(t)
+
+
+import eventlet.green.socket
+import eventlet
+
+from eventlet import debug
+debug.hub_exceptions(True)
+
+
+def launch_green_threads():
+    pool = eventlet.GreenPool(CONCURRENCY * 2 + 1)
+    server_sock = eventlet.green.socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    pool.spawn_n(green_accepter, server_sock, pool)
+    for i in six.moves.range(CONCURRENCY):
+        pool.spawn_n(writer, addr, eventlet.green.socket.socket)
+    pool.waitall()
+
+
+import threading
+import socket
+
+
+def launch_heavy_threads():
+    threads = []
+    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    server_sock.bind(('localhost', 0))
+    server_sock.listen(50)
+    addr = ('localhost', server_sock.getsockname()[1])
+    accepter_thread = threading.Thread(
+        None, heavy_accepter, "accepter thread", (server_sock, threads))
+    accepter_thread.start()
+    threads.append(accepter_thread)
+    for i in six.moves.range(CONCURRENCY):
+        client_thread = threading.Thread(None, writer, "writer thread", (addr, 
socket.socket))
+        client_thread.start()
+        threads.append(client_thread)
+    for t in threads:
+        t.join()
+
+
+if __name__ == "__main__":
+    import optparse
+    parser = optparse.OptionParser()
+    parser.add_option('--compare-threading', action='store_true', 
dest='threading', default=False)
+    parser.add_option('-b', '--bytes', type='int', dest='bytes',
+                      default=BYTES)
+    parser.add_option('-s', '--size', type='int', dest='size',
+                      default=SIZE)
+    parser.add_option('-c', '--concurrency', type='int', dest='concurrency',
+                      default=CONCURRENCY)
+    parser.add_option('-t', '--tries', type='int', dest='tries',
+                      default=TRIES)
+
+    opts, args = parser.parse_args()
+    BYTES = opts.bytes
+    SIZE = opts.size
+    CONCURRENCY = opts.concurrency
+    TRIES = opts.tries
+
+    funcs = [launch_green_threads]
+    if opts.threading:
+        funcs = [launch_green_threads, launch_heavy_threads]
+    results = benchmarks.measure_best(TRIES, 3,
+                                      lambda: None, lambda: None,
+                                      *funcs)
+    print("green:", results[launch_green_threads])
+    if opts.threading:
+        print("threads:", results[launch_heavy_threads])
+        print("%", (results[launch_green_threads] - 
results[launch_heavy_threads]
+                    ) / results[launch_heavy_threads] * 100)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/benchmarks/spawn.py 
new/eventlet-0.25.2/benchmarks/spawn.py
--- old/eventlet-0.25.1/benchmarks/spawn.py     1970-01-01 01:00:00.000000000 
+0100
+++ new/eventlet-0.25.2/benchmarks/spawn.py     2020-03-02 12:07:13.000000000 
+0100
@@ -0,0 +1,79 @@
+import contextlib
+
+import eventlet
+import benchmarks
+
+
+def dummy(i=None):
+    return i
+
+
+def linked(gt, arg):
+    return arg
+
+
+def benchmark_sleep():
+    eventlet.sleep()
+
+
+def benchmark_spawn_link1():
+    t = eventlet.spawn(dummy)
+    t.link(linked, 1)
+    t.wait()
+
+
+def benchmark_spawn_link5():
+    t = eventlet.spawn(dummy)
+    t.link(linked, 1)
+    t.link(linked, 2)
+    t.link(linked, 3)
+    t.link(linked, 4)
+    t.link(linked, 5)
+    t.wait()
+
+
+def benchmark_spawn_link5_unlink3():
+    t = eventlet.spawn(dummy)
+    t.link(linked, 1)
+    t.link(linked, 2)
+    t.link(linked, 3)
+    t.link(linked, 4)
+    t.link(linked, 5)
+    t.unlink(linked, 3)
+    t.wait()
+
+
[email protected](max_iters=1e5)
+def benchmark_spawn_nowait():
+    eventlet.spawn(dummy, 1)
+
+
+def benchmark_spawn():
+    eventlet.spawn(dummy, 1).wait()
+
+
[email protected](max_iters=1e5)
+def benchmark_spawn_n():
+    eventlet.spawn_n(dummy, 1)
+
+
[email protected](max_iters=1e5)
+def benchmark_spawn_n_kw():
+    eventlet.spawn_n(dummy, i=1)
+
+
[email protected]
+def pool_setup(iters):
+    pool = eventlet.GreenPool(iters)
+    yield pool
+    pool.waitall()
+
+
[email protected](manager=pool_setup)
+def benchmark_pool_spawn(pool):
+    pool.spawn(dummy, 1)
+
+
[email protected](manager=pool_setup, max_iters=1e5)
+def benchmark_pool_spawn_n(pool):
+    pool.spawn_n(dummy, 1)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/eventlet/__init__.py 
new/eventlet-0.25.2/eventlet/__init__.py
--- old/eventlet-0.25.1/eventlet/__init__.py    2019-08-21 23:29:28.000000000 
+0200
+++ new/eventlet-0.25.2/eventlet/__init__.py    2020-04-09 14:42:39.000000000 
+0200
@@ -1,7 +1,7 @@
 import os
 
 
-version_info = (0, 25, 1)
+version_info = (0, 25, 2)
 __version__ = '.'.join(map(str, version_info))
 # This is to make Debian packaging easier, it ignores import
 # errors of greenlet so that the packager can still at least
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/eventlet/green/ssl.py 
new/eventlet-0.25.2/eventlet/green/ssl.py
--- old/eventlet-0.25.1/eventlet/green/ssl.py   2019-05-24 11:00:38.000000000 
+0200
+++ new/eventlet-0.25.2/eventlet/green/ssl.py   2020-03-02 13:04:48.000000000 
+0100
@@ -384,7 +384,6 @@
             while True:
                 try:
                     newsock, addr = socket.accept(self)
-                    set_nonblocking(newsock)
                     break
                 except orig_socket.error as e:
                     if get_errno(e) not in greenio.SOCKET_BLOCKING:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/eventlet.egg-info/PKG-INFO 
new/eventlet-0.25.2/eventlet.egg-info/PKG-INFO
--- old/eventlet-0.25.1/eventlet.egg-info/PKG-INFO      2019-08-21 
23:35:04.000000000 +0200
+++ new/eventlet-0.25.2/eventlet.egg-info/PKG-INFO      2020-04-09 
14:43:59.000000000 +0200
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: eventlet
-Version: 0.25.1
+Version: 0.25.2
 Summary: Highly concurrent networking library
 Home-page: http://eventlet.net
 Author: Linden Lab
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/eventlet-0.25.1/eventlet.egg-info/SOURCES.txt 
new/eventlet-0.25.2/eventlet.egg-info/SOURCES.txt
--- old/eventlet-0.25.1/eventlet.egg-info/SOURCES.txt   2019-08-21 
23:35:05.000000000 +0200
+++ new/eventlet-0.25.2/eventlet.egg-info/SOURCES.txt   2020-04-09 
14:44:00.000000000 +0200
@@ -5,6 +5,9 @@
 README.rst
 setup.cfg
 setup.py
+benchmarks/__init__.py
+benchmarks/localhost_socket.py
+benchmarks/spawn.py
 doc/Makefile
 doc/authors.rst
 doc/basic_usage.rst


Reply via email to