Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-eventlet for openSUSE:Factory
checked in at 2021-12-23 17:53:27
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-eventlet (Old)
and /work/SRC/openSUSE:Factory/.python-eventlet.new.2520 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-eventlet"
Thu Dec 23 17:53:27 2021 rev:39 rq:942015 version:0.33.0
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-eventlet/python-eventlet.changes
2021-11-21 23:51:45.814333891 +0100
+++
/work/SRC/openSUSE:Factory/.python-eventlet.new.2520/python-eventlet.changes
2021-12-23 17:53:29.835709737 +0100
@@ -1,0 +2,15 @@
+Thu Dec 16 20:11:12 UTC 2021 - Ben Greiner <[email protected]>
+
+- update to 0.33.0
+ * green.thread: unlocked Lock().release() should raise exception,
+ returned True
+ * wsgi: Don't break HTTP framing during 100-continue handling
+ * Python 3.10 partial support
+ * greendns: Create a DNS resolver lazily rather than on import
+ * ssl: GreenSSLContext minimum_version and maximum_version
+ setters
+- Refresh denosing patches: merge remove_nose.patch and
+ remove_nose_part_2.patch into denose-eventlet.patch
+- Refresh newdnspython.patch
+
+-------------------------------------------------------------------
Old:
----
eventlet-0.32.0.tar.gz
remove_nose.patch
remove_nose_part_2.patch
New:
----
denose-eventlet.patch
eventlet-0.33.0.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-eventlet.spec ++++++
--- /var/tmp/diff_new_pack.Hx8NKw/_old 2021-12-23 17:53:31.279710632 +0100
+++ /var/tmp/diff_new_pack.Hx8NKw/_new 2021-12-23 17:53:31.283710635 +0100
@@ -17,8 +17,9 @@
%{?!python_module:%define python_module() python-%{**} python3-%{**}}
+%bcond_without python2
Name: python-eventlet
-Version: 0.32.0
+Version: 0.33.0
Release: 0
Summary: Concurrent networking library for Python
License: MIT
@@ -27,13 +28,11 @@
Source:
https://files.pythonhosted.org/packages/source/e/eventlet/eventlet-%{version}.tar.gz
# PATCH-FEATURE-UPSTREAM remove_nose.patch gh#eventlet/eventlet#638
[email protected]
# Removes dependency on nose
-Patch0: remove_nose.patch
+Patch0: denose-eventlet.patch
# PATCH-FIX-UPSTREAM newdnspython.patch [email protected] -- patch is from
gh#rthalley/dnspython#519, discussion in gh#eventlet/eventlet#638
Patch1: newdnspython.patch
-# Really remove the dependency on nose
-Patch3: remove_nose_part_2.patch
BuildRequires: %{python_module setuptools}
-%if 0%{?suse_version} < 1550
+%if %{with python2}
BuildRequires: python2-monotonic >= 1.4
%endif
BuildRequires: fdupes
@@ -95,6 +94,8 @@
skiptests+=" or test_dns_methods_are_green or test_noraise_dns_tcp"
# These are flaky inside the OBS environment
skiptests+=" or test_fork_after_monkey_patch or test_send_1k_req_rep or
test_cpu_usage_after_bind"
+# tracebacks in denosed suite with pytest inside obs presumably work different
than when upstream is running nose?
+skiptests+=" or test_leakage_from_tracebacks"
# Unknown Python 3.6 specific errors
# TypeError: _wrap_socket() argument 1 must be _socket.socket, not SSLSocket
@@ -104,6 +105,10 @@
%if %python3_version_nodots == 36
python3_skiptests+="$python36_skiptests"
%endif
+# https://github.com/eventlet/eventlet/issues/730
+python310_skiptests+=" or test_patcher_existing_locks_locked"
+# https://github.com/eventlet/eventlet/issues/739
+python310_skiptests+=" or test_017_ssl_zeroreturnerror"
# no subdir recursion
https://github.com/eventlet/eventlet/issues/638#issuecomment-676085599
%pytest -o norecursedirs="tests/*" -k "not ($skiptests ${$python_skiptests})"
${$python_pytest_param}
++++++ denose-eventlet.patch ++++++
Index: eventlet-0.33.0/setup.py
===================================================================
--- eventlet-0.33.0.orig/setup.py
+++ eventlet-0.33.0/setup.py
@@ -27,7 +27,7 @@ setuptools.setup(
'README.rst'
)
).read(),
- test_suite='nose.collector',
+ test_suite='tests',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
Index: eventlet-0.33.0/eventlet.egg-info/SOURCES.txt
===================================================================
--- eventlet-0.33.0.orig/eventlet.egg-info/SOURCES.txt
+++ eventlet-0.33.0/eventlet.egg-info/SOURCES.txt
@@ -175,7 +175,6 @@ tests/greenthread_test.py
tests/hub_test.py
tests/mock.py
tests/mysqldb_test.py
-tests/nosewrapper.py
tests/openssl_test.py
tests/os_test.py
tests/parse_results.py
@@ -275,4 +274,4 @@ tests/stdlib/test_threading_local.py
tests/stdlib/test_timeout.py
tests/stdlib/test_urllib.py
tests/stdlib/test_urllib2.py
-tests/stdlib/test_urllib2_localnet.py
\ No newline at end of file
+tests/stdlib/test_urllib2_localnet.py
Index: eventlet-0.33.0/tests/greenio_test.py
===================================================================
--- eventlet-0.33.0.orig/tests/greenio_test.py
+++ eventlet-0.33.0/tests/greenio_test.py
@@ -9,8 +9,6 @@ import socket as _orig_sock
import sys
import tempfile
-from nose.tools import eq_
-
import eventlet
from eventlet import event, greenio, debug
from eventlet.hubs import get_hub
@@ -39,7 +37,7 @@ def expect_socket_timeout(function, *arg
raise AssertionError("socket.timeout not raised")
except socket.timeout as e:
assert hasattr(e, 'args')
- eq_(e.args[0], 'timed out')
+ assert e.args[0] == 'timed out'
def min_buf_size():
@@ -674,8 +672,8 @@ class TestGreenSocket(tests.LimitedTestC
sender.sendto(b'second', 0, address)
sender_address = ('127.0.0.1', sender.getsockname()[1])
- eq_(receiver.recvfrom(1024), (b'first', sender_address))
- eq_(receiver.recvfrom(1024), (b'second', sender_address))
+ assert receiver.recvfrom(1024) == (b'first', sender_address)
+ assert receiver.recvfrom(1024) == (b'second', sender_address)
def test_get_fileno_of_a_socket_works():
Index: eventlet-0.33.0/tests/nosewrapper.py
===================================================================
--- eventlet-0.33.0.orig/tests/nosewrapper.py
+++ eventlet-0.33.0/tests/nosewrapper.py
@@ -1,20 +1,13 @@
""" This script simply gets the paths correct for testing eventlet with the
hub extension for Nose."""
-import nose
from os.path import dirname, realpath, abspath
import sys
+import unittest
parent_dir = dirname(dirname(realpath(abspath(__file__))))
if parent_dir not in sys.path:
sys.path.insert(0, parent_dir)
-# hudson does a better job printing the test results if the exit value is 0
-zero_status = '--force-zero-status'
-if zero_status in sys.argv:
- sys.argv.remove(zero_status)
- launch = nose.run
-else:
- launch = nose.main
-
-launch(argv=sys.argv)
+if __name__ == '__main__':
+ unittest.main()
Index: eventlet-0.33.0/tests/__init__.py
===================================================================
--- eventlet-0.33.0.orig/tests/__init__.py
+++ eventlet-0.33.0/tests/__init__.py
@@ -20,7 +20,7 @@ import sys
import unittest
import warnings
-from nose.plugins.skip import SkipTest
+from unittest import SkipTest
import eventlet
from eventlet import tpool
@@ -223,7 +223,6 @@ class LimitedTestCase(unittest.TestCase)
def check_idle_cpu_usage(duration, allowed_part):
if resource is None:
# TODO: use https://code.google.com/p/psutil/
- from nose.plugins.skip import SkipTest
raise SkipTest('CPU usage testing not supported (`import resource`
failed)')
r1 = resource.getrusage(resource.RUSAGE_SELF)
Index: eventlet-0.33.0/tests/dagpool_test.py
===================================================================
--- eventlet-0.33.0.orig/tests/dagpool_test.py
+++ eventlet-0.33.0/tests/dagpool_test.py
@@ -5,7 +5,6 @@
@brief Test DAGPool class
"""
-from nose.tools import *
import eventlet
from eventlet.dagpool import DAGPool, Collision, PropagateError
import six
@@ -13,8 +12,8 @@ from contextlib import contextmanager
import itertools
-# Not all versions of nose.tools.assert_raises() support the usage in this
-# module, but it's straightforward enough to code that explicitly.
+# Not all versions of assert_raises() support the usage in this module,
+# but it's straightforward enough to code that explicitly.
@contextmanager
def assert_raises(exc):
"""exc is an exception class"""
@@ -163,7 +162,7 @@ class Capture(object):
# a set. Make a set containing its elements.
setlist.append(set(subseq))
# Now that we've massaged 'sequence' into 'setlist', compare.
- assert_equal(self.sequence, setlist)
+ assert self.sequence == setlist
# ****************************************************************************
@@ -191,14 +190,14 @@ def test_init():
with check_no_suspend():
results = pool.waitall()
# with no spawn() or post(), waitall() returns preload data
- assert_equals(results, dict(a=1, b=2, c=3))
+ assert results == dict(a=1, b=2, c=3)
# preload sequence of pairs
pool = DAGPool([("d", 4), ("e", 5), ("f", 6)])
# this must not hang
with check_no_suspend():
results = pool.waitall()
- assert_equals(results, dict(d=4, e=5, f=6))
+ assert results == dict(d=4, e=5, f=6)
def test_wait_each_empty():
@@ -216,10 +215,10 @@ def test_wait_each_preload():
with check_no_suspend():
# wait_each() may deliver in arbitrary order; collect into a dict
# for comparison
- assert_equals(dict(pool.wait_each("abc")), dict(a=1, b=2, c=3))
+ assert dict(pool.wait_each("abc")) == dict(a=1, b=2, c=3)
# while we're at it, test wait() for preloaded keys
- assert_equals(pool.wait("bc"), dict(b=2, c=3))
+ assert pool.wait("bc") == dict(b=2, c=3)
def post_each(pool, capture):
@@ -257,7 +256,7 @@ def test_wait_posted():
eventlet.spawn(post_each, pool, capture)
gotten = pool.wait("bcdefg")
capture.add("got all")
- assert_equals(gotten,
+ assert (gotten ==
dict(b=2, c=3,
d="dval", e="eval",
f="fval", g="gval"))
@@ -285,7 +284,7 @@ def test_spawn_collision_spawn():
pool = DAGPool()
pool.spawn("a", (), lambda key, results: "aval")
# hasn't yet even started
- assert_equals(pool.get("a"), None)
+ assert pool.get("a") == None
with assert_raises(Collision):
# Attempting to spawn again with same key should collide even if the
# first spawned greenthread hasn't yet had a chance to run.
@@ -293,7 +292,7 @@ def test_spawn_collision_spawn():
# now let the spawned eventlet run
eventlet.sleep(0)
# should have finished
- assert_equals(pool.get("a"), "aval")
+ assert pool.get("a") == "aval"
with assert_raises(Collision):
# Attempting to spawn with same key collides even when the greenthread
# has completed.
@@ -324,60 +323,60 @@ def test_spawn_multiple():
capture.step()
# but none of them has yet produced a result
for k in "defgh":
- assert_equals(pool.get(k), None)
- assert_equals(set(pool.keys()), set("abc"))
- assert_equals(dict(pool.items()), dict(a=1, b=2, c=3))
- assert_equals(pool.running(), 5)
- assert_equals(set(pool.running_keys()), set("defgh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for(), dict(h=set("defg")))
- assert_equals(pool.waiting_for("d"), set())
- assert_equals(pool.waiting_for("c"), set())
+ assert pool.get(k) == None
+ assert set(pool.keys()) == set("abc")
+ assert dict(pool.items()) == dict(a=1, b=2, c=3)
+ assert pool.running() == 5
+ assert set(pool.running_keys()) == set("defgh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for() == dict(h=set("defg"))
+ assert pool.waiting_for("d") == set()
+ assert pool.waiting_for("c") == set()
with assert_raises(KeyError):
pool.waiting_for("j")
- assert_equals(pool.waiting_for("h"), set("defg"))
+ assert pool.waiting_for("h") == set("defg")
# let one of the upstream greenthreads complete
events["f"].send("fval")
spin()
capture.step()
- assert_equals(pool.get("f"), "fval")
- assert_equals(set(pool.keys()), set("abcf"))
- assert_equals(dict(pool.items()), dict(a=1, b=2, c=3, f="fval"))
- assert_equals(pool.running(), 4)
- assert_equals(set(pool.running_keys()), set("degh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for("h"), set("deg"))
+ assert pool.get("f") == "fval"
+ assert set(pool.keys()) == set("abcf")
+ assert dict(pool.items()) == dict(a=1, b=2, c=3, f="fval")
+ assert pool.running() == 4
+ assert set(pool.running_keys()) == set("degh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for("h") == set("deg")
# now two others
events["e"].send("eval")
events["g"].send("gval")
spin()
capture.step()
- assert_equals(pool.get("e"), "eval")
- assert_equals(pool.get("g"), "gval")
- assert_equals(set(pool.keys()), set("abcefg"))
- assert_equals(dict(pool.items()),
+ assert pool.get("e") == "eval"
+ assert pool.get("g") == "gval"
+ assert set(pool.keys()) == set("abcefg")
+ assert (dict(pool.items()) ==
dict(a=1, b=2, c=3, e="eval", f="fval", g="gval"))
- assert_equals(pool.running(), 2)
- assert_equals(set(pool.running_keys()), set("dh"))
- assert_equals(pool.waiting(), 1)
- assert_equals(pool.waiting_for("h"), set("d"))
+ assert pool.running() == 2
+ assert set(pool.running_keys()) == set("dh")
+ assert pool.waiting() == 1
+ assert pool.waiting_for("h") == set("d")
# last one
events["d"].send("dval")
# make sure both pool greenthreads get a chance to run
spin()
capture.step()
- assert_equals(pool.get("d"), "dval")
- assert_equals(set(pool.keys()), set("abcdefgh"))
- assert_equals(dict(pool.items()),
+ assert pool.get("d") == "dval"
+ assert set(pool.keys()) == set("abcdefgh")
+ assert (dict(pool.items()) ==
dict(a=1, b=2, c=3,
d="dval", e="eval", f="fval", g="gval", h="hval"))
- assert_equals(pool.running(), 0)
- assert_false(pool.running_keys())
- assert_equals(pool.waiting(), 0)
- assert_equals(pool.waiting_for("h"), set())
+ assert pool.running() == 0
+ assert not pool.running_keys()
+ assert pool.waiting() == 0
+ assert pool.waiting_for("h") == set()
capture.validate([
["h got b", "h got c"],
@@ -432,13 +431,13 @@ def test_spawn_many():
spin()
# verify that e completed (also that post(key) within greenthread
# overrides implicit post of return value, which would be None)
- assert_equals(pool.get("e"), "e")
+ assert pool.get("e") == "e"
# With the dependency graph shown above, it is not guaranteed whether b or
# c will complete first. Handle either case.
sequence = capture.sequence[:]
sequence[1:3] = [set([sequence[1].pop(), sequence[2].pop()])]
- assert_equals(sequence,
+ assert (sequence ==
[set(["a done"]),
set(["b done", "c done"]),
set(["d done"]),
@@ -466,7 +465,7 @@ def test_wait_each_all():
for pos in range(len(keys)):
# next value from wait_each()
k, v = next(each)
- assert_equals(k, keys[pos])
+ assert k == keys[pos]
# advance every pool greenlet as far as it can go
spin()
# everything from keys[:pos+1] should have a value by now
@@ -494,7 +493,7 @@ def test_kill():
pool.kill("a")
# didn't run
spin()
- assert_equals(pool.get("a"), None)
+ assert pool.get("a") == None
# killing it forgets about it
with assert_raises(KeyError):
pool.kill("a")
@@ -505,7 +504,7 @@ def test_kill():
with assert_raises(KeyError):
pool.kill("a")
# verify it ran to completion
- assert_equals(pool.get("a"), 2)
+ assert pool.get("a") == 2
def test_post_collision_preload():
@@ -533,7 +532,7 @@ def test_post_collision_spawn():
pool.kill("a")
# now we can post
pool.post("a", 3)
- assert_equals(pool.get("a"), 3)
+ assert pool.get("a") == 3
pool = DAGPool()
pool.spawn("a", (), lambda key, result: 4)
@@ -553,10 +552,10 @@ def test_post_replace():
pool = DAGPool()
pool.post("a", 1)
pool.post("a", 2, replace=True)
- assert_equals(pool.get("a"), 2)
- assert_equals(dict(pool.wait_each("a")), dict(a=2))
- assert_equals(pool.wait("a"), dict(a=2))
- assert_equals(pool["a"], 2)
+ assert pool.get("a") == 2
+ assert dict(pool.wait_each("a")) == dict(a=2)
+ assert pool.wait("a") == dict(a=2)
+ assert pool["a"] == 2
def waitfor(capture, pool, key):
@@ -598,14 +597,14 @@ def test_waitall_exc():
try:
pool.waitall()
except PropagateError as err:
- assert_equals(err.key, "a")
+ assert err.key == "a"
assert isinstance(err.exc, BogusError), \
"exc attribute is {0}, not BogusError".format(err.exc)
- assert_equals(str(err.exc), "bogus")
+ assert str(err.exc) == "bogus"
msg = str(err)
- assert_in("PropagateError(a)", msg)
- assert_in("BogusError", msg)
- assert_in("bogus", msg)
+ assert "PropagateError(a)" in msg
+ assert "BogusError" in msg
+ assert "bogus" in msg
def test_propagate_exc():
@@ -616,20 +615,20 @@ def test_propagate_exc():
try:
pool["c"]
except PropagateError as errc:
- assert_equals(errc.key, "c")
+ assert errc.key == "c"
errb = errc.exc
- assert_equals(errb.key, "b")
+ assert errb.key == "b"
erra = errb.exc
- assert_equals(erra.key, "a")
+ assert erra.key == "a"
assert isinstance(erra.exc, BogusError), \
"exc attribute is {0}, not BogusError".format(erra.exc)
- assert_equals(str(erra.exc), "bogus")
+ assert str(erra.exc) == "bogus"
msg = str(errc)
- assert_in("PropagateError(a)", msg)
- assert_in("PropagateError(b)", msg)
- assert_in("PropagateError(c)", msg)
- assert_in("BogusError", msg)
- assert_in("bogus", msg)
+ assert "PropagateError(a)" in msg
+ assert "PropagateError(b)" in msg
+ assert "PropagateError(c)" in msg
+ assert "BogusError" in msg
+ assert "bogus" in msg
def test_wait_each_exc():
@@ -681,13 +680,13 @@ def test_post_get_exc():
pass
# wait_each_success() filters
- assert_equals(dict(pool.wait_each_success()), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("ab")), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("a")), dict(a=bogua))
- assert_equals(dict(pool.wait_each_success("b")), {})
+ assert dict(pool.wait_each_success()) == dict(a=bogua)
+ assert dict(pool.wait_each_success("ab")) == dict(a=bogua)
+ assert dict(pool.wait_each_success("a")) == dict(a=bogua)
+ assert dict(pool.wait_each_success("b")) == {}
# wait_each_exception() filters the other way
- assert_equals(dict(pool.wait_each_exception()), dict(b=bogub))
- assert_equals(dict(pool.wait_each_exception("ab")), dict(b=bogub))
- assert_equals(dict(pool.wait_each_exception("a")), {})
- assert_equals(dict(pool.wait_each_exception("b")), dict(b=bogub))
+ assert dict(pool.wait_each_exception()) == dict(b=bogub)
+ assert dict(pool.wait_each_exception("ab")) == dict(b=bogub)
+ assert dict(pool.wait_each_exception("a")) == {}
+ assert dict(pool.wait_each_exception("b")) == dict(b=bogub)
++++++ eventlet-0.32.0.tar.gz -> eventlet-0.33.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/NEWS new/eventlet-0.33.0/NEWS
--- old/eventlet-0.32.0/NEWS 2021-09-01 12:49:47.000000000 +0200
+++ new/eventlet-0.33.0/NEWS 2021-11-16 20:59:32.000000000 +0100
@@ -1,3 +1,11 @@
+0.33.0
+======
+* green.thread: unlocked Lock().release() should raise exception, returned
True https://github.com/eventlet/eventlet/issues/697
+* wsgi: Don't break HTTP framing during 100-continue handling
https://github.com/eventlet/eventlet/pull/578
+* Python 3.10 partial support https://github.com/eventlet/eventlet/pull/715
+* greendns: Create a DNS resolver lazily rather than on import
https://github.com/eventlet/eventlet/issues/462
+* ssl: GreenSSLContext minimum_version and maximum_version setters
https://github.com/eventlet/eventlet/issues/726
+
0.32.0
======
* greendns: compatibility with dnspython v2
https://github.com/eventlet/eventlet/pull/722
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/PKG-INFO new/eventlet-0.33.0/PKG-INFO
--- old/eventlet-0.32.0/PKG-INFO 2021-09-01 12:54:37.135779000 +0200
+++ new/eventlet-0.33.0/PKG-INFO 2021-11-16 21:00:11.708561400 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: eventlet
-Version: 0.32.0
+Version: 0.33.0
Summary: Highly concurrent networking library
Home-page: http://eventlet.net
Author: Linden Lab
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/__init__.py
new/eventlet-0.33.0/eventlet/__init__.py
--- old/eventlet-0.32.0/eventlet/__init__.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/__init__.py 2021-11-16 20:59:32.000000000
+0100
@@ -8,7 +8,7 @@
DeprecationWarning,
)
-version_info = (0, 32, 0)
+version_info = (0, 33, 0)
__version__ = '.'.join(map(str, version_info))
# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/green/ssl.py
new/eventlet-0.33.0/eventlet/green/ssl.py
--- old/eventlet-0.32.0/eventlet/green/ssl.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/green/ssl.py 2021-11-16 20:59:32.000000000
+0100
@@ -465,6 +465,16 @@
def verify_mode(self, value):
super(_original_sslcontext,
_original_sslcontext).verify_mode.__set__(self, value)
+ if hasattr(_original_sslcontext, "maximum_version"):
+ @_original_sslcontext.maximum_version.setter
+ def maximum_version(self, value):
+ super(_original_sslcontext,
_original_sslcontext).maximum_version.__set__(self, value)
+
+ if hasattr(_original_sslcontext, "minimum_version"):
+ @_original_sslcontext.minimum_version.setter
+ def minimum_version(self, value):
+ super(_original_sslcontext,
_original_sslcontext).minimum_version.__set__(self, value)
+
SSLContext = GreenSSLContext
if hasattr(__ssl, 'create_default_context'):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/green/thread.py
new/eventlet-0.33.0/eventlet/green/thread.py
--- old/eventlet-0.32.0/eventlet/green/thread.py 2021-09-01
12:49:47.000000000 +0200
+++ new/eventlet-0.33.0/eventlet/green/thread.py 2021-11-16
20:59:32.000000000 +0100
@@ -3,15 +3,16 @@
import six
from eventlet.support import greenlets as greenlet
from eventlet import greenthread
-from eventlet.semaphore import Semaphore as LockType
+from eventlet.lock import Lock
import sys
__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
- 'LockType', '_count']
+ 'LockType', 'Lock', '_count']
error = __thread.error
+LockType = Lock
__threadcount = 0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/greenio/base.py
new/eventlet-0.33.0/eventlet/greenio/base.py
--- old/eventlet-0.32.0/eventlet/greenio/base.py 2021-09-01
12:49:47.000000000 +0200
+++ new/eventlet-0.33.0/eventlet/greenio/base.py 2021-11-16
20:59:32.000000000 +0100
@@ -29,7 +29,10 @@
_original_socket = eventlet.patcher.original('socket').socket
-socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
+if sys.version_info >= (3, 10):
+ socket_timeout = socket.timeout # Really, TimeoutError
+else:
+ socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
def socket_connect(descriptor, address):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/greenio/py3.py
new/eventlet-0.33.0/eventlet/greenio/py3.py
--- old/eventlet-0.32.0/eventlet/greenio/py3.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/greenio/py3.py 2021-11-16 20:59:32.000000000
+0100
@@ -191,9 +191,12 @@
FileIO=GreenFileIO,
os=_original_os,
))
+if hasattr(_original_pyio, 'text_encoding'):
+ _open_environment['text_encoding'] = _original_pyio.text_encoding
+_pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open)
_open = FunctionType(
- six.get_function_code(_original_pyio.open),
+ six.get_function_code(_pyio_open),
_open_environment,
)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/lock.py
new/eventlet-0.33.0/eventlet/lock.py
--- old/eventlet-0.32.0/eventlet/lock.py 1970-01-01 01:00:00.000000000
+0100
+++ new/eventlet-0.33.0/eventlet/lock.py 2021-11-16 20:59:32.000000000
+0100
@@ -0,0 +1,28 @@
+from eventlet.semaphore import Semaphore
+
+
+class Lock(Semaphore):
+
+ """A lock.
+ This is API-compatible with :class:`threading.Lock`.
+
+ It is a context manager, and thus can be used in a with block::
+
+ lock = Lock()
+ with lock:
+ do_some_stuff()
+ """
+
+ def release(self, blocking=True):
+ """Modify behaviour vs :class:`Semaphore` to raise a RuntimeError
+ exception if the value is greater than zero. This corrects behaviour
+ to realign with :class:`threading.Lock`.
+ """
+ if self.counter > 0:
+ raise RuntimeError("release unlocked lock")
+
+ return super(Lock, self).release(blocking=blocking)
+
+ def _at_fork_reinit(self):
+ self.counter = 1
+ self._waiters.clear()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/semaphore.py
new/eventlet-0.33.0/eventlet/semaphore.py
--- old/eventlet-0.32.0/eventlet/semaphore.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/semaphore.py 2021-11-16 20:59:32.000000000
+0100
@@ -39,7 +39,6 @@
if value < 0:
msg = 'Semaphore() expect value >= 0, actual:
{0}'.format(repr(value))
raise ValueError(msg)
- self._original_value = value
self.counter = value
self._waiters = collections.deque()
@@ -52,10 +51,6 @@
params = (self.__class__.__name__, self.counter, len(self._waiters))
return '<%s c=%s _w[%s]>' % params
- def _at_fork_reinit(self):
- self.counter = self._original_value
- self._waiters.clear()
-
def locked(self):
"""Returns true if a call to acquire would block.
"""
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/support/greendns.py
new/eventlet-0.33.0/eventlet/support/greendns.py
--- old/eventlet-0.32.0/eventlet/support/greendns.py 2021-09-01
12:49:47.000000000 +0200
+++ new/eventlet-0.33.0/eventlet/support/greendns.py 2021-11-16
20:59:32.000000000 +0100
@@ -322,7 +322,21 @@
"""
self._hosts = hosts_resolver
self._filename = filename
- self.clear()
+ # NOTE(dtantsur): we cannot create a resolver here since this code is
+ # executed on eventlet import. In an environment without DNS, creating
+ # a Resolver will fail making eventlet unusable at all. See
+ # https://github.com/eventlet/eventlet/issues/736 for details.
+ self._cached_resolver = None
+
+ @property
+ def _resolver(self):
+ if self._cached_resolver is None:
+ self.clear()
+ return self._cached_resolver
+
+ @_resolver.setter
+ def _resolver(self, value):
+ self._cached_resolver = value
def clear(self):
self._resolver = dns.resolver.Resolver(filename=self._filename)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/timeout.py
new/eventlet-0.33.0/eventlet/timeout.py
--- old/eventlet-0.32.0/eventlet/timeout.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/timeout.py 2021-11-16 20:59:32.000000000
+0100
@@ -174,6 +174,11 @@
return fun
+if isinstance(__builtins__, dict): # seen when running tests on py310, but
HOW??
+ _timeout_err = __builtins__.get('TimeoutError', Timeout)
+else:
+ _timeout_err = getattr(__builtins__, 'TimeoutError', Timeout)
+
+
def is_timeout(obj):
- py3err = getattr(__builtins__, 'TimeoutError', Timeout)
- return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, py3err)
+ return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj,
_timeout_err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet/wsgi.py
new/eventlet-0.33.0/eventlet/wsgi.py
--- old/eventlet-0.32.0/eventlet/wsgi.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/eventlet/wsgi.py 2021-11-16 20:59:32.000000000
+0100
@@ -134,8 +134,12 @@
# Reinitialize chunk_length (expect more data)
self.chunk_length = -1
+ @property
+ def should_send_hundred_continue(self):
+ return self.wfile is not None and not
self.is_hundred_continue_response_sent
+
def _do_read(self, reader, length=None):
- if self.wfile is not None and not
self.is_hundred_continue_response_sent:
+ if self.should_send_hundred_continue:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
@@ -152,7 +156,7 @@
return read
def _chunked_read(self, rfile, length=None, use_readline=False):
- if self.wfile is not None and not
self.is_hundred_continue_response_sent:
+ if self.should_send_hundred_continue:
# 100 Continue response
self.send_hundred_continue_response()
self.is_hundred_continue_response_sent = True
@@ -464,9 +468,11 @@
start = time.time()
headers_set = []
headers_sent = []
+ # Grab the request input now; app may try to replace it in the environ
+ request_input = self.environ['eventlet.input']
# Push the headers-sent state into the Input so it won't send a
# 100 Continue response if we've already started a response.
- self.environ['wsgi.input'].headers_sent = headers_sent
+ request_input.headers_sent = headers_sent
wfile = self.wfile
result = None
@@ -563,10 +569,15 @@
result = self.application(self.environ, start_response)
# Set content-length if possible
- if headers_set and \
- not headers_sent and hasattr(result, '__len__') and \
- 'Content-Length' not in [h for h, _v in
headers_set[1]]:
- headers_set[1].append(('Content-Length', str(sum(map(len,
result)))))
+ if headers_set and not headers_sent and hasattr(result,
'__len__'):
+ # We've got a complete final response
+ if 'Content-Length' not in [h for h, _v in headers_set[1]]:
+ headers_set[1].append(('Content-Length',
str(sum(map(len, result)))))
+ if request_input.should_send_hundred_continue:
+ # We've got a complete final response, and never sent
a 100 Continue.
+ # There's no chance we'll need to read the body as we
stream out the
+ # response, so we can be nice and send a Connection:
close header.
+ self.close_connection = 1
towrite = []
towrite_size = 0
@@ -607,11 +618,22 @@
finally:
if hasattr(result, 'close'):
result.close()
- request_input = self.environ['eventlet.input']
+ if request_input.should_send_hundred_continue:
+ # We just sent the final response, no 100 Continue. Client may
or
+ # may not have started to send a body, and if we keep the
connection
+ # open we've seen clients either
+ # * send a body, then start a new request
+ # * skip the body and go straight to a new request
+ # Looks like the most broadly compatible option is to close the
+ # connection and let the client retry.
+ # https://curl.se/mail/lib-2004-08/0002.html
+ # Note that we likely *won't* send a Connection: close header
at this point
+ self.close_connection = 1
+
if (request_input.chunked_input or
request_input.position < (request_input.content_length or
0)):
- # Read and discard body if there was no pending 100-continue
- if not request_input.wfile and self.close_connection == 0:
+ # Read and discard body if connection is going to be reused
+ if self.close_connection == 0:
try:
request_input.discard()
except ChunkReadError as e:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet.egg-info/PKG-INFO
new/eventlet-0.33.0/eventlet.egg-info/PKG-INFO
--- old/eventlet-0.32.0/eventlet.egg-info/PKG-INFO 2021-09-01
12:54:36.000000000 +0200
+++ new/eventlet-0.33.0/eventlet.egg-info/PKG-INFO 2021-11-16
21:00:11.000000000 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: eventlet
-Version: 0.32.0
+Version: 0.33.0
Summary: Highly concurrent networking library
Home-page: http://eventlet.net
Author: Linden Lab
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/eventlet.egg-info/SOURCES.txt
new/eventlet-0.33.0/eventlet.egg-info/SOURCES.txt
--- old/eventlet-0.32.0/eventlet.egg-info/SOURCES.txt 2021-09-01
12:54:36.000000000 +0200
+++ new/eventlet-0.33.0/eventlet.egg-info/SOURCES.txt 2021-11-16
21:00:11.000000000 +0100
@@ -52,6 +52,7 @@
eventlet/event.py
eventlet/greenpool.py
eventlet/greenthread.py
+eventlet/lock.py
eventlet/patcher.py
eventlet/pools.py
eventlet/queue.py
@@ -233,6 +234,7 @@
tests/isolated/patcher_threadpoolexecutor.py
tests/isolated/regular_file_readall.py
tests/isolated/socket_resolve_green.py
+tests/isolated/ssl_context_version_setters.py
tests/isolated/subprocess_exception_identity.py
tests/isolated/subprocess_patched_communicate.py
tests/isolated/tpool_exception_leak.py
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/setup.cfg
new/eventlet-0.33.0/setup.cfg
--- old/eventlet-0.32.0/setup.cfg 2021-09-01 12:54:37.137286000 +0200
+++ new/eventlet-0.33.0/setup.cfg 2021-11-16 21:00:11.710361700 +0100
@@ -1,5 +1,5 @@
[metadata]
-description-file = README.rst
+description_file = README.rst
[wheel]
universal = True
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/__init__.py
new/eventlet-0.33.0/tests/__init__.py
--- old/eventlet-0.32.0/tests/__init__.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/__init__.py 2021-11-16 20:59:32.000000000
+0100
@@ -383,7 +383,7 @@
def check_is_timeout(obj):
value_text = getattr(obj, 'is_timeout', '(missing)')
- assert obj.is_timeout, 'type={0} str={1} .is_timeout={2}'.format(type(obj), str(obj), value_text)
+ assert eventlet.is_timeout(obj), 'type={0} str={1} .is_timeout={2}'.format(type(obj), str(obj), value_text)
@contextlib.contextmanager
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/backdoor_test.py
new/eventlet-0.33.0/tests/backdoor_test.py
--- old/eventlet-0.32.0/tests/backdoor_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/backdoor_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -1,5 +1,6 @@
import os
import os.path
+import sys
import eventlet
@@ -22,7 +23,9 @@
def _run_test_on_client_and_server(self, client, server_thread):
f = client.makefile('rw')
assert 'Python' in f.readline()
- f.readline() # build info
+ if sys.version_info < (3, 10):
+ # Starting in py310, build info is included in version line
+ f.readline() # build info
f.readline() # help info
assert 'InteractiveConsole' in f.readline()
self.assertEqual('>>> ', f.read(4))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/greendns_test.py
new/eventlet-0.33.0/tests/greendns_test.py
--- old/eventlet-0.32.0/tests/greendns_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/greendns_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -297,8 +297,11 @@
def test_clear(self):
rp = greendns.ResolverProxy()
+ assert rp._cached_resolver is None
resolver = rp._resolver
+ assert resolver is not None
rp.clear()
+ assert rp._resolver is not None
assert rp._resolver != resolver
def _make_mock_hostsresolver(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/eventlet-0.32.0/tests/isolated/ssl_context_version_setters.py
new/eventlet-0.33.0/tests/isolated/ssl_context_version_setters.py
--- old/eventlet-0.32.0/tests/isolated/ssl_context_version_setters.py
1970-01-01 01:00:00.000000000 +0100
+++ new/eventlet-0.33.0/tests/isolated/ssl_context_version_setters.py
2021-11-16 20:59:32.000000000 +0100
@@ -0,0 +1,12 @@
+__test__ = False
+
+if __name__ == "__main__":
+ import eventlet
+ eventlet.monkey_patch()
+ import ssl
+
+ context = ssl.create_default_context()
+ context.minimum_version = ssl.TLSVersion.TLSv1_2
+ context.maximum_version = ssl.TLSVersion.TLSv1_2
+
+ print("pass")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/semaphore_test.py
new/eventlet-0.33.0/tests/semaphore_test.py
--- old/eventlet-0.32.0/tests/semaphore_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/semaphore_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -42,27 +42,6 @@
sem = eventlet.Semaphore()
self.assertRaises(ValueError, sem.acquire, blocking=False, timeout=1)
- def test_reinit(self):
- # py39+ expects locks to have a _at_fork_reinit() method; since we
- # patch in Semaphores in eventlet.green.thread, they need it, too
- sem = eventlet.Semaphore()
- sem.acquire()
- sem._at_fork_reinit()
- self.assertEqual(sem.acquire(blocking=False), True)
- self.assertEqual(sem.acquire(blocking=False), False)
-
- sem = eventlet.Semaphore(0)
- sem.release()
- sem._at_fork_reinit()
- self.assertEqual(sem.acquire(blocking=False), False)
-
- sem = eventlet.Semaphore(2)
- sem.acquire()
- sem._at_fork_reinit()
- self.assertEqual(sem.acquire(blocking=False), True)
- self.assertEqual(sem.acquire(blocking=False), True)
- self.assertEqual(sem.acquire(blocking=False), False)
-
def test_semaphore_contention():
g_mutex = eventlet.Semaphore()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/ssl_test.py
new/eventlet-0.33.0/tests/ssl_test.py
--- old/eventlet-0.32.0/tests/ssl_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/ssl_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -395,3 +395,7 @@
client.send(b"check_hostname works")
client.recv(64)
server_coro.wait()
+
+ @tests.skip_if(sys.version_info < (3, 7))
+ def test_context_version_setters(self):
+ tests.run_isolated("ssl_context_version_setters.py")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/thread_test.py
new/eventlet-0.33.0/tests/thread_test.py
--- old/eventlet-0.32.0/tests/thread_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/thread_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -5,6 +5,7 @@
from eventlet import corolocal
from eventlet import event
from eventlet import greenthread
+from eventlet import patcher
from eventlet.green import thread
import six
@@ -99,3 +100,26 @@
gc.collect()
# at this point all our coros have terminated
self.assertEqual(len(refs), 1)
+
+
+def test_compat_lock_release():
+ # https://github.com/eventlet/eventlet/issues/697
+ for mod in (patcher.original("threading"), thread):
+ try:
+ mod.Lock().release()
+ except RuntimeError as e:
+ # python3
+ assert "release unlocked lock" in str(e).lower(), str((mod, e))
+ except thread.error as e:
+ # python2.7
+ assert "release unlocked lock" in str(e).lower(), str((mod, e))
+
+
+def test_reinit():
+ # py39+ expects locks to have a _at_fork_reinit() method
+ # https://github.com/eventlet/eventlet/pull/721#pullrequestreview-769377850
+ lk = thread.Lock()
+ lk.acquire()
+ lk._at_fork_reinit()
+ assert lk.acquire(blocking=False)
+ assert not lk.acquire(blocking=False)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/eventlet-0.32.0/tests/wsgi_test.py
new/eventlet-0.33.0/tests/wsgi_test.py
--- old/eventlet-0.32.0/tests/wsgi_test.py 2021-09-01 12:49:47.000000000
+0200
+++ new/eventlet-0.33.0/tests/wsgi_test.py 2021-11-16 20:59:32.000000000
+0100
@@ -608,6 +608,57 @@
self.assertEqual('keep-alive', result2.headers_lower['connection'])
sock.close()
+ def test_018b_http_10_keepalive_framing(self):
+ # verify that if an http/1.0 client sends connection: keep-alive
+ # that we don't mangle the request framing if the app doesn't read the request
+ def app(environ, start_response):
+ resp_body = {
+ '/1': b'first response',
+ '/2': b'second response',
+ '/3': b'third response',
+ }.get(environ['PATH_INFO'])
+ if resp_body is None:
+ resp_body = 'Unexpected path: ' + environ['PATH_INFO']
+ if six.PY3:
+ resp_body = resp_body.encode('latin1')
+ # Never look at wsgi.input!
+ start_response('200 OK', [('Content-type', 'text/plain')])
+ return [resp_body]
+
+ self.site.application = app
+ sock = eventlet.connect(self.server_addr)
+ req_body = b'GET /tricksy HTTP/1.1\r\n'
+ body_len = str(len(req_body)).encode('ascii')
+
+ sock.sendall(b'PUT /1 HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n'
+ b'Content-Length: ' + body_len + b'\r\n\r\n' + req_body)
+ result1 = read_http(sock)
+ self.assertEqual(b'first response', result1.body)
+ self.assertEqual(result1.headers_original.get('Connection'),
'keep-alive')
+
+ sock.sendall(b'PUT /2 HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n'
+ b'Content-Length: ' + body_len + b'\r\nExpect: 100-continue\r\n\r\n')
+ # Client may have a short timeout waiting on that 100 Continue
+ # and basically immediately send its body
+ sock.sendall(req_body)
+ result2 = read_http(sock)
+ self.assertEqual(b'second response', result2.body)
+ self.assertEqual(result2.headers_original.get('Connection'), 'close')
+
+ sock.sendall(b'PUT /3 HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n')
+ with self.assertRaises(ConnectionClosed):
+ read_http(sock)
+ sock.close()
+
+ # retry
+ sock = eventlet.connect(self.server_addr)
sock.sendall(b'PUT /3 HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n')
+ result3 = read_http(sock)
+ self.assertEqual(b'third response', result3.body)
+ self.assertEqual(result3.headers_original.get('Connection'), 'close')
+
+ sock.close()
+
def test_019_fieldstorage_compat(self):
def use_fieldstorage(environ, start_response):
cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
@@ -756,9 +807,23 @@
b'Expect: 100-continue\r\n\r\n')
fd.flush()
result = read_http(sock)
+ # No "100 Continue" -- straight to final response
self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed')
self.assertEqual(result.body, b'failure')
+ self.assertEqual(result.headers_original.get('Connection'), 'close')
+ # Client may still try to send the body
+ fd.write(b'x' * 25)
+ fd.flush()
+ # But if they keep using this socket, it's going to close on them eventually
+ fd.write(b'x' * 25)
+ with self.assertRaises(socket.error) as caught:
+ fd.flush()
+ self.assertEqual(caught.exception.errno, errno.EPIPE)
+ sock.close()
+ sock = eventlet.connect(self.server_addr)
+ fd = sock.makefile('rwb')
+ # If we send the "100 Continue", we can pipeline requests through the one connection
for expect_value in ('100-continue', '100-Continue'):
fd.write(
'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n'
@@ -767,6 +832,8 @@
header_lines = []
while True:
line = fd.readline()
+ if not line:
+ raise ConnectionClosed
if line == b'\r\n':
break
else:
@@ -775,11 +842,14 @@
header_lines = []
while True:
line = fd.readline()
+ if not line:
+ raise ConnectionClosed
if line == b'\r\n':
break
else:
header_lines.append(line)
assert header_lines[0].startswith(b'HTTP/1.1 200 OK')
+ assert 'Connection: close' not in header_lines
assert fd.read(7) == b'testing'
fd.close()
sock.close()
@@ -806,6 +876,14 @@
result = read_http(sock)
self.assertEqual(result.status, 'HTTP/1.1 417 Expectation Failed')
self.assertEqual(result.body, b'failure')
+ self.assertEqual(result.headers_original.get('Connection'), 'close')
+ # At this point, the client needs to either kill the connection or send the bytes
+ # because even though the server sent the response without reading the body,
+ # it has no way of knowing whether the client already started sending or not
+ sock.close()
+ sock = eventlet.connect(self.server_addr)
+ fd = sock.makefile('rwb')
+
fd.write(
b'PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\n'
b'Expect: 100-continue\r\n\r\ntesting')
++++++ newdnspython.patch ++++++
--- /var/tmp/diff_new_pack.Hx8NKw/_old 2021-12-23 17:53:31.427710724 +0100
+++ /var/tmp/diff_new_pack.Hx8NKw/_new 2021-12-23 17:53:31.431710726 +0100
@@ -1,9 +1,9 @@
-Index: eventlet-0.32.0/eventlet/support/greendns.py
+Index: eventlet-0.33.0/eventlet/support/greendns.py
===================================================================
---- eventlet-0.32.0.orig/eventlet/support/greendns.py
-+++ eventlet-0.32.0/eventlet/support/greendns.py
-@@ -325,7 +325,7 @@ class ResolverProxy(object):
- self.clear()
+--- eventlet-0.33.0.orig/eventlet/support/greendns.py
++++ eventlet-0.33.0/eventlet/support/greendns.py
+@@ -339,7 +339,7 @@ class ResolverProxy(object):
+ self._cached_resolver = value
def clear(self):
- self._resolver = dns.resolver.Resolver(filename=self._filename)
@@ -11,11 +11,11 @@
self._resolver.cache = dns.resolver.LRUCache()
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
-Index: eventlet-0.32.0/tests/greendns_test.py
+Index: eventlet-0.33.0/tests/greendns_test.py
===================================================================
---- eventlet-0.32.0.orig/tests/greendns_test.py
-+++ eventlet-0.32.0/tests/greendns_test.py
-@@ -885,7 +885,7 @@ class TinyDNSTests(tests.LimitedTestCase
+--- eventlet-0.33.0.orig/tests/greendns_test.py
++++ eventlet-0.33.0/tests/greendns_test.py
+@@ -888,7 +888,7 @@ class TinyDNSTests(tests.LimitedTestCase
# https://github.com/eventlet/eventlet/issues/499
# None means we don't want the server to find the IP
with tests.dns_tcp_server(None) as dnsaddr:
@@ -24,7 +24,7 @@
resolver.nameservers = [dnsaddr[0]]
resolver.nameserver_ports[dnsaddr[0]] = dnsaddr[1]
-@@ -896,7 +896,7 @@ class TinyDNSTests(tests.LimitedTestCase
+@@ -899,7 +899,7 @@ class TinyDNSTests(tests.LimitedTestCase
# https://github.com/eventlet/eventlet/issues/499
expected_ip = "192.168.1.1"
with tests.dns_tcp_server(expected_ip) as dnsaddr: