Author: Ronan Lamy <[email protected]>
Branch: py3.6
Changeset: r97063:64416f48a678
Date: 2019-08-05 15:35 +0000
http://bitbucket.org/pypy/pypy/changeset/64416f48a678/
Log: Merged in stdlib-3.6.9 (pull request #661)
Update stdlib to 3.6.9
diff too long, truncating to 2000 out of 60350 lines
diff --git a/lib-python/3/_collections_abc.py b/lib-python/3/_collections_abc.py
--- a/lib-python/3/_collections_abc.py
+++ b/lib-python/3/_collections_abc.py
@@ -901,6 +901,9 @@
def index(self, value, start=0, stop=None):
'''S.index(value, [start, [stop]]) -> integer -- return first index of
value.
Raises ValueError if the value is not present.
+
+ Supporting start and stop arguments is optional, but
+ recommended.
'''
if start is not None and start < 0:
start = max(len(self) + start, 0)
@@ -910,7 +913,8 @@
i = start
while stop is None or i < stop:
try:
- if self[i] == value:
+ v = self[i]
+ if v is value or v == value:
return i
except IndexError:
break
@@ -919,7 +923,7 @@
def count(self, value):
'S.count(value) -> integer -- return number of occurrences of value'
- return sum(1 for v in self if v == value)
+ return sum(1 for v in self if v is value or v == value)
Sequence.register(tuple)
Sequence.register(str)
diff --git a/lib-python/3/_osx_support.py b/lib-python/3/_osx_support.py
--- a/lib-python/3/_osx_support.py
+++ b/lib-python/3/_osx_support.py
@@ -17,7 +17,7 @@
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
- 'PY_CORE_CFLAGS')
+ 'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
@@ -212,7 +212,7 @@
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
- flags = re.sub(r'-arch\s+\w+\s', ' ', flags, re.ASCII)
+ flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
diff --git a/lib-python/3/_pydecimal.py b/lib-python/3/_pydecimal.py
--- a/lib-python/3/_pydecimal.py
+++ b/lib-python/3/_pydecimal.py
@@ -2062,7 +2062,7 @@
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
- 'and 2nd argument must be nonzero ;'
+ 'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
diff --git a/lib-python/3/_pyio.py b/lib-python/3/_pyio.py
--- a/lib-python/3/_pyio.py
+++ b/lib-python/3/_pyio.py
@@ -2064,6 +2064,7 @@
self.buffer.write(b)
if self._line_buffering and (haslf or "\r" in s):
self.flush()
+ self._set_decoded_chars('')
self._snapshot = None
if self._decoder:
self._decoder.reset()
diff --git a/lib-python/3/_threading_local.py b/lib-python/3/_threading_local.py
--- a/lib-python/3/_threading_local.py
+++ b/lib-python/3/_threading_local.py
@@ -56,11 +56,7 @@
>>> class MyLocal(local):
... number = 2
- ... initialized = False
... def __init__(self, **kw):
- ... if self.initialized:
- ... raise SystemError('__init__ called too many times')
- ... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
@@ -97,7 +93,7 @@
>>> thread.start()
>>> thread.join()
>>> log
- [[('color', 'red'), ('initialized', True)], 11]
+ [[('color', 'red')], 11]
without affecting this thread's data:
diff --git a/lib-python/3/abc.py b/lib-python/3/abc.py
--- a/lib-python/3/abc.py
+++ b/lib-python/3/abc.py
@@ -129,8 +129,8 @@
# external code.
_abc_invalidation_counter = 0
- def __new__(mcls, name, bases, namespace):
- cls = super().__new__(mcls, name, bases, namespace)
+ def __new__(mcls, name, bases, namespace, **kwargs):
+ cls = super().__new__(mcls, name, bases, namespace, **kwargs)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
@@ -170,9 +170,11 @@
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__qualname__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
- for name in sorted(cls.__dict__.keys()):
+ for name in sorted(cls.__dict__):
if name.startswith("_abc_"):
value = getattr(cls, name)
+ if isinstance(value, WeakSet):
+ value = set(value)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
diff --git a/lib-python/3/aifc.py b/lib-python/3/aifc.py
--- a/lib-python/3/aifc.py
+++ b/lib-python/3/aifc.py
@@ -322,6 +322,7 @@
else:
raise Error('not an AIFF or AIFF-C file')
self._comm_chunk_read = 0
+ self._ssnd_chunk = None
while 1:
self._ssnd_seek_needed = 1
try:
diff --git a/lib-python/3/antigravity.py b/lib-python/3/antigravity.py
--- a/lib-python/3/antigravity.py
+++ b/lib-python/3/antigravity.py
@@ -11,7 +11,7 @@
37.857713 -122.544543
'''
- # http://xkcd.com/426/
+ # https://xkcd.com/426/
h = hashlib.md5(datedow).hexdigest()
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
diff --git a/lib-python/3/argparse.py b/lib-python/3/argparse.py
--- a/lib-python/3/argparse.py
+++ b/lib-python/3/argparse.py
@@ -325,7 +325,11 @@
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
- part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
+ part_regexp = (
+ r'\(.*?\)+(?=\s|$)|'
+ r'\[.*?\]+(?=\s|$)|'
+ r'\S+'
+ )
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
diff --git a/lib-python/3/asyncio/base_events.py
b/lib-python/3/asyncio/base_events.py
--- a/lib-python/3/asyncio/base_events.py
+++ b/lib-python/3/asyncio/base_events.py
@@ -54,6 +54,11 @@
_FATAL_ERROR_IGNORE = (BrokenPipeError,
ConnectionResetError, ConnectionAbortedError)
+_HAS_IPv6 = hasattr(socket, 'AF_INET6')
+
+# Maximum timeout passed to select to avoid OS limitations
+MAXIMUM_SELECT_TIMEOUT = 24 * 3600
+
def _format_handle(handle):
cb = handle._callback
@@ -84,18 +89,24 @@
'SO_REUSEPORT defined but not implemented.')
-def _is_stream_socket(sock):
- # Linux's socket.type is a bitmask that can include extra info
- # about socket, therefore we can't do simple
- # `sock_type == socket.SOCK_STREAM`.
- return (sock.type & socket.SOCK_STREAM) == socket.SOCK_STREAM
+def _is_stream_socket(sock_type):
+ if hasattr(socket, 'SOCK_NONBLOCK'):
+ # Linux's socket.type is a bitmask that can include extra info
+ # about socket (like SOCK_NONBLOCK bit), therefore we can't do simple
+ # `sock_type == socket.SOCK_STREAM`, see
+ # https://github.com/torvalds/linux/blob/v4.13/include/linux/net.h#L77
+ # for more details.
+ return (sock_type & 0xF) == socket.SOCK_STREAM
+ else:
+ return sock_type == socket.SOCK_STREAM
-def _is_dgram_socket(sock):
- # Linux's socket.type is a bitmask that can include extra info
- # about socket, therefore we can't do simple
- # `sock_type == socket.SOCK_DGRAM`.
- return (sock.type & socket.SOCK_DGRAM) == socket.SOCK_DGRAM
+def _is_dgram_socket(sock_type):
+ if hasattr(socket, 'SOCK_NONBLOCK'):
+ # See the comment in `_is_stream_socket`.
+ return (sock_type & 0xF) == socket.SOCK_DGRAM
+ else:
+ return sock_type == socket.SOCK_DGRAM
def _ipaddr_info(host, port, family, type, proto):
@@ -108,14 +119,9 @@
host is None:
return None
- if type == socket.SOCK_STREAM:
- # Linux only:
- # getaddrinfo() can raise when socket.type is a bit mask.
- # So if socket.type is a bit mask of SOCK_STREAM, and say
- # SOCK_NONBLOCK, we simply return None, which will trigger
- # a call to getaddrinfo() letting it process this request.
+ if _is_stream_socket(type):
proto = socket.IPPROTO_TCP
- elif type == socket.SOCK_DGRAM:
+ elif _is_dgram_socket(type):
proto = socket.IPPROTO_UDP
else:
return None
@@ -135,7 +141,7 @@
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
- if hasattr(socket, 'AF_INET6'):
+ if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
@@ -151,7 +157,10 @@
try:
socket.inet_pton(af, host)
# The host has already been resolved.
- return af, type, proto, '', (host, port)
+ if _HAS_IPv6 and af == socket.AF_INET6:
+ return af, type, proto, '', (host, port, 0, 0)
+ else:
+ return af, type, proto, '', (host, port)
except OSError:
pass
@@ -173,6 +182,17 @@
proto=proto, flags=flags)
+if hasattr(socket, 'TCP_NODELAY'):
+ def _set_nodelay(sock):
+ if (sock.family in {socket.AF_INET, socket.AF_INET6} and
+ _is_stream_socket(sock.type) and
+ sock.proto == socket.IPPROTO_TCP):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+else:
+ def _set_nodelay(sock):
+ pass
+
+
def _run_until_complete_cb(fut):
exc = fut._exception
if (isinstance(exc, BaseException)
@@ -359,10 +379,7 @@
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
- self.create_task(agen.aclose())
- # Wake up the loop if the finalizer was called from
- # a different thread.
- self._write_to_self()
+ self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
@@ -459,7 +476,8 @@
# local task.
future.exception()
raise
- future.remove_done_callback(_run_until_complete_cb)
+ finally:
+ future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
@@ -788,7 +806,7 @@
if sock is None:
raise ValueError(
'host and port was not specified and no sock specified')
- if not _is_stream_socket(sock):
+ if not _is_stream_socket(sock.type):
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
@@ -840,7 +858,7 @@
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
- if not _is_dgram_socket(sock):
+ if not _is_dgram_socket(sock.type):
raise ValueError(
'A UDP Socket was expected, got {!r}'.format(sock))
if (local_addr or remote_addr or
@@ -995,7 +1013,6 @@
raise ValueError(
'host/port and sock can not be specified at the same time')
- AF_INET6 = getattr(socket, 'AF_INET6', 0)
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
@@ -1035,7 +1052,9 @@
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
- if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
+ if (_HAS_IPv6 and
+ af == socket.AF_INET6 and
+ hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
@@ -1053,7 +1072,7 @@
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
- if not _is_stream_socket(sock):
+ if not _is_stream_socket(sock.type):
raise ValueError(
'A Stream Socket was expected, got {!r}'.format(sock))
sockets = [sock]
@@ -1077,7 +1096,7 @@
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
- if not _is_stream_socket(sock):
+ if not _is_stream_socket(sock.type):
raise ValueError(
'A Stream Socket was expected, got {!r}'.format(sock))
@@ -1151,6 +1170,7 @@
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
+ debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
@@ -1158,7 +1178,7 @@
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
- if self._debug:
+ if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
@@ -1180,6 +1200,7 @@
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
+ debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
@@ -1188,7 +1209,7 @@
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
- if self._debug:
+ if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
@@ -1221,6 +1242,11 @@
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
+ This default handler logs the error message and other
+ context-dependent information. In debug mode, a truncated
+ stack trace is also appended showing where the given object
+ (e.g. a handle or future or task) was created, if any.
+
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
@@ -1363,7 +1389,7 @@
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
- timeout = max(0, when - self.time())
+ timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
if self._debug and timeout != 0:
t0 = self.time()
diff --git a/lib-python/3/asyncio/constants.py
b/lib-python/3/asyncio/constants.py
--- a/lib-python/3/asyncio/constants.py
+++ b/lib-python/3/asyncio/constants.py
@@ -5,3 +5,8 @@
# Seconds to wait before retrying accept().
ACCEPT_RETRY_DELAY = 1
+
+# Number of stack entries to capture in debug mode.
# The larger the number, the slower the operation in debug mode
+# (see extract_stack() in events.py)
+DEBUG_STACK_DEPTH = 10
diff --git a/lib-python/3/asyncio/coroutines.py
b/lib-python/3/asyncio/coroutines.py
--- a/lib-python/3/asyncio/coroutines.py
+++ b/lib-python/3/asyncio/coroutines.py
@@ -10,6 +10,7 @@
import types
from . import compat
+from . import constants
from . import events
from . import base_futures
from .log import logger
@@ -91,7 +92,7 @@
assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
self.gen = gen
self.func = func # Used to unwrap @coroutine decorator
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ self._source_traceback = events.extract_stack(sys._getframe(1))
self.__name__ = getattr(gen, '__name__', None)
self.__qualname__ = getattr(gen, '__qualname__', None)
@@ -183,8 +184,9 @@
tb = getattr(self, '_source_traceback', ())
if tb:
tb = ''.join(traceback.format_list(tb))
- msg += ('\nCoroutine object created at '
- '(most recent call last):\n')
+ msg += (f'\nCoroutine object created at '
+ f'(most recent call last, truncated to '
+ f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
msg += tb.rstrip()
logger.error(msg)
@@ -197,7 +199,7 @@
"""
if _inspect_iscoroutinefunction(func):
# In Python 3.5 that's all we need to do for coroutines
- # defiend with "async def".
+ # defined with "async def".
# Wrapping in CoroWrapper will happen via
# 'sys.set_coroutine_wrapper' function.
return func
@@ -308,18 +310,25 @@
if coro_name is None:
coro_name = events._format_callback(func, (), {})
- try:
+ coro_code = None
+ if hasattr(coro, 'cr_code') and coro.cr_code:
+ coro_code = coro.cr_code
+ elif hasattr(coro, 'gi_code') and coro.gi_code:
coro_code = coro.gi_code
- except AttributeError:
- coro_code = coro.cr_code
- try:
+ coro_frame = None
+ if hasattr(coro, 'cr_frame') and coro.cr_frame:
+ coro_frame = coro.cr_frame
+ elif hasattr(coro, 'gi_frame') and coro.gi_frame:
coro_frame = coro.gi_frame
- except AttributeError:
- coro_frame = coro.cr_frame
- filename = coro_code.co_filename
+ filename = '<empty co_filename>'
+ if coro_code and coro_code.co_filename:
+ filename = coro_code.co_filename
+
lineno = 0
+ coro_repr = coro_name
+
if (isinstance(coro, CoroWrapper) and
not inspect.isgeneratorfunction(coro.func) and
coro.func is not None):
@@ -336,7 +345,7 @@
lineno = coro_frame.f_lineno
coro_repr = ('%s running at %s:%s'
% (coro_name, filename, lineno))
- else:
+ elif coro_code:
lineno = coro_code.co_firstlineno
coro_repr = ('%s done, defined at %s:%s'
% (coro_name, filename, lineno))
diff --git a/lib-python/3/asyncio/events.py b/lib-python/3/asyncio/events.py
--- a/lib-python/3/asyncio/events.py
+++ b/lib-python/3/asyncio/events.py
@@ -19,7 +19,8 @@
import threading
import traceback
-from asyncio import compat
+from . import compat
+from . import constants
def _get_function_source(func):
@@ -57,10 +58,10 @@
suffix = _format_args_and_kwargs(args, kwargs) + suffix
return _format_callback(func.func, func.args, func.keywords, suffix)
- if hasattr(func, '__qualname__'):
- func_repr = getattr(func, '__qualname__')
- elif hasattr(func, '__name__'):
- func_repr = getattr(func, '__name__')
+ if hasattr(func, '__qualname__') and func.__qualname__:
+ func_repr = func.__qualname__
+ elif hasattr(func, '__name__') and func.__name__:
+ func_repr = func.__name__
else:
func_repr = repr(func)
@@ -77,6 +78,23 @@
return func_repr
+def extract_stack(f=None, limit=None):
+ """Replacement for traceback.extract_stack() that only does the
+ necessary work for asyncio debug mode.
+ """
+ if f is None:
+ f = sys._getframe().f_back
+ if limit is None:
+ # Limit the amount of work to a reasonable amount, as extract_stack()
+ # can be called for each coroutine and future in debug mode.
+ limit = constants.DEBUG_STACK_DEPTH
+ stack = traceback.StackSummary.extract(traceback.walk_stack(f),
+ limit=limit,
+ lookup_lines=False)
+ stack.reverse()
+ return stack
+
+
class Handle:
"""Object returned by callback registration methods."""
@@ -90,7 +108,7 @@
self._cancelled = False
self._repr = None
if self._loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ self._source_traceback = extract_stack(sys._getframe(1))
else:
self._source_traceback = None
@@ -611,8 +629,7 @@
# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
- _loop = None
- _pid = None
+ loop_pid = (None, None)
_running_loop = _RunningLoop()
@@ -624,8 +641,8 @@
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
- running_loop = _running_loop._loop
- if running_loop is not None and _running_loop._pid == os.getpid():
+ running_loop, pid = _running_loop.loop_pid
+ if running_loop is not None and pid == os.getpid():
return running_loop
@@ -635,8 +652,7 @@
This is a low-level function intended to be used by event loops.
This function is thread-specific.
"""
- _running_loop._pid = os.getpid()
- _running_loop._loop = loop
+ _running_loop.loop_pid = (loop, os.getpid())
def _init_event_loop_policy():
diff --git a/lib-python/3/asyncio/futures.py b/lib-python/3/asyncio/futures.py
--- a/lib-python/3/asyncio/futures.py
+++ b/lib-python/3/asyncio/futures.py
@@ -123,11 +123,13 @@
Differences:
+ - This class is not thread-safe.
+
- result() and exception() do not take a timeout argument and
raise an exception when the future isn't done yet.
- Callbacks registered with add_done_callback() are always called
- via the event loop's call_soon_threadsafe().
+ via the event loop's call_soon().
- This class is not compatible with the wait() and as_completed()
methods in the concurrent.futures package.
@@ -152,8 +154,7 @@
# `yield Future()` (incorrect).
_asyncio_future_blocking = False
- _log_traceback = False # Used for Python 3.4 and later
- _tb_logger = None # Used for Python 3.3 only
+ _log_traceback = False
def __init__(self, *, loop=None):
"""Initialize the future.
@@ -168,7 +169,7 @@
self._loop = loop
self._callbacks = []
if self._loop.get_debug():
- self._source_traceback = traceback.extract_stack(sys._getframe(1))
+ self._source_traceback = events.extract_stack(sys._getframe(1))
_repr_info = base_futures._future_repr_info
@@ -202,6 +203,7 @@
change the future's state to cancelled, schedule the callbacks and
return True.
"""
+ self._log_traceback = False
if self._state != _PENDING:
return False
self._state = _CANCELLED
@@ -248,9 +250,6 @@
if self._state != _FINISHED:
raise InvalidStateError('Result is not ready.')
self._log_traceback = False
- if self._tb_logger is not None:
- self._tb_logger.clear()
- self._tb_logger = None
if self._exception is not None:
raise self._exception
return self._result
@@ -268,9 +267,6 @@
if self._state != _FINISHED:
raise InvalidStateError('Exception is not set.')
self._log_traceback = False
- if self._tb_logger is not None:
- self._tb_logger.clear()
- self._tb_logger = None
return self._exception
def add_done_callback(self, fn):
@@ -423,6 +419,9 @@
source_loop.call_soon_threadsafe(source.cancel)
def _call_set_state(source):
+ if (destination.cancelled() and
+ dest_loop is not None and dest_loop.is_closed()):
+ return
if dest_loop is None or dest_loop is source_loop:
_set_state(destination, source)
else:
diff --git a/lib-python/3/asyncio/locks.py b/lib-python/3/asyncio/locks.py
--- a/lib-python/3/asyncio/locks.py
+++ b/lib-python/3/asyncio/locks.py
@@ -172,12 +172,22 @@
fut = self._loop.create_future()
self._waiters.append(fut)
+
+ # Finally block should be called before the CancelledError
+ # handling as we don't want CancelledError to call
+ # _wake_up_first() and attempt to wake up itself.
try:
- yield from fut
- self._locked = True
- return True
- finally:
- self._waiters.remove(fut)
+ try:
+ yield from fut
+ finally:
+ self._waiters.remove(fut)
+ except futures.CancelledError:
+ if not self._locked:
+ self._wake_up_first()
+ raise
+
+ self._locked = True
+ return True
def release(self):
"""Release a lock.
@@ -192,14 +202,23 @@
"""
if self._locked:
self._locked = False
- # Wake up the first waiter who isn't cancelled.
- for fut in self._waiters:
- if not fut.done():
- fut.set_result(True)
- break
+ self._wake_up_first()
else:
raise RuntimeError('Lock is not acquired.')
+ def _wake_up_first(self):
+ """Wake up the first waiter if it isn't done."""
+ try:
+ fut = next(iter(self._waiters))
+ except StopIteration:
+ return
+
+ # .done() necessarily means that a waiter will wake up later on and
+ # either take the lock, or, if it was cancelled and lock wasn't
+ # taken already, will hit this again and wake up a new waiter.
+ if not fut.done():
+ fut.set_result(True)
+
class Event:
"""Asynchronous equivalent to threading.Event.
@@ -330,12 +349,16 @@
finally:
# Must reacquire lock even if wait is cancelled
+ cancelled = False
while True:
try:
yield from self.acquire()
break
except futures.CancelledError:
- pass
+ cancelled = True
+
+ if cancelled:
+ raise futures.CancelledError
@coroutine
def wait_for(self, predicate):
diff --git a/lib-python/3/asyncio/proactor_events.py
b/lib-python/3/asyncio/proactor_events.py
--- a/lib-python/3/asyncio/proactor_events.py
+++ b/lib-python/3/asyncio/proactor_events.py
@@ -156,29 +156,29 @@
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
self._paused = False
+ self._reschedule_on_resume = False
self._loop.call_soon(self._loop_reading)
def pause_reading(self):
- if self._closing:
- raise RuntimeError('Cannot pause_reading() when closing')
- if self._paused:
- raise RuntimeError('Already paused')
+ if self._closing or self._paused:
+ return
self._paused = True
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
- if not self._paused:
- raise RuntimeError('Not paused')
+ if self._closing or not self._paused:
+ return
self._paused = False
- if self._closing:
- return
- self._loop.call_soon(self._loop_reading, self._read_fut)
+ if self._reschedule_on_resume:
+ self._loop.call_soon(self._loop_reading, self._read_fut)
+ self._reschedule_on_resume = False
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _loop_reading(self, fut=None):
if self._paused:
+ self._reschedule_on_resume = True
return
data = None
@@ -232,8 +232,9 @@
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
- raise TypeError('data argument must be byte-ish (%r)',
- type(data))
+ msg = ("data argument must be a bytes-like object, not '%s'" %
+ type(data).__name__)
+ raise TypeError(msg)
if self._eof_written:
raise RuntimeError('write_eof() already called')
@@ -349,6 +350,11 @@
transports.Transport):
"""Transport for connected sockets."""
+ def __init__(self, loop, sock, protocol, waiter=None,
+ extra=None, server=None):
+ super().__init__(loop, sock, protocol, waiter, extra, server)
+ base_events._set_nodelay(sock)
+
def _set_extra(self, sock):
self._extra['socket'] = sock
try:
diff --git a/lib-python/3/asyncio/queues.py b/lib-python/3/asyncio/queues.py
--- a/lib-python/3/asyncio/queues.py
+++ b/lib-python/3/asyncio/queues.py
@@ -167,6 +167,12 @@
yield from getter
except:
getter.cancel() # Just in case getter is not done yet.
+
+ try:
+ self._getters.remove(getter)
+ except ValueError:
+ pass
+
if not self.empty() and not getter.cancelled():
# We were woken up by put_nowait(), but can't take
# the call. Wake up the next in line.
diff --git a/lib-python/3/asyncio/selector_events.py
b/lib-python/3/asyncio/selector_events.py
--- a/lib-python/3/asyncio/selector_events.py
+++ b/lib-python/3/asyncio/selector_events.py
@@ -40,17 +40,6 @@
return bool(key.events & event)
-if hasattr(socket, 'TCP_NODELAY'):
- def _set_nodelay(sock):
- if (sock.family in {socket.AF_INET, socket.AF_INET6} and
- sock.type == socket.SOCK_STREAM and
- sock.proto == socket.IPPROTO_TCP):
- sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
-else:
- def _set_nodelay(sock):
- pass
-
-
class BaseSelectorEventLoop(base_events.BaseEventLoop):
"""Selector event loop.
@@ -363,25 +352,25 @@
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
- self._sock_recv(fut, False, sock, n)
+ self._sock_recv(fut, None, sock, n)
return fut
- def _sock_recv(self, fut, registered, sock, n):
+ def _sock_recv(self, fut, registered_fd, sock, n):
# _sock_recv() can add itself as an I/O callback if the operation can't
# be done immediately. Don't use it directly, call sock_recv().
- fd = sock.fileno()
- if registered:
+ if registered_fd is not None:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
- self.remove_reader(fd)
+ self.remove_reader(registered_fd)
if fut.cancelled():
return
try:
data = sock.recv(n)
except (BlockingIOError, InterruptedError):
- self.add_reader(fd, self._sock_recv, fut, True, sock, n)
+ fd = sock.fileno()
+ self.add_reader(fd, self._sock_recv, fut, fd, sock, n)
except Exception as exc:
fut.set_exception(exc)
else:
@@ -402,16 +391,14 @@
raise ValueError("the socket must be non-blocking")
fut = self.create_future()
if data:
- self._sock_sendall(fut, False, sock, data)
+ self._sock_sendall(fut, None, sock, data)
else:
fut.set_result(None)
return fut
- def _sock_sendall(self, fut, registered, sock, data):
- fd = sock.fileno()
-
- if registered:
- self.remove_writer(fd)
+ def _sock_sendall(self, fut, registered_fd, sock, data):
+ if registered_fd is not None:
+ self.remove_writer(registered_fd)
if fut.cancelled():
return
@@ -428,7 +415,8 @@
else:
if n:
data = data[n:]
- self.add_writer(fd, self._sock_sendall, fut, True, sock, data)
+ fd = sock.fileno()
+ self.add_writer(fd, self._sock_sendall, fut, fd, sock, data)
@coroutine
def sock_connect(self, sock, address):
@@ -674,6 +662,12 @@
def get_write_buffer_size(self):
return len(self._buffer)
+ def _add_reader(self, fd, callback, *args):
+ if self._closing:
+ return
+
+ self._loop._add_reader(fd, callback, *args)
+
class _SelectorSocketTransport(_SelectorTransport):
@@ -686,11 +680,11 @@
# Disable the Nagle algorithm -- small writes will be
# sent without waiting for the TCP ACK. This generally
# decreases the latency (in some cases significantly.)
- _set_nodelay(self._sock)
+ base_events._set_nodelay(self._sock)
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop._add_reader,
+ self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
@@ -698,22 +692,18 @@
waiter, None)
def pause_reading(self):
- if self._closing:
- raise RuntimeError('Cannot pause_reading() when closing')
- if self._paused:
- raise RuntimeError('Already paused')
+ if self._closing or self._paused:
+ return
self._paused = True
self._loop._remove_reader(self._sock_fd)
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
- if not self._paused:
- raise RuntimeError('Not paused')
+ if self._closing or not self._paused:
+ return
self._paused = False
- if self._closing:
- return
- self._loop._add_reader(self._sock_fd, self._read_ready)
+ self._add_reader(self._sock_fd, self._read_ready)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
@@ -801,7 +791,7 @@
self._sock.shutdown(socket.SHUT_WR)
def write_eof(self):
- if self._eof:
+ if self._closing or self._eof:
return
self._eof = True
if not self._buffer:
@@ -1053,7 +1043,7 @@
self._address = address
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
- self._loop.call_soon(self._loop._add_reader,
+ self._loop.call_soon(self._add_reader,
self._sock_fd, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
diff --git a/lib-python/3/asyncio/sslproto.py b/lib-python/3/asyncio/sslproto.py
--- a/lib-python/3/asyncio/sslproto.py
+++ b/lib-python/3/asyncio/sslproto.py
@@ -294,11 +294,10 @@
class _SSLProtocolTransport(transports._FlowControlMixin,
transports.Transport):
- def __init__(self, loop, ssl_protocol, app_protocol):
+ def __init__(self, loop, ssl_protocol):
self._loop = loop
# SSLProtocol instance
self._ssl_protocol = ssl_protocol
- self._app_protocol = app_protocol
self._closed = False
def get_extra_info(self, name, default=None):
@@ -306,10 +305,10 @@
return self._ssl_protocol._get_extra_info(name, default)
def set_protocol(self, protocol):
- self._app_protocol = protocol
+ self._ssl_protocol._app_protocol = protocol
def get_protocol(self):
- return self._app_protocol
+ return self._ssl_protocol._app_protocol
def is_closing(self):
return self._closed
@@ -436,8 +435,7 @@
self._waiter = waiter
self._loop = loop
self._app_protocol = app_protocol
- self._app_transport = _SSLProtocolTransport(self._loop,
- self, self._app_protocol)
+ self._app_transport = _SSLProtocolTransport(self._loop, self)
# _SSLPipe instance (None until the connection is made)
self._sslpipe = None
self._session_established = False
@@ -499,6 +497,10 @@
The argument is a bytes object.
"""
+ if self._sslpipe is None:
+ # transport closing, sslpipe is destroyed
+ return
+
try:
ssldata, appdata = self._sslpipe.feed_ssldata(data)
except ssl.SSLError as e:
@@ -543,14 +545,19 @@
def _get_extra_info(self, name, default=None):
if name in self._extra:
return self._extra[name]
+ elif self._transport is not None:
+ return self._transport.get_extra_info(name, default)
else:
- return self._transport.get_extra_info(name, default)
+ return default
def _start_shutdown(self):
if self._in_shutdown:
return
- self._in_shutdown = True
- self._write_appdata(b'')
+ if self._in_handshake:
+ self._abort()
+ else:
+ self._in_shutdown = True
+ self._write_appdata(b'')
def _write_appdata(self, data):
self._write_backlog.append((data, 0))
@@ -567,7 +574,7 @@
# (b'', 1) is a special value in _process_write_backlog() to do
# the SSL handshake
self._write_backlog.append((b'', 1))
- self._loop.call_soon(self._process_write_backlog)
+ self._process_write_backlog()
def _on_handshake_complete(self, handshake_exc):
self._in_handshake = False
@@ -623,7 +630,7 @@
def _process_write_backlog(self):
# Try to make progress on the write backlog.
- if self._transport is None:
+ if self._transport is None or self._sslpipe is None:
return
try:
@@ -681,12 +688,14 @@
self._transport._force_close(exc)
def _finalize(self):
+ self._sslpipe = None
+
if self._transport is not None:
self._transport.close()
def _abort(self):
- if self._transport is not None:
- try:
+ try:
+ if self._transport is not None:
self._transport.abort()
- finally:
- self._finalize()
+ finally:
+ self._finalize()
diff --git a/lib-python/3/asyncio/streams.py b/lib-python/3/asyncio/streams.py
--- a/lib-python/3/asyncio/streams.py
+++ b/lib-python/3/asyncio/streams.py
@@ -35,6 +35,9 @@
self.partial = partial
self.expected = expected
+ def __reduce__(self):
+ return type(self), (self.partial, self.expected)
+
class LimitOverrunError(Exception):
"""Reached the buffer limit while looking for a separator.
@@ -46,6 +49,9 @@
super().__init__(message)
self.consumed = consumed
+ def __reduce__(self):
+ return type(self), (self.args[0], self.consumed)
+
@coroutine
def open_connection(host=None, port=None, *,
diff --git a/lib-python/3/asyncio/tasks.py b/lib-python/3/asyncio/tasks.py
--- a/lib-python/3/asyncio/tasks.py
+++ b/lib-python/3/asyncio/tasks.py
@@ -148,6 +148,7 @@
terminates with a CancelledError exception (even if cancel()
was not called).
"""
+ self._log_traceback = False
if self.done():
return False
if self._fut_waiter is not None:
@@ -180,7 +181,12 @@
else:
result = coro.throw(exc)
except StopIteration as exc:
- self.set_result(exc.value)
+ if self._must_cancel:
+ # Task is cancelled right before coro stops.
+ self._must_cancel = False
+ self.set_exception(futures.CancelledError())
+ else:
+ self.set_result(exc.value)
except futures.CancelledError:
super().cancel() # I.e., Future.cancel(self).
except Exception as exc:
@@ -227,7 +233,7 @@
self._step,
RuntimeError(
'yield was used instead of yield from for '
- 'generator in task {!r} with {}'.format(
+ 'generator in task {!r} with {!r}'.format(
self, result)))
else:
# Yielding something else is an error.
@@ -517,7 +523,8 @@
elif compat.PY35 and inspect.isawaitable(coro_or_future):
return ensure_future(_wrap_awaitable(coro_or_future), loop=loop)
else:
- raise TypeError('A Future, a coroutine or an awaitable is required')
+ raise TypeError('An asyncio.Future, a coroutine or an awaitable is '
+ 'required')
@coroutine
@@ -541,6 +548,7 @@
def __init__(self, children, *, loop=None):
super().__init__(loop=loop)
self._children = children
+ self._cancel_requested = False
def cancel(self):
if self.done():
@@ -549,6 +557,11 @@
for child in self._children:
if child.cancel():
ret = True
+ if ret:
+ # If any child tasks were actually cancelled, we should
+ # propagate the cancellation request regardless of
+ # *return_exceptions* argument. See issue 32684.
+ self._cancel_requested = True
return ret
@@ -629,7 +642,10 @@
results[i] = res
nfinished += 1
if nfinished == nchildren:
- outer.set_result(results)
+ if outer._cancel_requested:
+ outer.set_exception(futures.CancelledError())
+ else:
+ outer.set_result(results)
for i, fut in enumerate(children):
fut.add_done_callback(functools.partial(_done_callback, i))
diff --git a/lib-python/3/asyncio/test_utils.py b/lib-python/3/asyncio/test_utils.py
--- a/lib-python/3/asyncio/test_utils.py
+++ b/lib-python/3/asyncio/test_utils.py
@@ -33,6 +33,7 @@
from . import tasks
from .coroutines import coroutine
from .log import logger
+from test import support
if sys.platform == 'win32': # pragma: no cover
@@ -41,6 +42,21 @@
from socket import socketpair # pragma: no cover
+def data_file(filename):
+ if hasattr(support, 'TEST_HOME_DIR'):
+ fullname = os.path.join(support.TEST_HOME_DIR, filename)
+ if os.path.isfile(fullname):
+ return fullname
+ fullname = os.path.join(os.path.dirname(os.__file__), 'test', filename)
+ if os.path.isfile(fullname):
+ return fullname
+ raise FileNotFoundError(filename)
+
+
+ONLYCERT = data_file('ssl_cert.pem')
+ONLYKEY = data_file('ssl_key.pem')
+
+
def dummy_ssl_context():
if ssl is None:
return None
@@ -113,12 +129,8 @@
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
- here = os.path.join(os.path.dirname(__file__), '..', 'tests')
- if not os.path.isdir(here):
- here = os.path.join(os.path.dirname(os.__file__),
- 'test', 'test_asyncio')
- keyfile = os.path.join(here, 'ssl_key.pem')
- certfile = os.path.join(here, 'ssl_cert.pem')
+ keyfile = ONLYKEY
+ certfile = ONLYCERT
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
@@ -334,12 +346,19 @@
return False
def assert_reader(self, fd, callback, *args):
- assert fd in self.readers, 'fd {} is not registered'.format(fd)
+ if fd not in self.readers:
+ raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
- assert handle._callback == callback, '{!r} != {!r}'.format(
- handle._callback, callback)
- assert handle._args == args, '{!r} != {!r}'.format(
- handle._args, args)
+ if handle._callback != callback:
+ raise AssertionError(
+ f'unexpected callback: {handle._callback} != {callback}')
+ if handle._args != args:
+ raise AssertionError(
+ f'unexpected callback args: {handle._args} != {args}')
+
+ def assert_no_reader(self, fd):
+ if fd in self.readers:
+ raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
@@ -437,12 +456,19 @@
class TestCase(unittest.TestCase):
+ @staticmethod
+ def close_loop(loop):
+ executor = loop._default_executor
+ if executor is not None:
+ executor.shutdown(wait=True)
+ loop.close()
+
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
- self.addCleanup(loop.close)
+ self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
@@ -455,6 +481,7 @@
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
+ self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
@@ -465,6 +492,10 @@
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
+ self.doCleanups()
+ support.threading_cleanup(*self._thread_cleanup)
+ support.reap_children()
+
if not compat.PY34:
# Python 3.3 compatibility
def subTest(self, *args, **kwargs):
diff --git a/lib-python/3/asyncio/unix_events.py b/lib-python/3/asyncio/unix_events.py
--- a/lib-python/3/asyncio/unix_events.py
+++ b/lib-python/3/asyncio/unix_events.py
@@ -61,8 +61,17 @@
def close(self):
super().close()
- for sig in list(self._signal_handlers):
- self.remove_signal_handler(sig)
+ if not sys.is_finalizing():
+ for sig in list(self._signal_handlers):
+ self.remove_signal_handler(sig)
+ else:
+ if self._signal_handlers:
+ warnings.warn(f"Closing the loop {self!r} "
+ f"on interpreter shutdown "
+ f"stage, skipping signal handlers removal",
+ ResourceWarning,
+ source=self)
+ self._signal_handlers.clear()
def _process_self_data(self, data):
for signum in data:
@@ -242,7 +251,7 @@
if sock is None:
raise ValueError('no path and sock were specified')
if (sock.family != socket.AF_UNIX or
- not base_events._is_stream_socket(sock)):
+ not base_events._is_stream_socket(sock.type)):
raise ValueError(
'A UNIX Domain Stream Socket was expected, got {!r}'
.format(sock))
@@ -297,7 +306,7 @@
'path was not specified, and no sock specified')
if (sock.family != socket.AF_UNIX or
- not base_events._is_stream_socket(sock)):
+ not base_events._is_stream_socket(sock.type)):
raise ValueError(
'A UNIX Domain Stream Socket was expected, got {!r}'
.format(sock))
diff --git a/lib-python/3/asyncore.py b/lib-python/3/asyncore.py
--- a/lib-python/3/asyncore.py
+++ b/lib-python/3/asyncore.py
@@ -619,8 +619,9 @@
def close(self):
if self.fd < 0:
return
- os.close(self.fd)
+ fd = self.fd
self.fd = -1
+ os.close(fd)
def fileno(self):
return self.fd
diff --git a/lib-python/3/base64.py b/lib-python/3/base64.py
--- a/lib-python/3/base64.py
+++ b/lib-python/3/base64.py
@@ -231,23 +231,16 @@
raise binascii.Error('Non-base32 digit found') from None
decoded += acc.to_bytes(5, 'big')
# Process the last, partial quanta
- if padchars:
+ if l % 8 or padchars not in {0, 1, 3, 4, 6}:
+ raise binascii.Error('Incorrect padding')
+ if padchars and decoded:
acc <<= 5 * padchars
last = acc.to_bytes(5, 'big')
- if padchars == 1:
- decoded[-5:] = last[:-1]
- elif padchars == 3:
- decoded[-5:] = last[:-2]
- elif padchars == 4:
- decoded[-5:] = last[:-3]
- elif padchars == 6:
- decoded[-5:] = last[:-4]
- else:
- raise binascii.Error('Incorrect padding')
+ leftover = (43 - 5 * padchars) // 8 # 1: 4, 3: 3, 4: 2, 6: 1
+ decoded[-5:] = last[:leftover]
return bytes(decoded)
-
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
diff --git a/lib-python/3/bdb.py b/lib-python/3/bdb.py
--- a/lib-python/3/bdb.py
+++ b/lib-python/3/bdb.py
@@ -3,10 +3,13 @@
import fnmatch
import sys
import os
-from inspect import CO_GENERATOR
+from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR
__all__ = ["BdbQuit", "Bdb", "Breakpoint"]
+GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR
+
+
class BdbQuit(Exception):
"""Exception to give up completely."""
@@ -77,7 +80,7 @@
# No need to trace this function
return # None
# Ignore call events in generator except when stepping.
- if self.stopframe and frame.f_code.co_flags & CO_GENERATOR:
+ if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
return self.trace_dispatch
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
@@ -86,7 +89,7 @@
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
# Ignore return events in generator except when stepping.
- if self.stopframe and frame.f_code.co_flags & CO_GENERATOR:
+ if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
return self.trace_dispatch
try:
self.frame_returning = frame
@@ -104,7 +107,7 @@
# When stepping with next/until/return in a generator frame, skip
# the internal StopIteration exception (with no traceback)
# triggered by a subiterator run with the 'yield from' statement.
- if not (frame.f_code.co_flags & CO_GENERATOR
+ if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
and arg[0] is StopIteration and arg[2] is None):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
@@ -113,7 +116,7 @@
# next/until command at the last statement in the generator before the
# exception.
elif (self.stopframe and frame is not self.stopframe
- and self.stopframe.f_code.co_flags & CO_GENERATOR
+ and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS
and arg[0] in (StopIteration, GeneratorExit)):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
@@ -230,7 +233,7 @@
def set_return(self, frame):
"""Stop when returning from the given frame."""
- if frame.f_code.co_flags & CO_GENERATOR:
+ if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:
self._set_stopinfo(frame, None, -1)
else:
self._set_stopinfo(frame.f_back, frame)
diff --git a/lib-python/3/cProfile.py b/lib-python/3/cProfile.py
--- a/lib-python/3/cProfile.py
+++ b/lib-python/3/cProfile.py
@@ -25,11 +25,11 @@
# ____________________________________________________________
class Profile(_lsprof.Profiler):
- """Profile(custom_timer=None, time_unit=None, subcalls=True, builtins=True)
+ """Profile(timer=None, timeunit=None, subcalls=True, builtins=True)
Builds a profiler object using the specified timer function.
The default timer is a fast built-in one based on real time.
- For custom timer functions returning integers, time_unit can
+ For custom timer functions returning integers, timeunit can
be a float specifying a scale (i.e. how long each integer unit
is, in seconds).
"""
@@ -121,7 +121,7 @@
# ____________________________________________________________
def main():
- import os, sys
+ import os, sys, pstats
from optparse import OptionParser
usage = "cProfile.py [-o output_file_path] [-s sort] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
@@ -130,7 +130,8 @@
help="Save stats to <outfile>", default=None)
parser.add_option('-s', '--sort', dest="sort",
help="Sort order when printing to stdout, based on pstats.Stats class",
- default=-1)
+ default=-1,
+ choices=sorted(pstats.Stats.sort_arg_dict_default))
if not sys.argv[1:]:
parser.print_usage()
diff --git a/lib-python/3/cgi.py b/lib-python/3/cgi.py
--- a/lib-python/3/cgi.py
+++ b/lib-python/3/cgi.py
@@ -404,7 +404,8 @@
"""
def __init__(self, fp=None, headers=None, outerboundary=b'',
environ=os.environ, keep_blank_values=0, strict_parsing=0,
- limit=None, encoding='utf-8', errors='replace'):
+ limit=None, encoding='utf-8', errors='replace',
+ max_num_fields=None):
"""Constructor. Read multipart/* until last part.
Arguments, all optional:
@@ -444,10 +445,14 @@
for the page sending the form (content-type : meta http-equiv or
header)
+ max_num_fields: int. If set, then __init__ throws a ValueError
+ if there are more than n fields read by parse_qsl().
+
"""
method = 'GET'
self.keep_blank_values = keep_blank_values
self.strict_parsing = strict_parsing
+ self.max_num_fields = max_num_fields
if 'REQUEST_METHOD' in environ:
method = environ['REQUEST_METHOD'].upper()
self.qs_on_post = None
@@ -670,12 +675,11 @@
qs = qs.decode(self.encoding, self.errors)
if self.qs_on_post:
qs += '&' + self.qs_on_post
- self.list = []
query = urllib.parse.parse_qsl(
qs, self.keep_blank_values, self.strict_parsing,
- encoding=self.encoding, errors=self.errors)
- for key, value in query:
- self.list.append(MiniFieldStorage(key, value))
+ encoding=self.encoding, errors=self.errors,
+ max_num_fields=self.max_num_fields)
+ self.list = [MiniFieldStorage(key, value) for key, value in query]
self.skip_lines()
FieldStorageClass = None
@@ -689,9 +693,9 @@
if self.qs_on_post:
query = urllib.parse.parse_qsl(
self.qs_on_post, self.keep_blank_values, self.strict_parsing,
- encoding=self.encoding, errors=self.errors)
- for key, value in query:
- self.list.append(MiniFieldStorage(key, value))
+ encoding=self.encoding, errors=self.errors,
+ max_num_fields=self.max_num_fields)
+ self.list.extend(MiniFieldStorage(key, value) for key, value in query)
klass = self.FieldStorageClass or self.__class__
first_line = self.fp.readline() # bytes
@@ -706,6 +710,11 @@
first_line = self.fp.readline()
self.bytes_read += len(first_line)
+ # Propagate max_num_fields into the sub class appropriately
+ max_num_fields = self.max_num_fields
+ if max_num_fields is not None:
+ max_num_fields -= len(self.list)
+
while True:
parser = FeedParser()
hdr_text = b""
@@ -727,7 +736,15 @@
part = klass(self.fp, headers, ib, environ, keep_blank_values,
strict_parsing,self.limit-self.bytes_read,
- self.encoding, self.errors)
+ self.encoding, self.errors, max_num_fields)
+
+ if max_num_fields is not None:
+ max_num_fields -= 1
+ if part.list:
+ max_num_fields -= len(part.list)
+ if max_num_fields < 0:
+ raise ValueError('Max number of fields exceeded')
+
self.bytes_read += part.bytes_read
self.list.append(part)
if part.done or self.bytes_read >= self.length > 0:
diff --git a/lib-python/3/cgitb.py b/lib-python/3/cgitb.py
--- a/lib-python/3/cgitb.py
+++ b/lib-python/3/cgitb.py
@@ -124,7 +124,7 @@
args, varargs, varkw, locals = inspect.getargvalues(frame)
call = ''
if func != '?':
- call = 'in ' + strong(func) + \
+ call = 'in ' + strong(pydoc.html.escape(func)) + \
inspect.formatargvalues(args, varargs, varkw, locals,
formatvalue=lambda value: '=' + pydoc.html.repr(value))
@@ -282,7 +282,7 @@
if self.display:
if plain:
- doc = doc.replace('&', '&amp;').replace('<', '&lt;')
+ doc = pydoc.html.escape(doc)
self.file.write('<pre>' + doc + '</pre>\n')
else:
self.file.write(doc + '\n')
diff --git a/lib-python/3/codecs.py b/lib-python/3/codecs.py
--- a/lib-python/3/codecs.py
+++ b/lib-python/3/codecs.py
@@ -479,15 +479,17 @@
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
+ if chars < 0:
+ # For compatibility with other read() methods that take a
+ # single argument
+ chars = size
+
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
- elif size >= 0:
- if len(self.charbuffer) >= size:
- break
# we need more data
if size < 0:
newdata = self.stream.read()
diff --git a/lib-python/3/collections/__init__.py b/lib-python/3/collections/__init__.py
--- a/lib-python/3/collections/__init__.py
+++ b/lib-python/3/collections/__init__.py
@@ -85,9 +85,7 @@
def __init__(*args, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
- regular dictionaries, but keyword arguments are not recommended because
- their insertion order is arbitrary.
-
+ regular dictionaries. Keyword argument order is preserved.
'''
if not args:
raise TypeError("descriptor '__init__' of 'OrderedDict' object "
@@ -157,9 +155,9 @@
dict.clear(self)
def popitem(self, last=True):
- '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ '''Remove and return a (key, value) pair from the dictionary.
+
Pairs are returned in LIFO order if last is true or FIFO order if
false.
-
'''
if not self:
raise KeyError('dictionary is empty')
diff --git a/lib-python/3/compileall.py b/lib-python/3/compileall.py
--- a/lib-python/3/compileall.py
+++ b/lib-python/3/compileall.py
@@ -16,10 +16,6 @@
import py_compile
import struct
-try:
- from concurrent.futures import ProcessPoolExecutor
-except ImportError:
- ProcessPoolExecutor = None
from functools import partial
__all__ = ["compile_dir","compile_file","compile_path"]
@@ -68,9 +64,17 @@
optimize: optimization level or -1 for level of the interpreter
workers: maximum number of parallel workers
"""
- if workers is not None and workers < 0:
- raise ValueError('workers must be greater or equal to 0')
-
+ ProcessPoolExecutor = None
+ if workers is not None:
+ if workers < 0:
+ raise ValueError('workers must be greater or equal to 0')
+ elif workers != 1:
+ try:
+ # Only import when needed, as low resource platforms may
+ # fail to import it
+ from concurrent.futures import ProcessPoolExecutor
+ except ImportError:
+ workers = 1
files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels,
ddir=ddir)
success = True
diff --git a/lib-python/3/concurrent/futures/_base.py b/lib-python/3/concurrent/futures/_base.py
--- a/lib-python/3/concurrent/futures/_base.py
+++ b/lib-python/3/concurrent/futures/_base.py
@@ -170,6 +170,29 @@
return waiter
+
+def _yield_finished_futures(fs, waiter, ref_collect):
+ """
+ Iterate on the list *fs*, yielding finished futures one by one in
+ reverse order.
+ Before yielding a future, *waiter* is removed from its waiters
+ and the future is removed from each set in the collection of sets
+ *ref_collect*.
+
+ The aim of this function is to avoid keeping stale references after
+ the future is yielded and before the iterator resumes.
+ """
+ while fs:
+ f = fs[-1]
+ for futures_set in ref_collect:
+ futures_set.remove(f)
+ with f._condition:
+ f._waiters.remove(waiter)
+ del f
+ # Careful not to keep a reference to the popped value
+ yield fs.pop()
+
+
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
@@ -189,28 +212,30 @@
before the given timeout.
"""
if timeout is not None:
- end_time = timeout + time.time()
+ end_time = timeout + time.monotonic()
fs = set(fs)
+ total_futures = len(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
-
+ finished = list(finished)
try:
- yield from finished
+ yield from _yield_finished_futures(finished, waiter,
+ ref_collect=(fs,))
while pending:
if timeout is None:
wait_timeout = None
else:
- wait_timeout = end_time - time.time()
+ wait_timeout = end_time - time.monotonic()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
- len(pending), len(fs)))
+ len(pending), total_futures))
waiter.event.wait(wait_timeout)
@@ -219,11 +244,13 @@
waiter.finished_futures = []
waiter.event.clear()
- for future in finished:
- yield future
- pending.remove(future)
+ # reverse to keep finishing order
+ finished.reverse()
+ yield from _yield_finished_futures(finished, waiter,
+ ref_collect=(fs, pending))
finally:
+ # Remove waiter from unfinished futures
for f in fs:
with f._condition:
f._waiters.remove(waiter)
@@ -543,7 +570,7 @@
Exception: If fn(*args) raises for any values.
"""
if timeout is not None:
- end_time = timeout + time.time()
+ end_time = timeout + time.monotonic()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
@@ -551,11 +578,14 @@
# before the first iterator value is required.
def result_iterator():
try:
- for future in fs:
+ # reverse to keep finishing order
+ fs.reverse()
+ while fs:
+ # Careful not to keep a reference to the popped future
if timeout is None:
- yield future.result()
+ yield fs.pop().result()
else:
- yield future.result(end_time - time.time())
+ yield fs.pop().result(end_time - time.monotonic())
finally:
for future in fs:
future.cancel()
diff --git a/lib-python/3/concurrent/futures/process.py b/lib-python/3/concurrent/futures/process.py
--- a/lib-python/3/concurrent/futures/process.py
+++ b/lib-python/3/concurrent/futures/process.py
@@ -357,6 +357,18 @@
raise NotImplementedError(_system_limited)
+def _chain_from_iterable_of_lists(iterable):
+ """
+ Specialized implementation of itertools.chain.from_iterable.
+ Each item in *iterable* should be a list. This function is
+ careful not to keep references to yielded objects.
+ """
+ for element in iterable:
+ element.reverse()
+ while element:
+ yield element.pop()
+
+
class BrokenProcessPool(RuntimeError):
"""
Raised when a process in a ProcessPoolExecutor terminated abruptly
@@ -482,7 +494,7 @@
results = super().map(partial(_process_chunk, fn),
_get_chunks(*iterables, chunksize=chunksize),
timeout=timeout)
- return itertools.chain.from_iterable(results)
+ return _chain_from_iterable_of_lists(results)
def shutdown(self, wait=True):
with self._shutdown_lock:
diff --git a/lib-python/3/concurrent/futures/thread.py b/lib-python/3/concurrent/futures/thread.py
--- a/lib-python/3/concurrent/futures/thread.py
+++ b/lib-python/3/concurrent/futures/thread.py
@@ -7,6 +7,7 @@
import atexit
from concurrent.futures import _base
+import itertools
import queue
import threading
import weakref
@@ -53,8 +54,10 @@
try:
result = self.fn(*self.args, **self.kwargs)
- except BaseException as e:
- self.future.set_exception(e)
+ except BaseException as exc:
+ self.future.set_exception(exc)
+ # Break a reference cycle with the exception 'exc'
+ self = None
else:
self.future.set_result(result)
@@ -81,6 +84,10 @@
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
+
+ # Used to assign unique thread names when thread_name_prefix is not
supplied.
+ _counter = itertools.count().__next__
+
def __init__(self, max_workers=None, thread_name_prefix=''):
"""Initializes a new ThreadPoolExecutor instance.
@@ -101,7 +108,8 @@
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
- self._thread_name_prefix = thread_name_prefix
+ self._thread_name_prefix = (thread_name_prefix or
+ ("ThreadPoolExecutor-%d" % self._counter()))
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
diff --git a/lib-python/3/configparser.py b/lib-python/3/configparser.py
--- a/lib-python/3/configparser.py
+++ b/lib-python/3/configparser.py
@@ -80,7 +80,7 @@
Return list of configuration options for the named section.
read(filenames, encoding=None)
- Read and parse the list of named configuration files, given by
+ Read and parse the iterable of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
@@ -677,13 +677,13 @@
return list(opts.keys())
def read(self, filenames, encoding=None):
- """Read and parse a filename or a list of filenames.
+ """Read and parse a filename or an iterable of filenames.
Files that cannot be opened are silently ignored; this is
- designed so that you can specify a list of potential
+ designed so that you can specify an iterable of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
- configuration files in the list will be read. A single
+ configuration files in the iterable will be read. A single
filename may also be given.
Return list of successfully read files.
diff --git a/lib-python/3/contextlib.py b/lib-python/3/contextlib.py
--- a/lib-python/3/contextlib.py
+++ b/lib-python/3/contextlib.py
@@ -1,6 +1,7 @@
"""Utilities for with-statement contexts. See PEP 343."""
import abc
import sys
+import _collections_abc
from collections import deque
from functools import wraps
@@ -25,9 +26,7 @@
@classmethod
def __subclasshook__(cls, C):
if cls is AbstractContextManager:
- if (any("__enter__" in B.__dict__ for B in C.__mro__) and
- any("__exit__" in B.__dict__ for B in C.__mro__)):
- return True
+ return _collections_abc._check_methods(C, "__enter__", "__exit__")
return NotImplemented
@@ -88,7 +87,7 @@
try:
next(self.gen)
except StopIteration:
- return
+ return False
else:
raise RuntimeError("generator didn't stop")
else:
@@ -98,7 +97,6 @@
value = type()
try:
self.gen.throw(type, value, traceback)
- raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress StopIteration *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
@@ -111,7 +109,7 @@
# Likewise, avoid suppressing if a StopIteration exception
# was passed to throw() and later wrapped into a RuntimeError
# (see PEP 479).
- if exc.__cause__ is value:
+ if type is StopIteration and exc.__cause__ is value:
return False
raise
except:
@@ -122,8 +120,10 @@
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
- if sys.exc_info()[1] is not value:
- raise
+ if sys.exc_info()[1] is value:
+ return False
+ raise
+ raise RuntimeError("generator didn't stop after throw()")
def contextmanager(func):
diff --git a/lib-python/3/copyreg.py b/lib-python/3/copyreg.py
--- a/lib-python/3/copyreg.py
+++ b/lib-python/3/copyreg.py
@@ -128,7 +128,11 @@
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
- names.append('_%s%s' % (c.__name__, name))
+ stripped = c.__name__.lstrip('_')
+ if stripped:
+ names.append('_%s%s' % (stripped, name))
+ else:
+ names.append(name)
else:
names.append(name)
diff --git a/lib-python/3/csv.py b/lib-python/3/csv.py
--- a/lib-python/3/csv.py
+++ b/lib-python/3/csv.py
@@ -217,7 +217,7 @@
matches = []
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space>
?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', #
".*?",
- r'(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
+ r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):
# ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
diff --git a/lib-python/3/ctypes/test/test_anon.py b/lib-python/3/ctypes/test/test_anon.py
--- a/lib-python/3/ctypes/test/test_anon.py
+++ b/lib-python/3/ctypes/test/test_anon.py
@@ -1,4 +1,5 @@
import unittest
+import test.support
from ctypes import *
class AnonTest(unittest.TestCase):
@@ -35,6 +36,18 @@
_______________________________________________
pypy-commit mailing list
[email protected]
https://mail.python.org/mailman/listinfo/pypy-commit