D3937: ssh: avoid reading beyond the end of stream when using compression

2018-07-16 Thread joerg.sonnenberger (Joerg Sonnenberger)
This revision was automatically updated to reflect the committed changes.
Closed by commit rHG27391d74aaa2: ssh: avoid reading beyond the end of stream 
when using compression (authored by joerg.sonnenberger, committed by ).

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3937?vs=9576&id=9601

REVISION DETAIL
  https://phab.mercurial-scm.org/D3937

AFFECTED FILES
  mercurial/sshpeer.py
  mercurial/util.py

CHANGE DETAILS

diff --git a/mercurial/util.py b/mercurial/util.py
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -322,6 +322,11 @@
 self._fillbuffer()
 return self._frombuffer(size)
 
+def unbufferedread(self, size):
+if not self._eof and self._lenbuf == 0:
+self._fillbuffer(max(size, _chunksize))
+return self._frombuffer(min(self._lenbuf, size))
+
 def readline(self, *args, **kwargs):
 if 1 < len(self._buffer):
 # this should not happen because both read and readline end with a
@@ -363,9 +368,9 @@
 self._lenbuf = 0
 return data
 
-def _fillbuffer(self):
+def _fillbuffer(self, size=_chunksize):
 """read data to the buffer"""
-data = os.read(self._input.fileno(), _chunksize)
+data = os.read(self._input.fileno(), size)
 if not data:
 self._eof = True
 else:
@@ -3302,6 +3307,104 @@
 """
 raise NotImplementedError()
 
+class _CompressedStreamReader(object):
+def __init__(self, fh):
+if safehasattr(fh, 'unbufferedread'):
+self._reader = fh.unbufferedread
+else:
+self._reader = fh.read
+self._pending = []
+self._pos = 0
+self._eof = False
+
+def _decompress(self, chunk):
+raise NotImplementedError()
+
+def read(self, l):
+buf = []
+while True:
+while self._pending:
+if len(self._pending[0]) > l + self._pos:
+newbuf = self._pending[0]
+buf.append(newbuf[self._pos:self._pos + l])
+self._pos += l
+return ''.join(buf)
+
+newbuf = self._pending.pop(0)
+if self._pos:
+buf.append(newbuf[self._pos:])
+l -= len(newbuf) - self._pos
+else:
+buf.append(newbuf)
+l -= len(newbuf)
+self._pos = 0
+
+if self._eof:
+return ''.join(buf)
+chunk = self._reader(65536)
+self._decompress(chunk)
+
+class _GzipCompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_GzipCompressedStreamReader, self).__init__(fh)
+self._decompobj = zlib.decompressobj()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+d = self._decompobj.copy()
+try:
+d.decompress('x')
+d.flush()
+if d.unused_data == 'x':
+self._eof = True
+except zlib.error:
+pass
+
+class _BZ2CompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_BZ2CompressedStreamReader, self).__init__(fh)
+self._decompobj = bz2.BZ2Decompressor()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+try:
+while True:
+newbuf = self._decompobj.decompress('')
+if newbuf:
+self._pending.append(newbuf)
+else:
+break
+except EOFError:
+self._eof = True
+
+class _TruncatedBZ2CompressedStreamReader(_BZ2CompressedStreamReader):
+def __init__(self, fh):
+super(_TruncatedBZ2CompressedStreamReader, self).__init__(fh)
+newbuf = self._decompobj.decompress('BZ')
+if newbuf:
+self._pending.append(newbuf)
+
+class _ZstdCompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh, zstd):
+super(_ZstdCompressedStreamReader, self).__init__(fh)
+self._zstd = zstd
+self._decompobj = zstd.ZstdDecompressor().decompressobj()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+try:
+while True:
+newbuf = self._decompobj.decompress('')
+if newbuf:
+self._pending.append(newbuf)
+else:
+break
+except self._zstd.ZstdError:
+self._eof = True
+
 class _zlibengine(compressionengine):
 def name(self):
 return 'zlib'
@@ -3335,15 +3438,7 @@
 yield z.flush()
 
 def decompressorreader(self, fh):
-def 

D3937: ssh: avoid reading beyond the end of stream when using compression

2018-07-12 Thread joerg.sonnenberger (Joerg Sonnenberger)
joerg.sonnenberger updated this revision to Diff 9576.

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3937?vs=9573&id=9576

REVISION DETAIL
  https://phab.mercurial-scm.org/D3937

AFFECTED FILES
  mercurial/sshpeer.py
  mercurial/util.py

CHANGE DETAILS

diff --git a/mercurial/util.py b/mercurial/util.py
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -355,6 +355,11 @@
 self._fillbuffer()
 return self._frombuffer(size)
 
+def unbufferedread(self, size):
+if not self._eof and self._lenbuf == 0:
+self._fillbuffer(max(size, _chunksize))
+return self._frombuffer(min(self._lenbuf, size))
+
 def readline(self, *args, **kwargs):
 if 1 < len(self._buffer):
 # this should not happen because both read and readline end with a
@@ -396,9 +401,9 @@
 self._lenbuf = 0
 return data
 
-def _fillbuffer(self):
+def _fillbuffer(self, size=_chunksize):
 """read data to the buffer"""
-data = os.read(self._input.fileno(), _chunksize)
+data = os.read(self._input.fileno(), size)
 if not data:
 self._eof = True
 else:
@@ -3328,6 +3333,104 @@
 """
 raise NotImplementedError()
 
+class _CompressedStreamReader(object):
+def __init__(self, fh):
+if safehasattr(fh, 'unbufferedread'):
+self._reader = fh.unbufferedread
+else:
+self._reader = fh.read
+self._pending = []
+self._pos = 0
+self._eof = False
+
+def _decompress(self, chunk):
+raise NotImplementedError()
+
+def read(self, l):
+buf = []
+while True:
+while self._pending:
+if len(self._pending[0]) > l + self._pos:
+newbuf = self._pending[0]
+buf.append(newbuf[self._pos:self._pos + l])
+self._pos += l
+return ''.join(buf)
+
+newbuf = self._pending.pop(0)
+if self._pos:
+buf.append(newbuf[self._pos:])
+l -= len(newbuf) - self._pos
+else:
+buf.append(newbuf)
+l -= len(newbuf)
+self._pos = 0
+
+if self._eof:
+return ''.join(buf)
+chunk = self._reader(65536)
+self._decompress(chunk)
+
+class _GzipCompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_GzipCompressedStreamReader, self).__init__(fh)
+self._decompobj = zlib.decompressobj()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+d = self._decompobj.copy()
+try:
+d.decompress('x')
+d.flush()
+if d.unused_data == 'x':
+self._eof = True
+except zlib.error:
+pass
+
+class _BZ2CompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_BZ2CompressedStreamReader, self).__init__(fh)
+self._decompobj = bz2.BZ2Decompressor()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+try:
+while True:
+newbuf = self._decompobj.decompress('')
+if newbuf:
+self._pending.append(newbuf)
+else:
+break
+except EOFError:
+self._eof = True
+
+class _TruncatedBZ2CompressedStreamReader(_BZ2CompressedStreamReader):
+def __init__(self, fh):
+super(_TruncatedBZ2CompressedStreamReader, self).__init__(fh)
+newbuf = self._decompobj.decompress('BZ')
+if newbuf:
+self._pending.append(newbuf)
+
+class _ZstdCompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh, zstd):
+super(_ZstdCompressedStreamReader, self).__init__(fh)
+self._zstd = zstd
+self._decompobj = zstd.ZstdDecompressor().decompressobj()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+try:
+while True:
+newbuf = self._decompobj.decompress('')
+if newbuf:
+self._pending.append(newbuf)
+else:
+break
+except self._zstd.ZstdError:
+self._eof = True
+
 class _zlibengine(compressionengine):
 def name(self):
 return 'zlib'
@@ -3361,15 +3464,7 @@
 yield z.flush()
 
 def decompressorreader(self, fh):
-def gen():
-d = zlib.decompressobj()
-for chunk in filechunkiter(fh):
-while chunk:
-# Limit output size to limit 

D3937: ssh: avoid reading beyond the end of stream when using compression

2018-07-12 Thread joerg.sonnenberger (Joerg Sonnenberger)
joerg.sonnenberger created this revision.
Herald added a subscriber: mercurial-devel.
Herald added a reviewer: hg-reviewers.

REVISION SUMMARY
  Compressed streams can be used as part of getbundle. The normal read()
  operation of bufferedinputpipe will try to fulfill the request exactly
  and can deadlock if the server sends less than requested once it is done. At the same
  time, the bundle2 logic will stop reading when it believes it has gotten
  all parts of the bundle, which can leave behind end of stream markers as
  used by bzip2 and zstd.
  
  To solve this, introduce a new optional unbufferedread interface and
  provided it in bufferedinputpipe and doublepipe. If there is buffered
  data left, it will be returned, otherwise it will issue a single read
  request and return whatever it obtains.
  
  Reorganize the decompression handlers to try harder to read until the
  end of stream, especially if the requested read can already be
  fulfilled. Check for end of stream is messy with Python 2, none of the
  standard compression modules properly exposes it. At least with zstd and
  bzip2, decompressing will remember EOS and fail for empty input after
  the EOS has been seen. For zlib, the only way to detect it with Python 2
  is to duplicate the decompressobj and force some additional data into
  it. The common handler can be further optimized, but works as PoC.

REPOSITORY
  rHG Mercurial

REVISION DETAIL
  https://phab.mercurial-scm.org/D3937

AFFECTED FILES
  mercurial/sshpeer.py
  mercurial/util.py

CHANGE DETAILS

diff --git a/mercurial/util.py b/mercurial/util.py
--- a/mercurial/util.py
+++ b/mercurial/util.py
@@ -355,6 +355,11 @@
 self._fillbuffer()
 return self._frombuffer(size)
 
+def unbufferedread(self, size):
+if not self._eof and self._lenbuf == 0:
+self._fillbuffer(max(size, _chunksize))
+return self._frombuffer(min(self._lenbuf, size))
+
 def readline(self, *args, **kwargs):
 if 1 < len(self._buffer):
 # this should not happen because both read and readline end with a
@@ -396,9 +401,9 @@
 self._lenbuf = 0
 return data
 
-def _fillbuffer(self):
+def _fillbuffer(self, size = _chunksize):
 """read data to the buffer"""
-data = os.read(self._input.fileno(), _chunksize)
+data = os.read(self._input.fileno(), size)
 if not data:
 self._eof = True
 else:
@@ -3328,6 +3333,98 @@
 """
 raise NotImplementedError()
 
+class _CompressedStreamReader(object):
+def __init__(self, fh):
+if safehasattr(fh, 'unbufferedread'):
+self._reader = fh.unbufferedread
+else:
+self._reader = fh.read
+self._pending = []
+self._eof = False
+
+def _decompress(self, chunk):
+raise NotImplementedError()
+
+def read(self, l):
+buf = []
+while True:
+while self._pending:
+if len(self._pending[0]) > l:
+newbuf = self._pending[0]
+buf.append(newbuf[:l])
+self._pending[0] = newbuf[l:]
+return ''.join(buf)
+
+newbuf = self._pending.pop(0)
+buf.append(newbuf)
+l -= len(newbuf)
+
+if self._eof:
+return ''.join(buf)
+chunk = self._reader(65536)
+self._decompress(chunk)
+
+class _GzipCompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_GzipCompressedStreamReader, self).__init__(fh)
+self._decompobj = zlib.decompressobj()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+d = self._decompobj.copy()
+try:
+d.decompress('x')
+d.flush()
+if d.unused_data == 'x':
+self._eof = True
+except zlib.error:
+pass
+
+class _BZ2CompressedStreamReader(_CompressedStreamReader):
+def __init__(self, fh):
+super(_BZ2CompressedStreamReader, self).__init__(fh)
+self._decompobj = bz2.BZ2Decompressor()
+def _decompress(self, chunk):
+newbuf = self._decompobj.decompress(chunk)
+if newbuf:
+self._pending.append(newbuf)
+try:
+while True:
+newbuf = self._decompobj.decompress('')
+if newbuf:
+self._pending.append(newbuf)
+else:
+break
+except EOFError:
+self._eof = True
+
+class _TruncatedBZ2CompressedStreamReader(_BZ2CompressedStreamReader):
+def __init__(self, fh):
+super(_TruncatedBZ2CompressedStreamReader, self).__init__(fh)
+newbuf = self._decompobj.decompress('BZ')
+if newbuf:
+self._pending.append(newbuf)
+
+class