Hello community,

here is the log from the commit of package duplicity.1690 for openSUSE:12.3:Update checked in at 2013-05-27 16:00:32
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:12.3:Update/duplicity.1690 (Old)
 and      /work/SRC/openSUSE:12.3:Update/.duplicity.1690.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "duplicity.1690" Changes: -------- New Changes file: --- /dev/null 2013-05-23 01:40:31.032032505 +0200 +++ /work/SRC/openSUSE:12.3:Update/.duplicity.1690.new/duplicity.changes 2013-05-27 16:00:33.000000000 +0200 @@ -0,0 +1,398 @@ +------------------------------------------------------------------- +Sat May 18 22:56:24 UTC 2013 - [email protected] + +- Add duplicity-data-corruption.patch, upstream fix for corruption + that occurs when resuming an interrupted backup, especially + unencrypted backups (bnc#813319) + +------------------------------------------------------------------- +Sat Nov 10 21:07:51 UTC 2012 - [email protected] + +- Update to version 0.6.20: + * several improvements and bugfixes + http://duplicity.nongnu.org/CHANGELOG + +------------------------------------------------------------------- +Mon Jun 18 06:03:26 UTC 2012 - [email protected] + +- Update to version 0.6.19: + * several bugfixes and ssh backends enhancements + +------------------------------------------------------------------- +Mon Mar 5 08:37:26 UTC 2012 - [email protected] + +- Update to version 0.6.18: + * Added option to not compress the backup, when no encryption + is selected + * Adding --file-prefix option so different sets of backups can + be stored in the same bucket + * more bugfixes + +------------------------------------------------------------------- +Mon Feb 20 19:11:26 UTC 2012 - [email protected] + +- factory has split librsync into devel and lib package + +------------------------------------------------------------------- +Wed Feb 8 20:35:18 UTC 2012 - [email protected] + +- Update to version 0.6.17: + * Added --rsync-options flag to allow user to pass options to + rsync at will + * Added --s3-use-multiprocessing to select the new s3 + multiprocessing backend. + Default is to use the single processing backend. A helper, + filechunkio.py, requires Python 2.6+, so this option is not + usable in earlier versions. +- Update to version 0.6.16: + * Usability enhancement: sign passphrase prompt has no second + verification prompt anymore, symmetric passphrases are + still verified + * Fixed Unicode errors when translations are used. + * Replaced old tarfile.py with Python 2.7 version, modded + to support Python 2.4 syntax. + +------------------------------------------------------------------- +Sat Oct 1 22:41:35 CEST 2011 - [email protected] + +- Update to version 0.6.15: + + Ignore 404 errors when deleting a file on Ubuntu One. + + Ignore ENOENT (file missing) errors where it is safe. + + Set minimum Python version to 2.4 in README. + + introduce --numeric-owner parameter + + duplicity:restore_check_hash "Invalid data - *** hash mismatch" + lists the offending filename + + fixes to unit tests to support SIGN_PASSPHRASE + + Bugs fixed: lp#524922, lp#703142, lp#794576, lp#815635, + lp#818178, lp#821368, lp#823556, lp#824678 +- Changes from version 0.6.14: + + Provide Ubuntu One integration + + Bugs fixed: lp#433591, lp#487720, lp#507904, lp#512628, + lp#680425, lp#705499, lp#739438, lp#753858, lp#761688, + lp#777377, lp#778215, lp#782294, lp#782321, lp#792704, + lp#782294, lp#782337, lp#794123, lp#797758, lp#793096. + +------------------------------------------------------------------- +Sun Sep 18 17:17:12 UTC 2011 - [email protected] + +- Remove redundant tags/sections from specfile + (cf. 
+
+-------------------------------------------------------------------
+Mon Jun 20 15:18:31 UTC 2011 - [email protected]
+
+- update to 0.6.13
+  fixed bugs:
+  * Assertion error "time not moving forward at appropriate pace"
+  * silent data corruption with checkpoint/restore
+  * File "/usr/bin/duplicity", error after upgrade from 6.11 to 6.12
+  features:
+  * In boto backend check for existing bucket before trying to create
+- update to 0.6.12
+  fixed bugs:
+  * Only full backups done on webdav
+  * Use log codes for common backend errors
+  * Inverted "Current directory" "Previous directory" in error message
+  * OSError: [Errno 2] No such file or directory
+  * sslerror: The read operation timed out with cf
+  * boto backend uses Python 2.5 conditional
+  * symbolic link ownership not preserved
+  * Cygwin: TypeError: basis_file must be a (true) file
+  * Duplicity 0.6.11 aborts if RSYNC_RSH not set
+  * Backup fails silently when target is full (sftp, verbosity=4)
+  * Exception in log module
+  features:
+  * ftps support using lftp (ftpsbackend)
+
+-------------------------------------------------------------------
+Sat Jan 1 18:17:06 UTC 2011 - [email protected]
+
+- update to 0.6.11
+  * Add an option to connect to S3 with regular HTTP (and not HTTPS)
+  * Use log codes for common backend errors
+  * missing ssh on rsyncd url - rsync: Failed to exec ssh: ...
+  * backed up to S3, wiped drive, reinstalled, unable to restore backup
+  * os.execve should get passed program as first argument
+  * sftp: "Couldn't delete file: Failure'" only logged on level 9
+  * 0.6.10 does not work with S3
+  * RsyncBackend instance has no attribute 'subprocess_popen_persist'
+
+-------------------------------------------------------------------
+Tue Sep 28 08:18:11 UTC 2010 - [email protected]
+
+- update to 0.6.10
+  - changes in 0.6.10
+    * Offer command to remove old incremental backups from target
+    * Use log codes for common backend errors
+    * duplicity --short-filenames crashes with TypeError
+    * NameError: global name 'parsed_url' is not defined
+    * ftpbackend fails if target directory doesn't exist
+    * Command-line verbosity parsing crash
+  - changes in 0.6.9
+    * Unknown error while uploading duplicity-full-signatures
+    * Duplicity returns 1 when continuing an interrupted backup
+    * duplicity doesn't handle large files well
+    * --ssh-options options passing options to ssh do not work
+    * username not url decoded in backend (at least rsync)
+    * Assertion error "time not moving forward at appropriate pace"
+    * Diminishing performance on large files
+    * Upgraded tahoebackend to new parse_url.
+    * Fix two warning messages in sshbackend.
+- GnuPGInterface and pexpect are part of duplicity
+  -> removed RPM requirements
+- Require ncftp
+- Recommend (optional components depending on which backup medium
+  should be used)
+  * ncftp
+  * python-boto (optional requirement for use with S3; module not
+    in Factory yet though)
+
+-------------------------------------------------------------------
+Mon Mar 15 07:03:15 UTC 2010 - [email protected]
+
+- update to 0.6.08b:
+  * fix bug where encrypted backup without --gpg-options crashes
+
+- changes from 0.6.08:
+  * fix lp#519110: need accurate man page info on use of scp/sftp usage
+  * fix lp#532051: rdiffdir attempts to reference undefined variables with
+    some command arguments
+  * fix lp#529869: TypeError: unsupported operand type(s) for -:
+    'NoneType' and 'int'
+  * fix lp#530910: TypeError: unsupported operand type(s) for +:
+    'NoneType' and 'str'
+
+- changes from 0.6.07:
+  * fix lp#459511: --tempdir option doesn't override TMPDIR
+  * fix lp#467391: WebDAV backend doesn't work
+  * fix lp#487686: re-add scp backend and make available via command line option
+  * fix lp#490619: use optparse not getopt
+  * fix lp#497243: 0.6.06, archive dir: cache desynchronization caused by remove*
+  * fix lp#501093: SSHBackend doesn't handle spaces in path
+  * fix lp#505739: "sslerror: The read operation timed out" with S3
+  * fix lp#520470: don't warn when there's old backup to delete
+  * fix lp#522544: OSError: [Errno 40] Too many levels of symbolic links
+  * fix lp#388673: allow renaming paths as they are restored
+
+-------------------------------------------------------------------
+Mon Feb 8 10:26:24 UTC 2010 - [email protected]
+
+- Update to version 0.6.06:
+  * Merged in lp:~mterry/duplicity/list-old-chains
+    List/keep old signature chains
+  * Applied patches from Kasper Brand that fixed device file handling.
+  * Applied 422477: [PATCH] IMAP Backend Error in delete()
+  * Merged in lp:~mterry/duplicity/iterate-warnings Add machine
+    codes to various warnings when iterating over source files
+  * Fixed 435975: gpg asks for password in 0.6.05, but not in 0.5.18
+- Bugs fixed in v0.6.05 (2009/08/28)
+  * 407968: GIO backend can't restore
+  * 408059: Failure due to _logger.log failure for content with special
+    characters: TypeError decoding Unicode not supported
+  * 409593: deja-dup (or duplicity) deletes all signatures
+  * 412667: "duplicity remove-older-than" asks for passphrase even though
+    not required
+  * 418170: [PATCH] file names longer than 512 symbols are not supported
+- Bugs fixed in v0.6.04
+  * 405734: duplicity fails to restore files that contain a newline character
++++ 201 more lines (skipped)
++++ between /dev/null
++++ and /work/SRC/openSUSE:12.3:Update/.duplicity.1690.new/duplicity.changes

New:
----
  duplicity-0.6.20.tar.gz
  duplicity-data-corruption.patch
  duplicity-remove_shebang.patch
  duplicity-rpmlintrc
  duplicity.changes
  duplicity.spec

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ duplicity.spec ++++++
#
# spec file for package duplicity
#
# Copyright (c) 2013 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon. The license for this file, and modifications and additions to the
# file, is the same license as for the pristine package itself (unless the
# license for the pristine package is not an Open Source License, in which
# case the license is the MIT License). An "Open Source License" is a
# license that conforms to the Open Source Definition (Version 1.9)
# published by the Open Source Initiative.
#
# Please submit bugfixes or comments via http://bugs.opensuse.org/
#


%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}

Name:           duplicity
%if 0%{?suse_version} > 1210
BuildRequires:  librsync-devel >= 0.9.6
%else
BuildRequires:  librsync >= 0.9.6
%endif
BuildRequires:  python-devel
Summary:        Encrypted bandwidth-efficient backup using the rsync algorithm
License:        GPL-3.0+
Group:          Productivity/Archiving/Backup
Version:        0.6.20
Release:        0
Url:            http://duplicity.nongnu.org/
Source:         http://code.launchpad.net/%{name}/0.6-series/%{version}/+download/%{name}-%{version}.tar.gz
Source99:       duplicity-rpmlintrc
# PATCH-FIX-UPSTREAM duplicity-data-corruption.patch bnc#813319 [email protected] -- data corruption when resuming interrupted backup
Patch0:         duplicity-data-corruption.patch
Patch1:         duplicity-remove_shebang.patch
Requires:       gpg
Recommends:     python-boto ncftp
BuildRoot:      %{_tmppath}/%{name}-%{version}-build
%{py_requires}

%description
Duplicity incrementally backs up files and directories by encrypting
tar-format volumes with GnuPG and uploading them to a remote (or local)
file server. In theory many remote backends are possible; right now
local, ssh/scp, ftp, rsync, HSI, WebDAV, and Amazon S3 backends are
written. Because duplicity uses librsync, the incremental archives are
space efficient and only record the parts of files that have changed
since the last backup. Currently duplicity supports deleted files, full
unix permissions, directories, symbolic links, fifos, etc., but not hard
links.

%prep
%setup -q
%patch0
%patch1 -p1

%build
%{__python} setup.py build

%install
%{__python} setup.py install --prefix=/usr --root="$RPM_BUILD_ROOT" --record-rpm=files.lst
%__rm -rf $RPM_BUILD_ROOT/usr/share/doc/duplicity-%{version}
%__perl -n -i -e 'print unless m,(%{_bindir}|%{_mandir}|%{_datadir}/doc|%{_datadir}/locale),' files.lst
%find_lang %{name}
cat %{name}.lang files.lst > rpmfiles.lst

%files -f rpmfiles.lst
%defattr(-,root,root)
%doc CHANGELOG COPYING README
%{_bindir}/duplicity
%{_bindir}/rdiffdir
%doc %{_mandir}/man1/duplicity.1%{ext_man}
%doc %{_mandir}/man1/rdiffdir.1%{ext_man}

%changelog
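
A minimal usage sketch of the packaged tool, for orientation (the source
path and the scp target URL are illustrative placeholders, not part of the
package):

    duplicity full /home/user scp://user@backup.example.net/duplicity
    duplicity incremental /home/user scp://user@backup.example.net/duplicity
    duplicity restore scp://user@backup.example.net/duplicity /tmp/restored

Backups are GnuPG-encrypted by default (hence "Requires: gpg" above); the
--no-encryption option exercised in the test changes below turns that off.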
++++++ duplicity-data-corruption.patch ++++++
--- bin/duplicity	2012-09-29 19:45:14 +0000
+++ bin/duplicity	2013-03-18 00:43:09 +0000
@@ -221,7 +221,13 @@
             # Just spin our wheels
             while tarblock_iter.next():
                 if (tarblock_iter.previous_index == last_index):
-                    if (tarblock_iter.previous_block > last_block):
+                    # If both the previous index and this index are done, exit now
+                    # before we hit the next index, to prevent skipping its first
+                    # block.
+                    if not last_block and not tarblock_iter.previous_block:
+                        break
+                    # Only check block number if last_block is also a number
+                    if last_block and tarblock_iter.previous_block > last_block:
                         break
                 if tarblock_iter.previous_index > last_index:
                     log.Warn(_("File %s complete in backup set.\n"
@@ -935,11 +941,10 @@
     """
     Copy data from src_iter to file at fn
     """
-    block_size = 128 * 1024
     file = open(filename, "wb")
     while True:
         try:
-            data = src_iter.next(block_size).data
+            data = src_iter.next().data
         except StopIteration:
             break
         file.write(data)
@@ -987,9 +992,9 @@
     def __init__(self, fileobj):
         self.fileobj = fileobj

-    def next(self, size):
+    def next(self):
         try:
-            res = Block(self.fileobj.read(size))
+            res = Block(self.fileobj.read(self.get_read_size()))
         except Exception:
             if hasattr(self.fileobj, 'name'):
                 name = self.fileobj.name
@@ -1003,6 +1008,9 @@
             raise StopIteration
         return res

+    def get_read_size(self):
+        return 128 * 1024
+
     def get_footer(self):
         return ""
--- duplicity/diffdir.py	2012-09-28 15:48:21 +0000
+++ duplicity/diffdir.py	2013-03-18 00:43:09 +0000
@@ -481,14 +481,14 @@
             filler_data = ""
         return TarBlock(index, "%s%s%s" % (headers, file_data, filler_data))

-    def process(self, val, size):
+    def process(self, val):
         """
         Turn next value of input_iter into a TarBlock
         """
         assert not self.process_waiting
         XXX # Override in subclass @UndefinedVariable

-    def process_continued(self, size):
+    def process_continued(self):
         """
         Get more tarblocks
@@ -498,15 +498,15 @@
         assert self.process_waiting
         XXX # Override in subclass @UndefinedVariable

-    def next(self, size = 1024 * 1024):
+    def next(self):
         """
-        Return next block, no bigger than size, and update offset
+        Return next block and update offset
         """
         if self.process_waiting:
-            result = self.process_continued(size)
+            result = self.process_continued()
         else:
             # Below a StopIteration exception will just be passed upwards
-            result = self.process(self.input_iter.next(), size)
+            result = self.process(self.input_iter.next())
         block_number = self.process_next_vol_number
         self.offset += len(result.data)
         self.previous_index = result.index
@@ -517,6 +517,13 @@
             self.remember_next = False
         return result

+    def get_read_size(self):
+        # read size must always be the same, because if we are restarting a
+        # backup volume where the previous volume ended in a data block, we
+        # have to be able to assume its length in order to continue reading
+        # the file from the right place.
+        return 64 * 1024
+
     def get_previous_index(self):
         """
         Return index of last tarblock, or None if no previous index
@@ -553,7 +560,7 @@
     """
     TarBlockIter that does no file reading
     """
-    def process(self, delta_ropath, size):
+    def process(self, delta_ropath):
         """
         Get a fake tarblock from delta_ropath
         """
@@ -577,13 +584,9 @@
     """
     TarBlockIter that yields blocks of a signature tar from path_iter
     """
-    def process(self, path, size):
+    def process(self, path):
         """
         Return associated signature TarBlock from path
-
-        Here size is just ignored --- let's hope a signature isn't too
-        big. Also signatures are stored in multiple volumes so it
-        doesn't matter.
         """
         ti = path.get_tarinfo()
         if path.isreg():
@@ -606,7 +609,7 @@
     delta_path_iter, so the delta information has already been
     calculated.
""" - def process(self, delta_ropath, size): + def process(self, delta_ropath): """ Get a tarblock from delta_ropath """ @@ -631,8 +634,7 @@ # Now handle single volume block case fp = delta_ropath.open("rb") - # Below the 512 is the usual length of a tar header - data, last_block = self.get_data_block(fp, size - 512) + data, last_block = self.get_data_block(fp) if stats: stats.RawDeltaSize += len(data) if last_block: @@ -654,11 +656,11 @@ self.process_next_vol_number = 2 return self.tarinfo2tarblock(index, ti, data) - def get_data_block(self, fp, max_size): + def get_data_block(self, fp): """ Return pair (next data block, boolean last data block) """ - read_size = min(64*1024, max(max_size, 512)) + read_size = self.get_read_size() buf = fp.read(read_size) if len(buf) < read_size: if fp.close(): @@ -667,7 +669,7 @@ else: return (buf, False) - def process_continued(self, size): + def process_continued(self): """ Return next volume in multivol diff or snapshot """ @@ -675,7 +677,7 @@ ropath = self.process_ropath ti, index = ropath.get_tarinfo(), ropath.index ti.name = "%s/%d" % (self.process_prefix, self.process_next_vol_number) - data, last_block = self.get_data_block(self.process_fp, size - 512) + data, last_block = self.get_data_block(self.process_fp) if stats: stats.RawDeltaSize += len(data) if last_block: --- duplicity/dup_temp.py 2012-09-28 15:48:21 +0000 +++ duplicity/dup_temp.py 2013-03-18 00:43:09 +0000 @@ -256,9 +256,9 @@ def __init__(self, src): self.src = src self.fp = src.open("rb") - def next(self, size): + def next(self): try: - res = Block(self.fp.read(size)) + res = Block(self.fp.read(self.get_read_size())) except Exception: log.FatalError(_("Failed to read %s: %s") % (self.src.name, sys.exc_info()), @@ -267,5 +267,7 @@ self.fp.close() raise StopIteration return res + def get_read_size(self): + return 128 * 1024 def get_footer(self): return "" --- duplicity/gpg.py 2012-10-15 17:13:08 +0000 +++ duplicity/gpg.py 2013-03-18 00:44:29 +0000 @@ -310,17 +310,16 @@ def get_current_size(): return os.stat(filename).st_size - block_size = 128 * 1024 # don't bother requesting blocks smaller, but also don't ask for bigger target_size = size - 50 * 1024 # fudge factor, compensate for gpg buffering data_size = target_size - max_footer_size file = GPGFile(True, path.Path(filename), profile) at_end_of_blockiter = 0 while True: bytes_to_go = data_size - get_current_size() - if bytes_to_go < block_size: + if bytes_to_go < block_iter.get_read_size(): break try: - data = block_iter.next(min(block_size, bytes_to_go)).data + data = block_iter.next().data except StopIteration: at_end_of_blockiter = 1 break @@ -369,10 +368,10 @@ at_end_of_blockiter = 0 while True: bytes_to_go = size - file_counted.byte_count - if bytes_to_go < 32 * 1024: + if bytes_to_go < block_iter.get_read_size(): break try: - new_block = block_iter.next(min(128*1024, bytes_to_go)) + new_block = block_iter.next() except StopIteration: at_end_of_blockiter = 1 break --- testing/tests/gpgtest.py 2011-11-04 12:48:04 +0000 +++ testing/tests/gpgtest.py 2013-03-18 00:43:09 +0000 @@ -134,7 +134,7 @@ #print os.stat("testfiles/output/gzwrite.gz").st_size-size assert size - 64 * 1024 <= os.stat("testfiles/output/gzwrite.gz").st_size <= size + 64 * 1024 gwfh.set_at_end() - gpg.GzipWriteFile(gwfh, "testfiles/output/gzwrite.gpg", size = size) + gpg.GzipWriteFile(gwfh, "testfiles/output/gzwrite.gz", size = size) #print os.stat("testfiles/output/gzwrite.gz").st_size @@ -157,13 +157,18 @@ s2 = size - s1 return "a"*s1 + self.from_random_fp.read(s2) 

-    def next(self, size):
+    def next(self):
         if self.at_end:
             raise StopIteration
-        if random.randrange(2): real_size = size
-        else: real_size = random.randrange(0, size)
-        block_data = self.get_buffer(real_size)
+        block_data = self.get_buffer(self.get_read_size())
         return GPGWriteHelper2(block_data)

+    def get_read_size(self):
+        size = 64 * 1024
+        if random.randrange(2):
+            return size
+        else:
+            return random.randrange(0, size)
+
     def get_footer(self):
         return "e" * random.randrange(0, 15000)
--- testing/tests/restarttest.py	2012-10-02 22:18:43 +0000
+++ testing/tests/restarttest.py	2013-03-18 00:43:09 +0000
@@ -36,7 +36,6 @@
 other_args = ["-v0", "--no-print-statistics"]
 #other_args = ["--short-filenames"]
 #other_args = ["--ssh-command 'ssh -v'", "--scp-command 'scp -C'"]
-#other_args = ['--no-encryption']

 # If this is set to true, after each backup, verify contents
 verify = 1
@@ -52,8 +51,9 @@
     Test checkpoint/restart using duplicity binary
     """
     def setUp(self):
+        self.class_args = []
         assert not os.system("tar xzf testfiles.tar.gz > /dev/null 2>&1")
-        assert not os.system("rm -rf testfiles/output "
+        assert not os.system("rm -rf testfiles/output testfiles/largefiles "
                              "testfiles/restore_out testfiles/cache")
         assert not os.system("mkdir testfiles/output testfiles/cache")
         backend = duplicity.backend.get_backend(backend_url)
@@ -76,6 +76,7 @@
             cmd_list.append("--current-time %s" % (current_time,))
         if other_args:
             cmd_list.extend(other_args)
+        cmd_list.extend(self.class_args)
         cmd_list.extend(arglist)
         cmdline = " ".join(cmd_list)
         #print "Running '%s'." % cmdline
@@ -140,11 +141,14 @@
         self.verify(dirname,
                     time = current_time, options = restore_options)

-    def make_largefiles(self):
-        # create 3 2M files
+    def make_largefiles(self, count=3, size=2):
+        """
+        Makes a number of large files in testfiles/largefiles that each are
+        the specified number of megabytes.
+        """
         assert not os.system("mkdir testfiles/largefiles")
-        for n in (1,2,3):
-            assert not os.system("dd if=/dev/urandom of=testfiles/largefiles/file%d bs=1024 count=2048 > /dev/null 2>&1" % n)
+        for n in range(count):
+            assert not os.system("dd if=/dev/urandom of=testfiles/largefiles/file%d bs=1024 count=%d > /dev/null 2>&1" % (n + 1, size * 1024))

     def check_same(self, filename1, filename2):
         """
@@ -289,6 +293,128 @@
         self.backup("inc", "testfiles/largefiles")
         self.verify("testfiles/largefiles")

+    def make_fake_second_volume(self, name):
+        """
+        Takes a successful backup and pretends that we interrupted a backup
+        after two volumes. (This is because we want to be able to model
+        restarting the second volume, and duplicity deletes the last volume
+        found because it may not have finished uploading.)
+ """ + # First, confirm that we have signs of a successful backup + self.assertEqual(len(glob.glob("testfiles/output/*.manifest*")), 1) + self.assertEqual(len(glob.glob("testfiles/output/*.sigtar*")), 1) + self.assertEqual(len(glob.glob("testfiles/cache/%s/*" % name)), 2) + self.assertEqual(len(glob.glob( + "testfiles/cache/%s/*.manifest*" % name)), 1) + self.assertEqual(len(glob.glob( + "testfiles/cache/%s/*.sigtar*" % name)), 1) + # Alright, everything is in order; fake a second interrupted volume + assert not os.system("rm testfiles/output/*.manifest*") + assert not os.system("rm testfiles/output/*.sigtar*") + assert not os.system("rm -f testfiles/output/*.vol[23456789].*") + assert not os.system("rm -f testfiles/output/*.vol1[^.]+.*") + self.assertEqual(len(glob.glob("testfiles/output/*.difftar*")), 1) + assert not os.system("rm testfiles/cache/%s/*.sigtar*" % name) + assert not os.system("cp testfiles/output/*.difftar* " + "`ls testfiles/output/*.difftar* | " + " sed 's|vol1|vol2|'`") + assert not os.system("head -n6 testfiles/cache/%s/*.manifest > " + "testfiles/cache/%s/" + "`basename testfiles/cache/%s/*.manifest`" + ".part" % (name, name, name)) + assert not os.system("rm testfiles/cache/%s/*.manifest" % name) + assert not os.system("""echo 'Volume 2: + StartingPath foo + EndingPath bar + Hash SHA1 sha1' >> testfiles/cache/%s/*.manifest.part""" % name) + + def test_split_after_small(self): + """ + If we restart right after a volume that ended with a small + (one-block) file, make sure we restart in the right place. + """ + source = 'testfiles/largefiles' + assert not os.system("mkdir -p %s" % source) + assert not os.system("echo hello > %s/file1" % source) + self.backup("full", source, options=["--name=backup1"]) + # Fake an interruption + self.make_fake_second_volume("backup1") + # Add new file + assert not os.system("cp %s/file1 %s/newfile" % (source, source)) + # 'restart' the backup + self.backup("full", source, options=["--name=backup1"]) + # Confirm we actually resumed the previous backup + self.assertEqual(len(os.listdir("testfiles/output")), 4) + # Now make sure everything is byte-for-byte the same once restored + self.restore() + assert not os.system("diff -r %s testfiles/restore_out" % source) + + def test_split_after_large(self): + """ + If we restart right after a volume that ended with a large + (multi-block) file, make sure we restart in the right place. + """ + source = 'testfiles/largefiles' + self.make_largefiles(count=1, size=1) + self.backup("full", source, options=["--name=backup1"]) + # Fake an interruption + self.make_fake_second_volume("backup1") + # Add new file + assert not os.system("cp %s/file1 %s/newfile" % (source, source)) + # 'restart' the backup + self.backup("full", source, options=["--name=backup1"]) + # Confirm we actually resumed the previous backup + self.assertEqual(len(os.listdir("testfiles/output")), 4) + # Now make sure everything is byte-for-byte the same once restored + self.restore() + assert not os.system("diff -r %s testfiles/restore_out" % source) + + def test_split_inside_large(self): + """ + If we restart right after a volume that ended inside of a large + (multi-block) file, make sure we restart in the right place. 
+ """ + source = 'testfiles/largefiles' + self.make_largefiles(count=1, size=3) + self.backup("full", source, options=["--vols 1", "--name=backup1"]) + # Fake an interruption + self.make_fake_second_volume("backup1") + # 'restart' the backup + self.backup("full", source, options=["--vols 1", "--name=backup1"]) + # Now make sure everything is byte-for-byte the same once restored + self.restore() + assert not os.system("diff -r %s testfiles/restore_out" % source) + + def test_new_file(self): + """ + If we restart right after a volume, but there are new files that would + have been backed up earlier in the volume, make sure we don't wig out. + (Expected result is to ignore new, ealier files, but pick up later + ones.) + """ + source = 'testfiles/largefiles' + self.make_largefiles(count=1, size=1) + self.backup("full", source, options=["--name=backup1"]) + # Fake an interruption + self.make_fake_second_volume("backup1") + # Add new files, earlier and later in filename sort order + assert not os.system("echo hello > %s/a" % source) + assert not os.system("echo hello > %s/z" % source) + # 'restart' the backup + self.backup("full", source, options=["--name=backup1"]) + # Now make sure everything is the same once restored, except 'a' + self.restore() + assert not os.system("test ! -e testfiles/restore_out/a") + assert not os.system("diff %s/file1 testfiles/restore_out/file1" % source) + assert not os.system("diff %s/z testfiles/restore_out/z" % source) + + +# Note that this class duplicates all the tests in RestartTest +class RestartTestWithoutEncryption(RestartTest): + def setUp(self): + RestartTest.setUp(self) + self.class_args.extend(["--no-encryption"]) + def test_no_write_double_snapshot(self): """ Test that restarting a full backup does not write duplicate entries @@ -299,12 +425,12 @@ self.make_largefiles() # Start backup try: - self.backup("full", "testfiles/largefiles", options = ["--fail 2", "--vols 1", "--no-encryption"]) + self.backup("full", "testfiles/largefiles", options = ["--fail 2", "--vols 1"]) self.fail() except CmdError, e: self.assertEqual(30, e.exit_status) # Finish it - self.backup("full", "testfiles/largefiles", options = ["--no-encryption"]) + self.backup("full", "testfiles/largefiles") # Now check sigtar sigtars = glob.glob("testfiles/output/duplicity-full*.sigtar.gz") self.assertEqual(1, len(sigtars)) @@ -321,7 +447,7 @@ https://launchpad.net/bugs/929067 """ # Intial normal backup - self.backup("full", "testfiles/blocktartest", options = ["--no-encryption"]) + self.backup("full", "testfiles/blocktartest") # Create an exact clone of the snapshot folder in the sigtar already. # Permissions and mtime must match. 
os.mkdir("testfiles/snapshot", 0755) @@ -341,7 +467,7 @@ self.assertEqual(0, os.system("rm -r testfiles/cache")) # Try a follow on incremental (which in buggy versions, would create # a deleted entry for the base dir) - self.backup("inc", "testfiles/blocktartest", options = ["--no-encryption"]) + self.backup("inc", "testfiles/blocktartest") self.assertEqual(1, len(glob.glob("testfiles/output/duplicity-new*.sigtar.gz"))) # Confirm we can restore it (which in buggy versions, would fail) self.restore() ++++++ duplicity-remove_shebang.patch ++++++ diff --git a/duplicity/compilec.py b/duplicity/compilec.py index 1176dc2..4577c28 100755 --- a/duplicity/compilec.py +++ b/duplicity/compilec.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright 2002 Ben Escoto <[email protected]> diff --git a/duplicity/tarfile.py b/duplicity/tarfile.py index ac93412..98fb4be 100644 --- a/duplicity/tarfile.py +++ b/duplicity/tarfile.py @@ -1,4 +1,3 @@ -#! /usr/bin/python2.7 # -*- coding: iso-8859-1 -*- #------------------------------------------------------------------- # tarfile.py ++++++ duplicity-rpmlintrc ++++++ addFilter("W: python-naming-policy-not-applied .*") -- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
