Re: [Qemu-block] [PATCH v2 11/11] iotests: 124 - transactional failure test

From: Max Reitz
Date: 2015-04-17

On 27.03.2015 20:20, John Snow wrote:
> Use a transaction to request an incremental backup across two drives.
> Coerce one of the jobs to fail, and then re-run the transaction.
>
> Verify that no bitmap data was lost due to the partial transaction
> failure.
>
> Signed-off-by: John Snow 
> ---
>   tests/qemu-iotests/124     | 119 +
>   tests/qemu-iotests/124.out |   4 +-
>   2 files changed, 121 insertions(+), 2 deletions(-)


As with patch 2, this will need a fixup for v5 of the transaction-less series.

(the changes from v1 look good, though)

Max
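
[Editor's note] For anyone skimming the thread without the patch body below:
the operation under test is a single QMP 'transaction' command carrying one
drive-backup action per drive, so a failure of either job must leave both
dirty bitmaps intact for a retry. A minimal sketch of that call shape,
assuming an iotests-style VM handle; the device IDs, bitmap name, and target
paths are illustrative, and 'sync': 'dirty-bitmap' is this series' v2
spelling of incremental mode:

    # Sketch only: vm is an iotests.py-style QMP connection; names and
    # paths here are illustrative, not taken from the patch.
    actions = []
    for dev, target in (('drive0', '/tmp/inc0.qcow2'),
                        ('drive1', '/tmp/inc1.qcow2')):
        actions.append({
            'type': 'drive-backup',
            'data': {'device': dev,
                     'sync': 'dirty-bitmap',  # incremental, bitmap-driven
                     'bitmap': 'bitmap0',
                     'format': 'qcow2',
                     'target': target,
                     'mode': 'existing'}})    # targets created beforehand

    result = vm.qmp('transaction', actions=actions)
    # An empty return only means both jobs started; success or failure
    # arrives later as BLOCK_JOB_COMPLETED / BLOCK_JOB_ERROR per device.
    assert result['return'] == {}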



[Qemu-block] [PATCH v2 11/11] iotests: 124 - transactional failure test

From: John Snow
Date: 2015-03-27

Use a transaction to request an incremental backup across two drives.
Coerce one of the jobs to fail, and then re-run the transaction.

Verify that no bitmap data was lost due to the partial transaction
failure.

Signed-off-by: John Snow 
---
 tests/qemu-iotests/124     | 119 +
 tests/qemu-iotests/124.out |   4 +-
 2 files changed, 121 insertions(+), 2 deletions(-)
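
[Editor's note] A word on the error injection below, since blkdebug's rule
syntax is terse: the blkdebug node starts in state 1; a set-state rule fires
on the first flush_to_disk event and moves it to state 2; an inject-error
rule then fails the next read_aio with EIO (errno 5) while in state 2, and
'once': True disarms the rule after a single hit, which is what allows the
re-run of the transaction to succeed. The same two rules, annotated (the
filename is a placeholder):

    # Annotated copy of the blkdebug options from the hunk below;
    # the image filename is a placeholder.
    blkdebug_opts = {
        'driver': 'blkdebug',
        'image': {'driver': 'file', 'filename': '/tmp/drive1.img'},
        # Rule 1: the first flush_to_disk while in state 1 arms the
        # trap by moving the node to state 2.
        'set-state': [{'event': 'flush_to_disk',
                       'state': 1,
                       'new_state': 2}],
        # Rule 2: while in state 2, fail the next read_aio with EIO
        # (errno 5), exactly once; 'immediately': False delivers the
        # error through the request's completion callback instead of
        # failing the submission itself.
        'inject-error': [{'event': 'read_aio',
                          'errno': 5,
                          'state': 2,
                          'immediately': False,
                          'once': True}],
    }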

diff --git a/tests/qemu-iotests/124 b/tests/qemu-iotests/124
index 31946f9..ad82076 100644
--- a/tests/qemu-iotests/124
+++ b/tests/qemu-iotests/124
@@ -332,6 +332,125 @@ class TestIncrementalBackup(iotests.QMPTestCase):
         self.create_incremental()
 
 
+    def test_transaction_failure(self):
+        '''Test: Verify backups made from a transaction that partially fails.
+
+        Add a second drive with its own unique pattern, and add a bitmap to each
+        drive. Use blkdebug to interfere with the backup on just one drive and
+        attempt to create a coherent incremental backup across both drives.
+
+        Verify a failure in one but not both, then delete the failed stubs and
+        re-run the same transaction.
+
+        Verify that both incrementals are created successfully.
+        '''
+
+        # Create a second drive, with pattern:
+        drive1 = self.add_node('drive1')
+        self.img_create(drive1['file'], drive1['fmt'])
+        io_write_patterns(drive1['file'], (('0x14', 0, 512),
+                                           ('0x5d', '1M', '32k'),
+                                           ('0xcd', '32M', '124k')))
+
+        # Create a blkdebug interface to this img as 'drive1'
+        result = self.vm.qmp('blockdev-add', options={
+            'id': drive1['id'],
+            'driver': drive1['fmt'],
+            'file': {
+                'driver': 'blkdebug',
+                'image': {
+                    'driver': 'file',
+                    'filename': drive1['file']
+                },
+                'set-state': [{
+                    'event': 'flush_to_disk',
+                    'state': 1,
+                    'new_state': 2
+                }],
+                'inject-error': [{
+                    'event': 'read_aio',
+                    'errno': 5,
+                    'state': 2,
+                    'immediately': False,
+                    'once': True
+                }],
+            }
+        })
+        self.assert_qmp(result, 'return', {})
+
+        # Create bitmaps and full backups for both drives
+        drive0 = self.drives[0]
+        dr0bm0 = self.add_bitmap('bitmap0', drive0)
+        dr1bm0 = self.add_bitmap('bitmap0', drive1)
+        self.create_full_backup(drive0)
+        self.create_full_backup(drive1)
+        self.assert_no_active_block_jobs()
+        self.assertFalse(self.vm.get_qmp_events(wait=False))
+
+        # Emulate some writes
+        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
+                                          ('0xfe', '16M', '256k'),
+                                          ('0x64', '32736k', '64k')))
+        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
+                                          ('0xef', '16M', '256k'),
+                                          ('0x46', '32736k', '64k')))
+
+        # Create incremental backup targets
+        target0 = self.prepare_backup(dr0bm0)
+        target1 = self.prepare_backup(dr1bm0)
+
+        # Ask for a new incremental backup for each drive,
+        # expecting drive1's backup to fail:
+        transaction = [
+            {
+                'type': 'drive-backup',
+                'data': { 'device': drive0['id'],
+                          'sync': 'dirty-bitmap',
+                          'format': drive0['fmt'],
+                          'target': target0,
+                          'mode': 'existing',
+                          'bitmap': dr0bm0.name },
+            },
+            {
+                'type': 'drive-backup',
+                'data': { 'device': drive1['id'],
+                          'sync': 'dirty-bitmap',
+                          'format': drive1['fmt'],
+                          'target': target1,
+                          'mode': 'existing',
+                          'bitmap': dr1bm0.name }
+            }
+        ]
+        result = self.vm.qmp('transaction', actions=transaction)
+        self.assert_qmp(result, 'return', {})
+
+        # Observe that drive0's backup completes, but drive1's does not.
+        # Consume drive1's error and ensure all pending actions are completed.
+        self.wait_incremental(dr0bm0, validate=True)
+        self.wait_incremental(dr1bm0, validate=False)
+        error = self.vm.event_wait('BLOCK_JOB_ERROR')
+        self.assert_qmp(error, 'data', {'device': drive1['id'],
+                                        'action': 'report',
+                                        'operation': 'read'})
+        self.assertFalse(self.vm.get_qmp_events(wait=False))
+        sel
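
[Editor's note] Per the commit message, the test then deletes the failed
stubs, re-runs the identical transaction, and verifies both incrementals. A
sketch of that tail using the helpers visible above; del_target is assumed
to be the bitmap helper that discards a prepared target, and the posted
patch may differ in detail:

    # Sketch of the remaining steps described by the commit message;
    # not a verbatim copy of the posted patch.
    dr0bm0.del_target()   # assumed helper: forget/unlink prepared target
    dr1bm0.del_target()

    # Fresh empty targets, then the identical transaction again; the
    # inject-error rule was 'once', so drive1 now reads cleanly.
    target0 = self.prepare_backup(dr0bm0)
    target1 = self.prepare_backup(dr1bm0)
    result = self.vm.qmp('transaction', actions=transaction)
    self.assert_qmp(result, 'return', {})

    # Both incrementals must now complete and validate, demonstrating
    # that no bitmap data was lost to the earlier partial failure.
    self.wait_incremental(dr0bm0, validate=True)
    self.wait_incremental(dr1bm0, validate=True)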