Re: [PATCH 09/11] iotests/264: add mirror-cancel test-case

2021-01-21 Thread Vladimir Sementsov-Ogievskiy

21.01.2021 04:26, Eric Blake wrote:

On 11/18/20 12:04 PM, Vladimir Sementsov-Ogievskiy wrote:

Check that cancel doesn't wait for the 10s NBD reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
---
  tests/qemu-iotests/264 | 38 ++
  tests/qemu-iotests/264.out |  4 ++--
  2 files changed, 32 insertions(+), 10 deletions(-)


  
+    def test_mirror_cancel(self):
+        # Mirror speed limit doesn't work well enough; it seems that mirror
+        # will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+        # MAX_IO_BYTES is 1M in mirror.c, so let's use a 20M disk.
+        self.init_vm(20 * 1024 * 1024)
+        self.start_job('blockdev-mirror')


Is this comment still accurate given recent work on the mirror filter?


Hmm, what do you mean? I must have missed it.


I'm fine taking the patch as-is and tweaking it with followups, though,
in order to make progress.


That's fine with me, of course.
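
For reference, the back-of-the-envelope sizing behind the 20M figure, as a
minimal sketch (MAX_IN_FLIGHT and MAX_IO_BYTES are taken from the comment
above and describe mirror.c as of this patch, so treat them as assumptions):

    # Up to MAX_IN_FLIGHT requests of MAX_IO_BYTES each may be issued at
    # once, regardless of the speed limit, so the disk must be larger than
    # one such burst or the job could finish before we get to cancel it.
    MAX_IN_FLIGHT = 16                      # assumption: mirror.c constant
    MAX_IO_BYTES = 1 * 1024 * 1024          # assumption: 1 MiB per request
    burst = MAX_IN_FLIGHT * MAX_IO_BYTES    # 16 MiB potentially in flight
    disk_size = 20 * 1024 * 1024            # what the test uses
    assert disk_size > burst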




+
+        result = self.vm.qmp('block-job-cancel', device='drive0')
+        self.assert_qmp(result, 'return', {})
+
+        start_t = time.time()
+        self.vm.event_wait('BLOCK_JOB_CANCELLED')
+        delta_t = time.time() - start_t
+        self.assertTrue(delta_t < 2.0)


I hope this doesn't fail on CI platforms under heavy load.  It didn't
fail for me locally, but I hope we don't have to revisit it.  Is there
any way we can test this in a manner that is not as fragile?


Hmm, I don't know. We want to check that cancel doesn't take nearly as long as
the reconnect timeout. If it fails, we'll adjust the constants :) There is no
hard limit here: we could use a one-hour reconnect-timeout and allow ten
minutes for the mirror to cancel, for example (though something else would
probably fail with such big timeouts).
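
As a minimal sketch of that kind of adjustment (hypothetical constants, not
what the patch uses; assumes it sits inside test_mirror_cancel and that
event_wait()'s timeout parameter is available):

    # Keep the cancel deadline far below reconnect-delay so that load on a
    # CI machine cannot blur the two.  Values are illustrative only.
    RECONNECT_DELAY = 3600      # would be passed as 'reconnect-delay'
    CANCEL_DEADLINE = 600       # seconds the test would allow for cancel

    start_t = time.time()
    self.vm.event_wait('BLOCK_JOB_CANCELLED', timeout=CANCEL_DEADLINE)
    delta_t = time.time() - start_t
    self.assertTrue(delta_t < CANCEL_DEADLINE < RECONNECT_DELAY)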



Reviewed-by: Eric Blake 




--
Best regards,
Vladimir



Re: [PATCH 09/11] iotests/264: add mirror-cancel test-case

2021-01-20 Thread Eric Blake
On 11/18/20 12:04 PM, Vladimir Sementsov-Ogievskiy wrote:
> Check that cancel doesn't wait for the 10s NBD reconnect timeout.
> 
> Signed-off-by: Vladimir Sementsov-Ogievskiy 
> ---
>  tests/qemu-iotests/264 | 38 ++
>  tests/qemu-iotests/264.out |  4 ++--
>  2 files changed, 32 insertions(+), 10 deletions(-)

>  
> +    def test_mirror_cancel(self):
> +        # Mirror speed limit doesn't work well enough; it seems that mirror
> +        # will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
> +        # MAX_IO_BYTES is 1M in mirror.c, so let's use a 20M disk.
> +        self.init_vm(20 * 1024 * 1024)
> +        self.start_job('blockdev-mirror')

Is this comment still accurate given recent work on the mirror filter?
I'm fine taking the patch as-is and tweaking it with followups, though,
in order to make progress.

> +
> +        result = self.vm.qmp('block-job-cancel', device='drive0')
> +        self.assert_qmp(result, 'return', {})
> +
> +        start_t = time.time()
> +        self.vm.event_wait('BLOCK_JOB_CANCELLED')
> +        delta_t = time.time() - start_t
> +        self.assertTrue(delta_t < 2.0)

I hope this doesn't fail on CI platforms under heavy load.  It didn't
fail for me locally, but I hope we don't have to revisit it.  Is there
any way we can test this in a manner that is not as fragile?

Reviewed-by: Eric Blake 

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.   +1-919-301-3226
Virtualization:  qemu.org | libvirt.org




[PATCH 09/11] iotests/264: add mirror-cancel test-case

2020-11-18 Thread Vladimir Sementsov-Ogievskiy
Check that cancel doesn't wait for the 10s NBD reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
---
 tests/qemu-iotests/264 | 38 ++
 tests/qemu-iotests/264.out |  4 ++--
 2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 8c61628921..3c6f29317f 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -26,25 +26,26 @@ from iotests import qemu_img_create, file_path, qemu_nbd_popen
 
 disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
 nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
-size = 5 * 1024 * 1024
 wait_limit = 3.0
 wait_step = 0.2
 
 
 class TestNbdReconnect(iotests.QMPTestCase):
-    def setUp(self):
-        qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
-        qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
+    def init_vm(self, disk_size):
+        qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
+        qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
         self.vm = iotests.VM().add_drive(disk_a)
         self.vm.launch()
-        self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+        self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))
 
     def tearDown(self):
         self.vm.shutdown()
         os.remove(disk_a)
         os.remove(disk_b)
 
-    def test(self):
+    def start_job(self, job):
+        """Start job with nbd target and kill the server"""
+        assert job in ('blockdev-backup', 'blockdev-mirror')
         with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
             result = self.vm.qmp('blockdev-add',
                                  **{'node_name': 'backup0',
@@ -54,7 +55,7 @@ class TestNbdReconnect(iotests.QMPTestCase):
                                                          'path': nbd_sock},
                                              'reconnect-delay': 10}})
             self.assert_qmp(result, 'return', {})
-            result = self.vm.qmp('blockdev-backup', device='drive0',
+            result = self.vm.qmp(job, device='drive0',
                                  sync='full', target='backup0',
                                  speed=(1 * 1024 * 1024))
             self.assert_qmp(result, 'return', {})
@@ -72,7 +73,8 @@ class TestNbdReconnect(iotests.QMPTestCase):
 
         jobs = self.vm.qmp('query-block-jobs')['return']
         # Check that job is still in progress
-        self.assertTrue(jobs and jobs[0]['offset'] < jobs[0]['len'])
+        self.assertTrue(jobs)
+        self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
 
         result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
         self.assert_qmp(result, 'return', {})
@@ -80,12 +82,32 @@ class TestNbdReconnect(iotests.QMPTestCase):
         # Emulate server down time for 1 second
         time.sleep(1)
 
+    def test_backup(self):
+        size = 5 * 1024 * 1024
+        self.init_vm(size)
+        self.start_job('blockdev-backup')
+
         with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
             e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
             self.assertEqual(e['data']['offset'], size)
             result = self.vm.qmp('blockdev-del', node_name='backup0')
             self.assert_qmp(result, 'return', {})
 
+    def test_mirror_cancel(self):
+        # Mirror speed limit doesn't work well enough; it seems that mirror
+        # will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+        # MAX_IO_BYTES is 1M in mirror.c, so let's use a 20M disk.
+        self.init_vm(20 * 1024 * 1024)
+        self.start_job('blockdev-mirror')
+
+        result = self.vm.qmp('block-job-cancel', device='drive0')
+        self.assert_qmp(result, 'return', {})
+
+        start_t = time.time()
+        self.vm.event_wait('BLOCK_JOB_CANCELLED')
+        delta_t = time.time() - start_t
+        self.assertTrue(delta_t < 2.0)
+
 
 if __name__ == '__main__':
     iotests.main(supported_fmts=['qcow2'])
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index ae1213e6f8..fbc63e62f8 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,5 +1,5 @@
-.
+..
 ----------------------------------------------------------------------
-Ran 1 tests
+Ran 2 tests
 
 OK
-- 
2.21.3