Re: [PATCH v2 08/10] iotests/264: add mirror-cancel test-case

2022-07-21 Thread Vladimir Sementsov-Ogievskiy

On 7/21/22 11:50, Thomas Huth wrote:

On 05/02/2021 17.37, Vladimir Sementsov-Ogievskiy wrote:

Check that cancel doesn't wait for 10s of nbd reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
Reviewed-by: Eric Blake 
---
  tests/qemu-iotests/264 | 38 ++
  tests/qemu-iotests/264.out |  4 ++--
  2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 6feeaa4056..347e53add5 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -27,25 +27,26 @@ from iotests import qemu_img_create, file_path, 
qemu_nbd_popen
  disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
  nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
-size = 5 * 1024 * 1024
  wait_limit = 3.0
  wait_step = 0.2
  class TestNbdReconnect(iotests.QMPTestCase):
-    def setUp(self):
-    qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
-    qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
+    def init_vm(self, disk_size):
+    qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
+    qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
  self.vm = iotests.VM().add_drive(disk_a)
  self.vm.launch()
-    self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+    self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))
  def tearDown(self):
  self.vm.shutdown()
  os.remove(disk_a)
  os.remove(disk_b)
-    def test(self):
+    def start_job(self, job):
+    """Start job with nbd target and kill the server"""
+    assert job in ('blockdev-backup', 'blockdev-mirror')
  with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
  result = self.vm.qmp('blockdev-add',
   **{'node_name': 'backup0',
@@ -55,7 +56,7 @@ class TestNbdReconnect(iotests.QMPTestCase):
  'path': nbd_sock},
   'reconnect-delay': 10}})
  self.assert_qmp(result, 'return', {})
-    result = self.vm.qmp('blockdev-backup', device='drive0',
+    result = self.vm.qmp(job, device='drive0',
   sync='full', target='backup0',
   speed=(1 * 1024 * 1024))
  self.assert_qmp(result, 'return', {})
@@ -73,7 +74,8 @@ class TestNbdReconnect(iotests.QMPTestCase):
  jobs = self.vm.qmp('query-block-jobs')['return']
  # Check that job is still in progress
-    self.assertTrue(jobs and jobs[0]['offset'] < jobs[0]['len'])
+    self.assertTrue(jobs)
+    self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
  result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
  self.assert_qmp(result, 'return', {})
@@ -81,12 +83,32 @@ class TestNbdReconnect(iotests.QMPTestCase):
  # Emulate server down time for 1 second
  time.sleep(1)
+    def test_backup(self):
+    size = 5 * 1024 * 1024
+    self.init_vm(size)
+    self.start_job('blockdev-backup')
+
  with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
  e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
  self.assertEqual(e['data']['offset'], size)
  result = self.vm.qmp('blockdev-del', node_name='backup0')
  self.assert_qmp(result, 'return', {})
+    def test_mirror_cancel(self):
+    # Mirror speed limit doesn't work well enough, it seems that mirror
+    # will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+    # MAX_IO_BYTES is 1M in mirror.c, so let's use 20M disk.
+    self.init_vm(20 * 1024 * 1024)
+    self.start_job('blockdev-mirror')
+
+    result = self.vm.qmp('block-job-cancel', device='drive0')
+    self.assert_qmp(result, 'return', {})
+
+    start_t = time.time()
+    self.vm.event_wait('BLOCK_JOB_CANCELLED')
+    delta_t = time.time() - start_t
+    self.assertTrue(delta_t < 2.0)


  Hi!

For what it's worth, I've just run into this assert while running "make check -j8 
SPEED=slow":

--- /home/thuth/devel/qemu/tests/qemu-iotests/264.out
+++ /home/thuth/tmp/qemu-build/tests/qemu-iotests/scratch/264/264.out.bad
@@ -1,5 +1,15 @@
-...
+..F
+==
+FAIL: test_mirror_cancel (__main__.TestNbdReconnect)
+--
+Traceback (most recent call last):
+  File "/home/thuth/devel/qemu/tests/qemu-iotests/264", line 112, in 
test_mirror_cancel
+    self.cancel_job()
+  File "/home/thuth/devel/qemu/tests/qemu-iotests/264", line 104, in cancel_job
+    self.assertTrue(delta_t < 2.0)
+AssertionError: False is not true
+
  --
  Ran 3 tests

-OK
+FAILED (failures=1)

Re: [PATCH v2 08/10] iotests/264: add mirror-cancel test-case

2022-07-21 Thread Thomas Huth

On 05/02/2021 17.37, Vladimir Sementsov-Ogievskiy wrote:

Check that cancel doesn't wait for 10s of nbd reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
Reviewed-by: Eric Blake 
---
  tests/qemu-iotests/264 | 38 ++
  tests/qemu-iotests/264.out |  4 ++--
  2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 6feeaa4056..347e53add5 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -27,25 +27,26 @@ from iotests import qemu_img_create, file_path, 
qemu_nbd_popen
  
  disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')

  nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
-size = 5 * 1024 * 1024
  wait_limit = 3.0
  wait_step = 0.2
  
  
  class TestNbdReconnect(iotests.QMPTestCase):

-def setUp(self):
-qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
-qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
+def init_vm(self, disk_size):
+qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
+qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
  self.vm = iotests.VM().add_drive(disk_a)
  self.vm.launch()
-self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))
  
  def tearDown(self):

  self.vm.shutdown()
  os.remove(disk_a)
  os.remove(disk_b)
  
-def test(self):

+def start_job(self, job):
+"""Start job with nbd target and kill the server"""
+assert job in ('blockdev-backup', 'blockdev-mirror')
  with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
  result = self.vm.qmp('blockdev-add',
   **{'node_name': 'backup0',
@@ -55,7 +56,7 @@ class TestNbdReconnect(iotests.QMPTestCase):
  'path': nbd_sock},
   'reconnect-delay': 10}})
  self.assert_qmp(result, 'return', {})
-result = self.vm.qmp('blockdev-backup', device='drive0',
+result = self.vm.qmp(job, device='drive0',
   sync='full', target='backup0',
   speed=(1 * 1024 * 1024))
  self.assert_qmp(result, 'return', {})
@@ -73,7 +74,8 @@ class TestNbdReconnect(iotests.QMPTestCase):
  
  jobs = self.vm.qmp('query-block-jobs')['return']

  # Check that job is still in progress
-self.assertTrue(jobs and jobs[0]['offset'] < jobs[0]['len'])
+self.assertTrue(jobs)
+self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
  
  result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)

  self.assert_qmp(result, 'return', {})
@@ -81,12 +83,32 @@ class TestNbdReconnect(iotests.QMPTestCase):
  # Emulate server down time for 1 second
  time.sleep(1)
  
+def test_backup(self):

+size = 5 * 1024 * 1024
+self.init_vm(size)
+self.start_job('blockdev-backup')
+
  with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
  e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
  self.assertEqual(e['data']['offset'], size)
  result = self.vm.qmp('blockdev-del', node_name='backup0')
  self.assert_qmp(result, 'return', {})
  
+def test_mirror_cancel(self):

+# Mirror speed limit doesn't work well enough, it seems that mirror
+# will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+# MAX_IO_BYTES is 1M in mirror.c, so let's use 20M disk.
+self.init_vm(20 * 1024 * 1024)
+self.start_job('blockdev-mirror')
+
+result = self.vm.qmp('block-job-cancel', device='drive0')
+self.assert_qmp(result, 'return', {})
+
+start_t = time.time()
+self.vm.event_wait('BLOCK_JOB_CANCELLED')
+delta_t = time.time() - start_t
+self.assertTrue(delta_t < 2.0)


 Hi!

For what it's worth, I've just run into this assert while running "make 
check -j8 SPEED=slow":


--- /home/thuth/devel/qemu/tests/qemu-iotests/264.out
+++ /home/thuth/tmp/qemu-build/tests/qemu-iotests/scratch/264/264.out.bad
@@ -1,5 +1,15 @@
-...
+..F
+==
+FAIL: test_mirror_cancel (__main__.TestNbdReconnect)
+--
+Traceback (most recent call last):
+  File "/home/thuth/devel/qemu/tests/qemu-iotests/264", line 112, in 
test_mirror_cancel

+self.cancel_job()
+  File "/home/thuth/devel/qemu/tests/qemu-iotests/264", line 104, in cancel_job
+self.assertTrue(delta_t < 2.0)
+AssertionError: False is not true
+
 --
 Ran 3 tests

-OK
+FAILED (failures=1)

(te

[PATCH v2 08/10] iotests/264: add mirror-cancel test-case

2021-02-05 Thread Vladimir Sementsov-Ogievskiy
Check that cancel doesn't wait for 10s of nbd reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
Reviewed-by: Eric Blake 
---
 tests/qemu-iotests/264 | 38 ++
 tests/qemu-iotests/264.out |  4 ++--
 2 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 6feeaa4056..347e53add5 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -27,25 +27,26 @@ from iotests import qemu_img_create, file_path, 
qemu_nbd_popen
 
 disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
 nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
-size = 5 * 1024 * 1024
 wait_limit = 3.0
 wait_step = 0.2
 
 
 class TestNbdReconnect(iotests.QMPTestCase):
-def setUp(self):
-qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
-qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
+def init_vm(self, disk_size):
+qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
+qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
 self.vm = iotests.VM().add_drive(disk_a)
 self.vm.launch()
-self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))
 
 def tearDown(self):
 self.vm.shutdown()
 os.remove(disk_a)
 os.remove(disk_b)
 
-def test(self):
+def start_job(self, job):
+"""Start job with nbd target and kill the server"""
+assert job in ('blockdev-backup', 'blockdev-mirror')
 with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
 result = self.vm.qmp('blockdev-add',
  **{'node_name': 'backup0',
@@ -55,7 +56,7 @@ class TestNbdReconnect(iotests.QMPTestCase):
 'path': nbd_sock},
  'reconnect-delay': 10}})
 self.assert_qmp(result, 'return', {})
-result = self.vm.qmp('blockdev-backup', device='drive0',
+result = self.vm.qmp(job, device='drive0',
  sync='full', target='backup0',
  speed=(1 * 1024 * 1024))
 self.assert_qmp(result, 'return', {})
@@ -73,7 +74,8 @@ class TestNbdReconnect(iotests.QMPTestCase):
 
 jobs = self.vm.qmp('query-block-jobs')['return']
 # Check that job is still in progress
-self.assertTrue(jobs and jobs[0]['offset'] < jobs[0]['len'])
+self.assertTrue(jobs)
+self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
 
 result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
 self.assert_qmp(result, 'return', {})
@@ -81,12 +83,32 @@ class TestNbdReconnect(iotests.QMPTestCase):
 # Emulate server down time for 1 second
 time.sleep(1)
 
+def test_backup(self):
+size = 5 * 1024 * 1024
+self.init_vm(size)
+self.start_job('blockdev-backup')
+
 with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
 e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
 self.assertEqual(e['data']['offset'], size)
 result = self.vm.qmp('blockdev-del', node_name='backup0')
 self.assert_qmp(result, 'return', {})
 
+def test_mirror_cancel(self):
+# Mirror speed limit doesn't work well enough, it seems that mirror
+# will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+# MAX_IO_BYTES is 1M in mirror.c, so let's use 20M disk.
+self.init_vm(20 * 1024 * 1024)
+self.start_job('blockdev-mirror')
+
+result = self.vm.qmp('block-job-cancel', device='drive0')
+self.assert_qmp(result, 'return', {})
+
+start_t = time.time()
+self.vm.event_wait('BLOCK_JOB_CANCELLED')
+delta_t = time.time() - start_t
+self.assertTrue(delta_t < 2.0)
+
 
 if __name__ == '__main__':
 iotests.main(supported_fmts=['qcow2'])
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index ae1213e6f8..fbc63e62f8 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,5 +1,5 @@
-.
+..
 --
-Ran 1 tests
+Ran 2 tests
 
 OK
-- 
2.29.2