[libvirt] [PATCH v3 3/3] Storage: Use errno parameter in virReportSystemError

2019-12-22 Thread Yi Li
Use the errno parameter in virReportSystemError.
Remove variables that only hold function return values where they are not
needed.

Signed-off-by: Yi Li 
---
 src/storage/storage_backend_rbd.c | 159 ++
 1 file changed, 76 insertions(+), 83 deletions(-)
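For context, the change in reporting style looks like this (a minimal sketch
with hypothetical helper names, not libvirt code as-is; librados returns a
negated errno on failure, and the new style assumes the failed call also
leaves errno set -- that assumption is the patch's, not a documented librados
guarantee):

  #include <errno.h>
  #include <rados/librados.h>

  /* Old style: librados returns -errno, so the sign is flipped before
   * handing the value to virReportSystemError(). */
  static int
  connectReportNegatedRc(rados_t cluster)
  {
      int rc;

      if ((rc = rados_connect(cluster)) < 0) {
          virReportSystemError(-rc, "%s", _("failed to connect to the RADOS monitor"));
          return -1;
      }
      return 0;
  }

  /* New style (this patch): report the thread-local errno instead,
   * assuming the failed call set it. */
  static int
  connectReportErrno(rados_t cluster)
  {
      if (rados_connect(cluster) < 0) {
          virReportSystemError(errno, "%s", _("failed to connect to the RADOS monitor"));
          return -1;
      }
      return 0;
  }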

diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
index 5f14156..78a8e95 100644
--- a/src/storage/storage_backend_rbd.c
+++ b/src/storage/storage_backend_rbd.c
@@ -184,7 +184,7 @@ static int
 virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
   virStoragePoolDefPtr def)
 {
-int rc, ret = -1;
+int ret = -1;
 virStoragePoolSourcePtr source = &def->source;
 virStorageAuthDefPtr authdef = source->auth;
 unsigned char *secret_value = NULL;
@@ -202,8 +202,8 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
 if (authdef) {
 VIR_DEBUG("Using cephx authorization, username: %s", 
authdef->username);
 
-if ((rc = rados_create(&ptr->cluster, authdef->username)) < 0) {
-virReportSystemError(-rc, "%s", _("failed to initialize RADOS"));
+if (rados_create(&ptr->cluster, authdef->username) < 0) {
+virReportSystemError(errno, "%s", _("failed to initialize RADOS"));
 goto cleanup;
 }
 
@@ -318,8 +318,8 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
 }
 
 ptr->starttime = time(0);
-if ((rc = rados_connect(ptr->cluster)) < 0) {
-virReportSystemError(-rc, _("failed to connect to the RADOS monitor on: %s"),
+if (rados_connect(ptr->cluster) < 0) {
+virReportSystemError(errno, _("failed to connect to the RADOS monitor on: %s"),
  mon_buff);
 goto cleanup;
 }
@@ -341,7 +341,7 @@ virStorageBackendRBDOpenIoCTX(virStorageBackendRBDStatePtr ptr,
 virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
 int rc = rados_ioctx_create(ptr->cluster, def->source.name, &ptr->ioctx);
 if (rc < 0) {
-virReportSystemError(-rc, _("failed to create the RBD IoCTX. Does the 
pool '%s' exist?"),
+virReportSystemError(errno, _("failed to create the RBD IoCTX. Does 
the pool '%s' exist?"),
  def->source.name);
 }
 return rc;
@@ -410,7 +410,7 @@ volStorageBackendRBDGetFeatures(rbd_image_t image,
 int rc;
 
 if ((rc = rbd_get_features(image, features)) < 0) {
-virReportSystemError(-rc, _("failed to get the features of RBD image "
+virReportSystemError(errno, _("failed to get the features of RBD image 
"
  "%s"), volname);
 return rc;
 }
@@ -427,7 +427,7 @@ volStorageBackendRBDGetFlags(rbd_image_t image,
 int rc;
 
 if ((rc = rbd_get_flags(image, flags)) < 0) {
-virReportSystemError(-rc,
+virReportSystemError(errno,
  _("failed to get the flags of RBD image %s"),
  volname);
 return rc;
@@ -467,7 +467,7 @@ virStorageBackendRBDSetAllocation(virStorageVolDefPtr vol,
 if ((rc = rbd_diff_iterate2(image, NULL, 0, info->size, 0, 1,
 virStorageBackendRBDIterateCb,
 &allocation)) < 0) {
-virReportSystemError(-rc, _("failed to iterate RBD image '%s'"),
+virReportSystemError(errno, _("failed to iterate RBD image '%s'"),
  vol->name);
 return rc;
 }
@@ -520,14 +520,14 @@ volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
 
 if ((rc = rbd_open_read_only(ptr->ioctx, vol->name, &image, NULL)) < 0) {
 ret = rc;
-virReportSystemError(-rc, _("failed to open the RBD image '%s'"),
+virReportSystemError(errno, _("failed to open the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
 
 if ((rc = rbd_stat(image, &info, sizeof(info))) < 0) {
 ret = rc;
-virReportSystemError(-rc, _("failed to stat the RBD image '%s'"),
+virReportSystemError(errno, _("failed to stat the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
@@ -600,7 +600,7 @@ virStorageBackendRBDGetVolNames(virStorageBackendRBDStatePtr ptr)
 if (rc >= 0)
 break;
 if (rc != -ERANGE) {
-virReportSystemError(-rc, "%s", _("Unable to list RBD images"));
+virReportSystemError(errno, "%s", _("Unable to list RBD images"));
 goto error;
 }
 }
@@ -641,7 +641,7 @@ virStorageBackendRBDGetVolNames(virStorageBackendRBDStatePtr ptr)
 if (rc >= 0)
 break;
 if (rc != -ERANGE) {
-virReportSystemError(-rc, "%s", _("Unable to list RBD images"));
+virReportSystemError(errno, "%s", _("Unable to list RBD images"));
 goto error;
 }
 VIR_FREE(namebuf);
@@ -687,13 +687,13 @@ virStorageBackendRBDRefreshPool(virStoragePoolObjPtr pool)

[libvirt] [PATCH v3 1/3] Storage: Use rc hold intermediate function return values.

2019-12-22 Thread Yi Li
Most libvirt code uses 'int rc' to hold intermediate function return
values. Rename 'r' to 'rc' to be consistent with the rest of libvirt.

Signed-off-by: Yi Li 
---
 src/storage/storage_backend_rbd.c | 202 ++
 1 file changed, 96 insertions(+), 106 deletions(-)

diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
index 34088f7..81f7cd5 100644
--- a/src/storage/storage_backend_rbd.c
+++ b/src/storage/storage_backend_rbd.c
@@ -184,8 +184,7 @@ static int
 virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
   virStoragePoolDefPtr def)
 {
-int ret = -1;
-int r = 0;
+int rc, ret = -1;
 virStoragePoolSourcePtr source = &def->source;
 virStorageAuthDefPtr authdef = source->auth;
 unsigned char *secret_value = NULL;
@@ -203,8 +202,8 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
 if (authdef) {
 VIR_DEBUG("Using cephx authorization, username: %s", 
authdef->username);
 
-if ((r = rados_create(&ptr->cluster, authdef->username)) < 0) {
-virReportSystemError(-r, "%s", _("failed to initialize RADOS"));
+if ((rc = rados_create(&ptr->cluster, authdef->username)) < 0) {
+virReportSystemError(-rc, "%s", _("failed to initialize RADOS"));
 goto cleanup;
 }
 
@@ -319,8 +318,8 @@ virStorageBackendRBDOpenRADOSConn(virStorageBackendRBDStatePtr ptr,
 }
 
 ptr->starttime = time(0);
-if ((r = rados_connect(ptr->cluster)) < 0) {
-virReportSystemError(-r, _("failed to connect to the RADOS monitor on: %s"),
+if ((rc = rados_connect(ptr->cluster)) < 0) {
+virReportSystemError(-rc, _("failed to connect to the RADOS monitor on: %s"),
  mon_buff);
 goto cleanup;
 }
@@ -340,12 +339,12 @@ virStorageBackendRBDOpenIoCTX(virStorageBackendRBDStatePtr ptr,
   virStoragePoolObjPtr pool)
 {
 virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
-int r = rados_ioctx_create(ptr->cluster, def->source.name, &ptr->ioctx);
-if (r < 0) {
-virReportSystemError(-r, _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
+int rc = rados_ioctx_create(ptr->cluster, def->source.name, &ptr->ioctx);
+if (rc < 0) {
+virReportSystemError(-rc, _("failed to create the RBD IoCTX. Does the pool '%s' exist?"),
  def->source.name);
 }
-return r;
+return rc;
 }
 
 static void
@@ -408,10 +407,10 @@ volStorageBackendRBDGetFeatures(rbd_image_t image,
 const char *volname,
 uint64_t *features)
 {
-int r;
+int rc;
 
-if ((r = rbd_get_features(image, features)) < 0) {
-virReportSystemError(-r, _("failed to get the features of RBD image "
+if ((rc = rbd_get_features(image, features)) < 0) {
+virReportSystemError(-rc, _("failed to get the features of RBD image "
  "%s"), volname);
 return -1;
 }
@@ -462,13 +461,13 @@ virStorageBackendRBDSetAllocation(virStorageVolDefPtr vol,
   rbd_image_t *image,
   rbd_image_info_t *info)
 {
-int r;
+int rc;
 size_t allocation = 0;
 
-if ((r = rbd_diff_iterate2(image, NULL, 0, info->size, 0, 1,
+if ((rc = rbd_diff_iterate2(image, NULL, 0, info->size, 0, 1,
 virStorageBackendRBDIterateCb,
 &allocation)) < 0) {
-virReportSystemError(-r, _("failed to iterate RBD image '%s'"),
+virReportSystemError(-rc, _("failed to iterate RBD image '%s'"),
  vol->name);
 return -1;
 }
@@ -512,24 +511,23 @@ volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
virStoragePoolObjPtr pool,
virStorageBackendRBDStatePtr ptr)
 {
-int ret = -1;
+int rc, ret = -1;
 virStoragePoolDefPtr def = virStoragePoolObjGetDef(pool);
-int r = 0;
 rbd_image_t image = NULL;
 rbd_image_info_t info;
 uint64_t features;
 uint64_t flags;
 
-if ((r = rbd_open_read_only(ptr->ioctx, vol->name, &image, NULL)) < 0) {
-ret = -r;
-virReportSystemError(-r, _("failed to open the RBD image '%s'"),
+if ((rc = rbd_open_read_only(ptr->ioctx, vol->name, &image, NULL)) < 0) {
+ret = -rc;
+virReportSystemError(-rc, _("failed to open the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
 
-if ((r = rbd_stat(image, &info, sizeof(info))) < 0) {
-ret = -r;
-virReportSystemError(-r, _("failed to stat the RBD image '%s'"),
+if ((rc = rbd_stat(image, &info, sizeof(info))) < 0) {
+ret = -rc;
+virReportSystemError(-rc, _("failed to stat the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
@@ 

[libvirt] [PATCH v3 2/3] storage: Fix volStorageBackendRBDRefreshVolInfo function return errors

2019-12-22 Thread Yi Li
Fix the return value checking for the calls made in
volStorageBackendRBDRefreshVolInfo, introduced by commit id f46d137e.

We should only fail when the return value is < 0; -ENOENT and -ETIMEDOUT
are ignored, as per commit id f46d137e.

Signed-off-by: Yi Li 
---
 src/storage/storage_backend_rbd.c | 22 ++
 1 file changed, 14 insertions(+), 8 deletions(-)
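For context, a sketch of the caller pattern this enables (a hypothetical
snippet, not the actual pool-refresh loop): propagating the raw negative
return instead of flattening it to -1 lets the caller tell ignorable errors
from fatal ones.

  #include <errno.h>

  /* Inside a hypothetical loop over the pool's volumes: */
  int rc = volStorageBackendRBDRefreshVolInfo(vol, pool, ptr);
  if (rc == -ENOENT || rc == -ETIMEDOUT)
      continue;        /* volume vanished or timed out: skip it, per f46d137e */
  if (rc < 0)
      goto cleanup;    /* any other error fails the whole pool refresh */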

diff --git a/src/storage/storage_backend_rbd.c b/src/storage/storage_backend_rbd.c
index 81f7cd5..5f14156 100644
--- a/src/storage/storage_backend_rbd.c
+++ b/src/storage/storage_backend_rbd.c
@@ -412,7 +412,7 @@ volStorageBackendRBDGetFeatures(rbd_image_t image,
 if ((rc = rbd_get_features(image, features)) < 0) {
 virReportSystemError(-rc, _("failed to get the features of RBD image "
  "%s"), volname);
-return -1;
+return rc;
 }
 
 return 0;
@@ -430,7 +430,7 @@ volStorageBackendRBDGetFlags(rbd_image_t image,
 virReportSystemError(-rc,
  _("failed to get the flags of RBD image %s"),
  volname);
-return -1;
+return rc;
 }
 
 return 0;
@@ -469,7 +469,7 @@ virStorageBackendRBDSetAllocation(virStorageVolDefPtr vol,
)) < 0) {
 virReportSystemError(-rc, _("failed to iterate RBD image '%s'"),
  vol->name);
-return -1;
+return rc;
 }
 
 VIR_DEBUG("Found %zu bytes allocated for RBD image %s",
@@ -519,24 +519,28 @@ volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
 uint64_t flags;
 
 if ((rc = rbd_open_read_only(ptr->ioctx, vol->name, &image, NULL)) < 0) {
-ret = -rc;
+ret = rc;
 virReportSystemError(-rc, _("failed to open the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
 
 if ((rc = rbd_stat(image, &info, sizeof(info))) < 0) {
-ret = -rc;
+ret = rc;
 virReportSystemError(-rc, _("failed to stat the RBD image '%s'"),
  vol->name);
 goto cleanup;
 }
 
-if (volStorageBackendRBDGetFeatures(image, vol->name, &features) < 0)
+if ((rc = volStorageBackendRBDGetFeatures(image, vol->name, &features)) < 0) {
+ret = rc;
 goto cleanup;
+}
 
-if (volStorageBackendRBDGetFlags(image, vol->name, &flags) < 0)
+if ((rc = volStorageBackendRBDGetFlags(image, vol->name, &flags)) < 0) {
+ret = rc;
 goto cleanup;
+}
 
 vol->target.capacity = info.size;
 vol->type = VIR_STORAGE_VOL_NETWORK;
@@ -549,8 +553,10 @@ volStorageBackendRBDRefreshVolInfo(virStorageVolDefPtr vol,
   "Querying for actual allocation",
   def->source.name, vol->name);
 
-if (virStorageBackendRBDSetAllocation(vol, image, &info) < 0)
+if ((rc = virStorageBackendRBDSetAllocation(vol, image, &info)) < 0) {
+ret = rc;
 goto cleanup;
+}
 } else {
 vol->target.allocation = info.obj_size * info.num_objs;
 }
-- 
2.7.5







Re: [libvirt] [PATCH v2] storage: Fix daemon crash on lookup storagepool by targetpath

2019-12-22 Thread Cole Robinson
On 12/20/19 7:33 PM, Yi Li wrote:
> libvirtd crashes in storagePoolLookupByTargetPath because some types of
> storage pool have no target element.
> Use STREQ_NULLABLE instead of STREQ to
> avoid segfaults when comparing NULL arguments.
> 
> Core was generated by `/usr/sbin/libvirtd'.
> Program terminated with signal 11, Segmentation fault.
> (gdb) bt
> 0  0x9e951388 in strcmp () from /lib64/libc.so.6
> 1  0x92103e9c in storagePoolLookupByTargetPathCallback (
> obj=0x7009aab0, opaque=0x801058b0) at storage/storage_driver.c:1649
> 2  0x9f2c52a4 in virStoragePoolObjListSearchCb (
> payload=0x801058b0, name=<optimized out>, opaque=<optimized out>)
> at conf/virstorageobj.c:476
> 3  0x9f1f2f7c in virHashSearch (ctable=0x800f4f60,
> iter=iter@entry=0x9f2c5278 <virStoragePoolObjListSearchCb>,
> data=data@entry=0x95af7488, name=name@entry=0x0) at util/virhash.c:696
> 4  0x9f2c64f0 in virStoragePoolObjListSearch (pools=0x800f2ce0,
> searcher=searcher@entry=0x92103e68 <storagePoolLookupByTargetPathCallback>,
> opaque=<optimized out>) at conf/virstorageobj.c:505
> 5  0x92101f54 in storagePoolLookupByTargetPath (conn=0x5c0009f0,
> path=0x7009a850 "/vms/images") at storage/storage_driver.c:1672
> 
> Signed-off-by: Yi Li 

Reviewed-by: Cole Robinson 

and pushed now

- Cole
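For reference, a minimal sketch of the NULL-safe comparison involved (a
stand-in macro and a hypothetical helper, not the actual libvirt callback;
libvirt's STREQ_NULLABLE treats two NULLs as equal and exactly one NULL as
unequal):

  #include <stdbool.h>
  #include <string.h>

  /* Stand-in with the same semantics as libvirt's STREQ_NULLABLE. */
  #define STREQ_NULLABLE(a, b) \
      ((a) == NULL ? (b) == NULL : ((b) != NULL && strcmp((a), (b)) == 0))

  /* A pool defined without a <target> element has a NULL target path, so a
   * plain strcmp()/STREQ here dereferences NULL and crashes libvirtd. */
  static bool
  poolMatchesTargetPath(const char *pool_target_path, const char *path)
  {
      return STREQ_NULLABLE(pool_target_path, path);
  }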




Re: [libvirt] Add support for vhost-user-scsi-pci/vhost-user-blk-pci

2019-12-22 Thread Cole Robinson
FYI there's some more recent discussion over here:
https://www.redhat.com/archives/libvir-list/2019-December/msg00817.html

There aren't any objections to using <disk> for
vhost-user-blk, so maybe that's a good place to start.
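Purely as a sketch of that direction (element and attribute names here are
hypothetical; the schema was still under discussion at this point):

  <disk type='vhostuser' device='disk'>
    <source type='unix' path='/var/tmp/vhost-user-blk.sock' mode='client'/>
    <target dev='vda' bus='virtio'/>
  </disk>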

Thanks,
Cole


On 10/15/19 7:34 AM, Li Feng wrote:
> Cole Robinson wrote on Tue, Oct 15, 2019 at 1:48 AM:
>>
>> On 10/14/19 3:12 AM, Li Feng wrote:
>>> Hi Cole & Michal,
>>>
>>> I'm sorry for my late response; I just ended my journey today.
>>> Thank you for your response; your suggestion is very helpful to me.
>>>
>>> I have added Michal to this mail; Michal helped me review my initial
>>> patchset.
>>> (https://www.spinics.net/linux/fedora/libvir/msg191339.html)
>>>
>>
>> Whoops I missed that posting, I didn't realize you had sent patches!
>>
>>> All the concern about this feature is the XML design.
>>> My original XML design exposes more details of Qemu.
>>>
>>>  
>>>  
>>>  
>>>  
>>>  
>>>  
>>>
>>> As per Cole's suggestion, a better design with all vhost-user-scsi/blk
>>> features would look like this:
>>>
>>> vhost-user-blk:
>>>
>>> 
>>> 
>>> 
>>>  
>>> 
>>> 
>>>  >> function='0x0'/>
>>> 
>>>
>>> vhost-user-scsi:
>>>
>>> 
>>> 
>>> 
>>> 
>>> 
>>> 
>>> 
>>>
>>
>> I think my SCSI idea is wrong, sorry. vhost-user-scsi is for passing a
>> scsi host adapter to the VM, correct? If so, then it's not really a
>> <disk>, and so using the existing vhost-scsi support in <hostdev> is
>> probably better. <hostdev> could possibly be used for vhost-user-blk as well
>>
>> Can you provide some examples of full qemu command lines using
>> vhost-user-blk and vhost-user-scsi? Just linking to examples elsewhere
>> is fine, but I'm wondering if there's more context
> 
> You could check the vhost-user-scsi/blk examples from SPDK pages:
> https://spdk.io/doc/vhost.html
> 
>>
>> Internally we already have an abstraction for vhost-scsi:
>>
>> <hostdev mode='subsystem' type='scsi_host'>
>>   <source protocol='vhost' wwpn='naa.5123456789abcde0'/>
>> </hostdev>
>>
>>
>> The obvious extension would be
>>
>> <hostdev mode='subsystem' type='scsi_host'>
>>   <source protocol='vhost-user' path='/path/to/vhost-user-scsi.sock' mode='client'/>
>> </hostdev>
>>
>> Internally implementing this will be weird. The <source> parameters are
>> only dictated by the hostdev type= field, but in this case they would be
>> dictated by the <source> protocol= field, and we would want to reuse the
>> internal chardev abstraction.
>>
>> vhost-user-blk could be implemented similarly, but with type='storage'
>> which is the way we pass through block devices to LXC guests, but it
>> isn't currently supported in the qemu driver.
>>
>> I dunno. Maybe Michal or danpb can provide guidance
>>
> @Michal, @danpb, could you give some guidance?
>>
>>> Conclusion:
>>> 1. Add a new type (vhostuser) for the disk label;
>>> 2. Add a queue sub-label for disk to support multiqueue (<queue num='4'/>)
>>> or reuse the driver label (<driver queues='4'/>).
>>> Qemu supports multiqueue like this:
>>> -device vhost-user-scsi-pci,id=scsi0,chardev=spdk_vhost_scsi0,num_queues=4
>>> -device vhost-user-blk-pci,chardev=spdk_vhost_blk0,num-queues=4
>>>
>>
>> num-queues is already supported by libvirt for both <disk> and <controller>
>> with <driver queues='N'/>, so whether we use <disk> or <hostdev> you won't
>> need to add any new XML here.
> Got it.
> 
>>
>>> Another question:
>>> When qemu is connecting to a vhost-user-scsi controller[1], there may
>>> exist multiple LUNs under one target,
>>> so one disk label (<disk>) will represent multiple SCSI LUNs,
>>> and the 'dev' property (<target dev='...'/>) will be ignored, right?
>>> In other words, a vhost-user-scsi disk is more like a controller;
>>> maybe the controller label is more suitable.
>>>
>>
>> Yes you are right, and this was my understanding. But then it's not
>> really a <controller> in libvirt's sense because we can't attach
>> emulated devices to it, so it's more a fit for the existing <hostdev>
>> vhost-user support. Unfortunately it's not really a clean fit anywhere,
>> there will have to be some kind of compromise. And I'm not sure if
>> <disk> or <hostdev> is right for vhost-user-blk, but hopefully others
>> have more clear opinions.
> 
> I'm also confused about it.
> Thanks for your reply.
> 
> Thanks,
> Feng Li
> 
>>
>> Thanks,
>> Cole
>>
>>> I look forward to hearing from you as soon as possible.
>>>
>>> [1]: https://spdk.io/doc/vhost.html
>>>
>>> Feng Li
>>>
>>> Cole Robinson wrote on Thu, Oct 10, 2019 at 6:48 AM:

 Sorry for the late reply, and thanks Jano for pointing out elsewhere
 that this didn't receive a response.

 On 8/12/19 5:56 AM, Li Feng wrote:
> Hi Guys,
>
> And I want to add the vhost-user-scsi-pci/vhost-user-blk-pci support
> for libvirt.
>
> The usage in qemu is like this:
>
> Vhost-SCSI
> -chardev socket,id=char0,path=/var/tmp/vhost.0
> -device vhost-user-scsi-pci,id=scsi0,chardev=char0
> Vhost-BLK
> -chardev socket,id=char1,path=/var/tmp/vhost.1
> -device vhost-user-blk-pci,id=blk0,chardev=char1
>

 Indeed that matches what I see for the qemu commits too:

 https://git.qemu.org/?p=qemu.git;a=commit;h=00343e4b54b
 https://git.qemu.org/?p=qemu.git;a=commit;h=f12c1ebddf7

> What type should I 

Re: [libvirt] libvirt API/design questions

2019-12-22 Thread Cole Robinson
On 12/12/19 9:57 AM, Ján Tomko wrote:
> On Wed, Dec 11, 2019 at 09:45:07AM -0500, Cole Robinson wrote:
>> * Splitting domain_conf up. There's 30k lines in domain_conf.c and 3500
>> in domain_conf.h, it would be nice to unwind it a bit. Getting some sign
>> off on this ahead of time is critical IMO so pieces can be posted and
>> committed quickly because they will obviously go out of date fast.
> 
> Oh yes, so fast I did not even keep the branch for this failed attempt:
> https://www.redhat.com/archives/libvir-list/2019-July/msg01257.html
> 

Ah I must have missed that posting before.

If after the break you want to coordinate, I'm happy to do quick review
of any domain_conf split so it can go in quickly

Thanks,
Cole




Re: [libvirt] libvirt API/design questions

2019-12-22 Thread Cole Robinson
On 12/12/19 6:13 AM, Daniel P. Berrangé wrote:
> On Wed, Dec 11, 2019 at 09:45:07AM -0500, Cole Robinson wrote:
>> There's some pre-existing libvirt design questions I would like some
>> feedback on, and one new one
>>
>>
>> * `virsh blockresize` doesn't prevent shrink by default; a couple of users
>> have blown their foot off and there are attempts at patches.
>> https://www.redhat.com/archives/libvir-list/2019-October/msg00258.html
>> https://www.redhat.com/archives/libvir-list/2019-November/msg00843.html
>>
>> Do we implement this as a protection in virsh, or change the API
>> behavior and require a new API flag to allow shrinking? Or some other idea?
> 
> Clearly we should have had protection when first implementing this.
> We can't change the default API behaviour now as that would break
> existing valid usage.
> 
> I think it is reasonable to change virsh though to try to add
> protection in some way.
> 
>> * vhost-user-scsi and vhost-user-blk XML schema
>> https://www.redhat.com/archives/libvirt-users/2019-October/msg00018.html
>>
>> Last proposal was using <hostdev> for this, which revisiting it now
>> makes more sense than <disk>, because vhost-user-X doesn't seem to
>> use qemu's block layer at all. So vhost-user-scsi would be:
>>
>> <hostdev mode='subsystem' type='scsi_host'>
>>   <source protocol='vhost-user' path='/path/to/vhost-user-scsi.sock' mode='client'/>
>> </hostdev>
>>
>> vhost-user-blk maybe requires a new type='block' ? Otherwise it's
>> unclear how to differentiate between the two. Regardless it would be
>> nice to give the original patch author guidance here, they seemed
>> motivated to get this working
> 
> This is a really tricky question in general. It is basically saying
> whether we consider <disk> to refer to the guest visible device
> type or the QEMU visible backend type.
> 
> Originally these were always the same thing, but offloading to
> vhostuser has blurred the boundaries. I think in non-QEMU hypervisors
> where you don't have a split of frontend & backend in the config, you'd
> just have disks no matter what.
> 
> With network we've continued to use <interface> with vhost-user.
> 
> This makes me biased towards <disk>, at least for vhost-user-blk.
> 

Okay, makes sense to me.

> I presume that with vhost-user-scsi we're exposing the entire
> SCSI controller, so we'd need a <controller>. As you show
> above we do have use of <hostdev> already for scsi_host
> but that's for something that's known to the kernel. We
> can reasonably argue that vhost-user-scsi is not the same
> as scsi_host as it's still emulated.
> 
> I think we should bear in mind that using vhost-user-blk/scsi
> does *not* imply that QEMU's block layer is not there. The
> backend vhost-user process could be implemented in the QEMU
> codebase & thus have a QMP monitor and full QEMU block backend
> featureset. This isn't the case today, but it is at least
> conceivable for the future. This would bias towards <disk>
> at least for vhost-user-blk. vhost-user-scsi is more difficult
> still.
> 

The downside of using <controller> for the scsi case is that it will
complicate libvirt SCSI address handling. Really in practical terms, a
libvirt controller is something you can assign other device <address>es
onto. With vhost-user-scsi that won't be the case at first. So if we use
<controller type='scsi'> or similar it's going to
make things a headache internally and possibly mess up bad app
assumptions. I guess it could be type='vhostuserscsi' too, to signify
it's entirely different.

So maybe <hostdev> is the simpler path forward in that case even though
it's not a clean conceptual fit there either.

>> * Splitting domain_conf up. There's 30k lines in domain_conf.c and 3500
>> in domain_conf.h, it would be nice to unwind it a bit. Getting some sign
>> off on this ahead of time is critical IMO so pieces can be posted and
>> committed quickly because they will obviously go out of date fast. My
>> thoughts:
>>
>> - domain_def.[ch]: DomainDefXXX and enum definitions.
>>   - probably New and Free functions too
>> - domain_parse.[ch]: XML parsing
>> - domain_format.[ch]: XML formatting
>> - domain_validate.[ch]: validate and postparse handling
>> - domain_util.[ch]: everything else, or keep it in domain_conf?
> 
> FWIW, if we're introducing new files, I'd like to see us move
> to the new naming based on object type.
> 
> i.e. virdomaindef.[ch], virdomainobj.[ch]
> 
> if virdomaindef.[ch] are too big, then virdomaindef{validate,util}.[ch]
> etc.  I'd keep the base file name as purely the XML parse/format code,
> and move any helper / addon logic out.
> 

Makes sense, though I think it would be helpful and an easy first step
to move the structs and enums to their own file. So maybe:

virdomaindef.[ch]: structs, enums, maybe New and Free functions
virdomaindefxml.[ch]: XML handling, parse + format
maybe validate and/or util like you suggest.

>> domain_def should be easy but no idea how intertwined the rest are. If
>> the domain_validate naming is acceptable for both validate and postparse
>> functions, we could use that naming for qemu_domain.c split too.
>>
>> Also maybe it's worth considering if there's some way to 

Re: [libvirt] virsh blockresize syntax is inconsistent with vol-resize and somewhat dangerous

2019-12-22 Thread Cole Robinson
On 11/18/19 3:16 PM, Cole Robinson wrote:
> On 10/25/19 4:28 AM, Patrik Martinsson wrote:
>> Hi Tim,
>>
>> I recently stumbled on the same thing, accidentally shrinking a block device.
>>
>> I have written a patch for virsh that will force the user to append a
>> '--force' flag if shrinking is desired.
>>
>> The behavior is still somewhat inconsistent with the vol-resize
>> command; however, a bigger rewrite is needed to make both commands
>> operate exactly the same, and I don't know if that is actually needed.
>>
>> Previous discussion can be found below,
>> - https://www.redhat.com/archives/libvir-list/2019-October/msg00258.html
>> - https://www.redhat.com/archives/libvir-list/2019-October/msg01437.html
>>
>> Best regards,
>> Patrik
>>
>>
>> On Thu, Oct 24, 2019 at 6:04 PM Tim Small  wrote:
>>>
>>> Hello,
>>>
>>> virsh has two commands which can be used to resize block devices -
>>> "blockresize" for volumes in use by and active guest, and "vol-resize"
>>> for volumes which are not in use.
>>>
>>> The vol-resize syntax allows specifying the size as a delta (increase or
>>> decrease vs. the current size), and also refuses to shrink a volume
>>> unless the "--shrink" argument is also passed.
>>>
>>> Most other tools which can be used for block device resizing (outside of
>>> libvirt) also have similar "--shrink" argument requirements when
>>> reducing the size of an existing block device.  e.g. ceph requires
>>> "--allow-shrink" when using the "rbd resize" command.
>>>
>>> The lack of such a safety device makes "blockresize" a foot-gun (which I
>>> recently found to great effect when I typoed the domain name to another
>>> valid domain).
>>>
>>> It seems I am not alone in making this error e.g.
>>> https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=902171
>>>
>>> One possible solution would be to make a new command e.g. "domblkresize"
>>> or perhaps "live-resize", which implement the "--shrink" and "--delta"
>>> behaviour to make it consistent with "vol-resize" syntax, and mark the
>>> "blockresize" command as deprecated in the documentation and help (so
>>> that existing automation which depends on the current behaviour doesn't
>>> break).
>>>
>>> Any thoughts?  Should I open this as an RFE?
>>>
> 
> Considering there's been multiple people hitting it, I think it's
> something we should fix in libvirt. Just need buy in from other devs. To
> summarize:
> 
> 'virsh blockresize' will online resize an image path for a running VM.
> It does this with the qemu block_resize monitor command via the
> virDomainBlockResize API. The API doesn't provide any protection against
> shrinking the disk image though, which I presume is both the less common
> intention of the operation, and much less often safe to do for a running
> VM. And a user typo can mean data loss
> 
> virsh vol-resize, which is storage API virStorageVolResize, is for
> offline image resizing, mostly using qemu-img. It has had a SHRINK API
> flag from the outset, rejecting requests to reduce the image size unless
> the flag is passed. Seems like a safe pattern to follow.
> 
> Can we change existing blockresize behavior? I think it's reasonable;
> we've added flags to other APIs that are required to restore old
> behavior, UNDEFINE_NVRAM for one example.
> 

I brought this question up in this thread:
https://www.redhat.com/archives/libvir-list/2019-December/msg00817.html

danpb suggested making this a protection that lives in virsh only. So,
change blockresize to reject shrinking, but add a --shrink option to
override that behavior, and all the code lives in tools/ so the old API
behavior is preserved. You can CC me on a patch and I'll review it (but
I'll be offline until January)
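A minimal sketch of what that virsh-side guard could look like (variable
names hypothetical; virDomainGetBlockInfo() reports the current capacity,
so virsh can refuse to go below it unless --shrink was given):

  virDomainBlockInfo info;

  if (virDomainGetBlockInfo(dom, path, &info, 0) == 0 &&
      size < info.capacity && !shrink) {
      vshError(ctl, "%s",
               _("refusing to shrink the block device; use --shrink to force"));
      return false;
  }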

Thanks,
Cole
