Re: [Qemu-devel] [PATCH] block/nvme: fix Coverity reports
Philippe Mathieu-Daudé writes: > On 02/09/2018 12:24 PM, Paolo Bonzini wrote: >> 1) string not null terminated in sysfs_find_group_file > > CID 1385854 > >> >> 2) NULL pointer dereference and dead local variable in nvme_init. > > CID 1385855 > >> >> Signed-off-by: Paolo Bonzini >> --- >> block/nvme.c| 14 ++ >> util/vfio-helpers.c | 2 +- >> 2 files changed, 7 insertions(+), 9 deletions(-) >> >> diff --git a/block/nvme.c b/block/nvme.c >> index e9d0e218fc..ce217ffc81 100644 >> --- a/block/nvme.c >> +++ b/block/nvme.c >> @@ -553,7 +553,6 @@ static int nvme_init(BlockDriverState *bs, const char >> *device, int namespace, >> uint64_t cap; >> uint64_t timeout_ms; >> uint64_t deadline, now; >> -Error *local_err = NULL; >> >> qemu_co_mutex_init(&s->dma_map_lock); >> qemu_co_queue_init(&s->dma_flush_queue); >> @@ -645,11 +644,6 @@ static int nvme_init(BlockDriverState *bs, const char >> *device, int namespace, >> false, nvme_handle_event, nvme_poll_cb); >> >> nvme_identify(bs, namespace, errp); > > The problem seems local_err is not used as nvme_identify() argument; > however this big function uses both errp and local_err so maybe clean it > to keep one style is better. > > Isn't local_err + error_propagate() the cleaner way? Cleaner no, actually correct, probably. Passing errp directly is okay in certain circumstances. Quote include/qapi/error.h: * Receive an error and pass it on to the caller: * Error *err = NULL; * foo(arg, &err); * if (err) { * handle the error... * error_propagate(errp, err); * } * where Error **errp is a parameter, by convention the last one. * * Do *not* "optimize" this to * foo(arg, errp); * if (*errp) { // WRONG! * handle the error... * } * because errp may be NULL! * * But when all you do with the error is pass it on, please use * foo(arg, errp); * for readability. However, ... >> -if (local_err) { >> -error_propagate(errp, local_err); >> -ret = -EIO; >> -goto fail_handler; >> -} ... if nvme_identify() fails and sets an error, we now continue ... 
>> >> /* Set up command queues. */ >> if (!nvme_add_io_queue(bs, errp)) { ... to this call, where errp points to non-null. That's wrong, because it'll crash when nvme_add_io_queue() tries to set an error. include/qapi/error.h again: * Receive and accumulate multiple errors (first one wins): * Error *err = NULL, *local_err = NULL; * foo(arg, &err); * bar(arg, &local_err); * error_propagate(&err, local_err); * if (err) { * handle the error... * } * * Do *not* "optimize" this to * foo(arg, &err); * bar(arg, &err); // WRONG! * if (err) { * handle the error... * } * because this may pass a non-null err to bar(). I doubt you want to accumulate here. [...]
Re: [Qemu-devel] [PATCH] block/nvme: fix Coverity reports
On 02/09/2018 12:24 PM, Paolo Bonzini wrote: > 1) string not null terminated in sysfs_find_group_file CID 1385854 > > 2) NULL pointer dereference and dead local variable in nvme_init. CID 1385855 > > Signed-off-by: Paolo Bonzini > --- > block/nvme.c| 14 ++ > util/vfio-helpers.c | 2 +- > 2 files changed, 7 insertions(+), 9 deletions(-) > > diff --git a/block/nvme.c b/block/nvme.c > index e9d0e218fc..ce217ffc81 100644 > --- a/block/nvme.c > +++ b/block/nvme.c > @@ -553,7 +553,6 @@ static int nvme_init(BlockDriverState *bs, const char > *device, int namespace, > uint64_t cap; > uint64_t timeout_ms; > uint64_t deadline, now; > -Error *local_err = NULL; > > qemu_co_mutex_init(&s->dma_map_lock); > qemu_co_queue_init(&s->dma_flush_queue); > @@ -645,11 +644,6 @@ static int nvme_init(BlockDriverState *bs, const char > *device, int namespace, > false, nvme_handle_event, nvme_poll_cb); > > nvme_identify(bs, namespace, errp); The problem seems local_err is not used as nvme_identify() argument; however this big function uses both errp and local_err so maybe clean it to keep one style is better. Isn't local_err + error_propagate() the cleaner way? > -if (local_err) { > -error_propagate(errp, local_err); > -ret = -EIO; > -goto fail_handler; > -} > > /* Set up command queues. 
*/ > if (!nvme_add_io_queue(bs, errp)) { > @@ -665,8 +659,12 @@ fail_queue: > nvme_free_queue_pair(bs, s->queues[0]); > fail: > g_free(s->queues); > -qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE); > -qemu_vfio_close(s->vfio); > +if (s->regs) { > +qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, > NVME_BAR_SIZE); > +} > +if (s->vfio) { > +qemu_vfio_close(s->vfio); > +} > event_notifier_cleanup(&s->irq_notifier); > return ret; > } > diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c > index f478b68400..006674c916 100644 > --- a/util/vfio-helpers.c > +++ b/util/vfio-helpers.c > @@ -104,7 +104,7 @@ static char *sysfs_find_group_file(const char *device, > Error **errp) > char *path = NULL; > > sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", > device); > -sysfs_group = g_malloc(PATH_MAX); > +sysfs_group = g_malloc0(PATH_MAX); This looks odd... When can sysfs_group not be null-terminated? Since we have strlen(sysfs_link) > 0, readlink() cannot return 0. Maybe this is enough to silence Coverity: if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) <= 0) { > if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) { > error_setg_errno(errp, errno, "Failed to find iommu group sysfs > path"); > goto out; >
[Qemu-devel] [PATCH] block/nvme: fix Coverity reports
1) string not null terminated in sysfs_find_group_file 2) NULL pointer dereference and dead local variable in nvme_init. Signed-off-by: Paolo Bonzini --- block/nvme.c| 14 ++ util/vfio-helpers.c | 2 +- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/block/nvme.c b/block/nvme.c index e9d0e218fc..ce217ffc81 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -553,7 +553,6 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, uint64_t cap; uint64_t timeout_ms; uint64_t deadline, now; -Error *local_err = NULL; qemu_co_mutex_init(&s->dma_map_lock); qemu_co_queue_init(&s->dma_flush_queue); @@ -645,11 +644,6 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace, false, nvme_handle_event, nvme_poll_cb); nvme_identify(bs, namespace, errp); -if (local_err) { -error_propagate(errp, local_err); -ret = -EIO; -goto fail_handler; -} /* Set up command queues. */ if (!nvme_add_io_queue(bs, errp)) { @@ -665,8 +659,12 @@ fail_queue: nvme_free_queue_pair(bs, s->queues[0]); fail: g_free(s->queues); -qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE); -qemu_vfio_close(s->vfio); +if (s->regs) { +qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE); +} +if (s->vfio) { +qemu_vfio_close(s->vfio); +} event_notifier_cleanup(&s->irq_notifier); return ret; } diff --git a/util/vfio-helpers.c b/util/vfio-helpers.c index f478b68400..006674c916 100644 --- a/util/vfio-helpers.c +++ b/util/vfio-helpers.c @@ -104,7 +104,7 @@ static char *sysfs_find_group_file(const char *device, Error **errp) char *path = NULL; sysfs_link = g_strdup_printf("/sys/bus/pci/devices/%s/iommu_group", device); -sysfs_group = g_malloc(PATH_MAX); +sysfs_group = g_malloc0(PATH_MAX); if (readlink(sysfs_link, sysfs_group, PATH_MAX - 1) == -1) { error_setg_errno(errp, errno, "Failed to find iommu group sysfs path"); goto out; -- 2.14.3