These cases carefully work around copying to 32-bit unpadded structures, but
the min() of the response length integrated into ib_respond_udata() handles
this automatically. Zero-initialize data that would not have been copied.

Signed-off-by: Jason Gunthorpe <[email protected]>
---
 drivers/infiniband/hw/cxgb4/cq.c       | 8 +++-----
 drivers/infiniband/hw/cxgb4/provider.c | 5 ++---
 2 files changed, 5 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e31fb9134aa818..47508df4cec023 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1115,13 +1115,11 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                /* communicate to the userspace that
                 * kernel driver supports 64B CQE
                 */
-               uresp.flags |= C4IW_64B_CQE;
+               if (!ucontext->is_32b_cqe)
+                       uresp.flags |= C4IW_64B_CQE;
 
                spin_unlock(&ucontext->mmap_lock);
-               ret = ib_copy_to_udata(udata, &uresp,
-                                      ucontext->is_32b_cqe ?
-                                      sizeof(uresp) - sizeof(uresp.flags) :
-                                      sizeof(uresp));
+               ret = ib_respond_udata(udata, uresp);
                if (ret)
                        goto err_free_mm2;
 
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index a119e8793aef40..0e3827022c63da 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -80,7 +80,7 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
        struct ib_device *ibdev = ucontext->device;
        struct c4iw_ucontext *context = to_c4iw_ucontext(ucontext);
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
-       struct c4iw_alloc_ucontext_resp uresp;
+       struct c4iw_alloc_ucontext_resp uresp = {};
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;
 
@@ -106,8 +106,7 @@ static int c4iw_alloc_ucontext(struct ib_ucontext *ucontext,
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);
 
-               ret = ib_copy_to_udata(udata, &uresp,
-                                      sizeof(uresp) - sizeof(uresp.reserved));
+               ret = ib_respond_udata(udata, uresp);
                if (ret)
                        goto err_mm;
 
-- 
2.43.0


Reply via email to