The Mellanox drivers have a pattern where they compute the response length they think they need, based on what the user asked for, and then blindly write that length, ignoring the provided size limit on the response structure.
Drop this and just use ib_respond_udata() which caps the response struct to the user's memory, which is fine for what mlx5 is doing. Signed-off-by: Jason Gunthorpe <[email protected]> --- drivers/infiniband/hw/mlx4/main.c | 2 +- drivers/infiniband/hw/mlx4/qp.c | 2 +- drivers/infiniband/hw/mlx5/ah.c | 2 +- drivers/infiniband/hw/mlx5/main.c | 4 ++-- drivers/infiniband/hw/mlx5/mr.c | 2 +- drivers/infiniband/hw/mlx5/qp.c | 10 +++++----- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ce77e893065c92..4b187ec9e01738 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -626,7 +626,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, } if (uhw->outlen) { - err = ib_copy_to_udata(uhw, &resp, resp.response_length); + err = ib_respond_udata(uhw, resp); if (err) goto out; } diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index aca8a985ce33cd..8dc4196218bf05 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -4331,7 +4331,7 @@ int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table, if (udata->outlen) { resp.response_length = offsetof(typeof(resp), response_length) + sizeof(resp.response_length); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); } return err; diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 531a57f9ee7e8b..a3aa700d08355d 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -121,7 +121,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, resp.response_length = min_resp_len; memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) return err; } diff --git a/drivers/infiniband/hw/mlx5/main.c 
b/drivers/infiniband/hw/mlx5/main.c index 57d3b80e7550b6..84dddaded6fdef 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1355,7 +1355,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, } if (uhw_outlen) { - err = ib_copy_to_udata(uhw, &resp, resp.response_length); + err = ib_respond_udata(uhw, resp); if (err) return err; @@ -2280,7 +2280,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, goto out_mdev; resp.response_length = min(udata->outlen, sizeof(resp)); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) goto out_mdev; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 3ef467ac9e3d15..8eb922bd3b663d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1811,7 +1811,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) resp.response_length = min(offsetofend(typeof(resp), response_length), udata->outlen); if (resp.response_length) { - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) goto free_mkey; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 81d98b5010f1ca..4a7363327d2a8e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3327,7 +3327,7 @@ int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr, * including MLX5_IB_QPT_DCT, which doesn't need it. * In that case, resp will be filled with zeros. 
*/ - err = ib_copy_to_udata(udata, ¶ms.resp, params.outlen); + err = ib_respond_udata(udata, params.resp); if (err) goto destroy_qp; @@ -4626,7 +4626,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, resp.dctn = qp->dct.mdct.mqp.qpn; if (MLX5_CAP_GEN(dev->mdev, ece_support)) resp.ece_options = MLX5_GET(create_dct_out, out, ece); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) { mlx5_core_destroy_dct(dev, &qp->dct.mdct); return err; @@ -4785,7 +4785,7 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (!err && resp.response_length && udata->outlen >= resp.response_length) /* Return -EFAULT to the user and expect him to destroy QP. */ - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); out: mutex_unlock(&qp->mutex); @@ -5485,7 +5485,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, if (udata->outlen) { resp.response_length = offsetofend( struct mlx5_ib_create_wq_resp, response_length); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) goto err_copy; } @@ -5576,7 +5576,7 @@ int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table, resp.response_length = offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, response_length); - err = ib_copy_to_udata(udata, &resp, resp.response_length); + err = ib_respond_udata(udata, resp); if (err) goto err_copy; } -- 2.43.0
