__ublk_check_and_get_req() checks that the passed-in offset is within
the data length of the specified ublk request. However, only user copy
(ublk_check_and_get_req()) supports accessing ublk request data at a
nonzero offset. Zero-copy buffer registration (ublk_register_io_buf())
always passes 0 for the offset, so the check is unnecessary there. Move
the check from __ublk_check_and_get_req() to ublk_check_and_get_req().
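
With an offset of 0, "offset > blk_rq_bytes(req)" can never be true, so
the user-copy path is the only caller that needs the check. After this
change, that path performs the check itself once it holds a request
reference, matching the diff below:

        req = __ublk_check_and_get_req(ub, q_id, tag, io);
        if (!req)
                return -EINVAL;

        if (buf_off > blk_rq_bytes(req)) {
                ret = -EINVAL;
                goto out;
        }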

Signed-off-by: Caleb Sander Mateos <[email protected]>
---
 drivers/block/ublk_drv.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 273d580ddc46..7fa0a9f0bfae 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -252,11 +252,11 @@ struct ublk_params_header {
 
 static void ublk_io_release(void *priv);
 static void ublk_stop_dev_unlocked(struct ublk_device *ub);
 static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
-               u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
+               u16 q_id, u16 tag, struct ublk_io *io);
 static inline unsigned int ublk_req_build_flags(struct request *req);
 
 static inline struct ublksrv_io_desc *
 ublk_get_iod(const struct ublk_queue *ubq, unsigned tag)
 {
@@ -2245,11 +2245,11 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
        int ret;
 
        if (!ublk_dev_support_zero_copy(ub))
                return -EINVAL;
 
-       req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
+       req = __ublk_check_and_get_req(ub, q_id, tag, io);
        if (!req)
                return -EINVAL;
 
        ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
                                      issue_flags);
@@ -2539,11 +2539,11 @@ static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
                        __func__, cmd_op, tag, ret, io ? io->flags : 0);
        return ret;
 }
 
 static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
-               u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
+               u16 q_id, u16 tag, struct ublk_io *io)
 {
        struct request *req;
 
        /*
         * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
@@ -2560,13 +2560,10 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
                goto fail_put;
 
        if (!ublk_rq_has_data(req))
                goto fail_put;
 
-       if (offset > blk_rq_bytes(req))
-               goto fail_put;
-
        return req;
 fail_put:
        ublk_put_req_ref(io, req);
        return NULL;
 }
@@ -2644,14 +2641,19 @@ ublk_user_copy(struct kiocb *iocb, struct iov_iter *iter, int dir)
 
        if (tag >= ub->dev_info.queue_depth)
                return -EINVAL;
 
        io = &ubq->ios[tag];
-       req = __ublk_check_and_get_req(ub, q_id, tag, io, buf_off);
+       req = __ublk_check_and_get_req(ub, q_id, tag, io);
        if (!req)
                return -EINVAL;
 
+       if (buf_off > blk_rq_bytes(req)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (!ublk_check_ubuf_dir(req, dir)) {
                ret = -EACCES;
                goto out;
        }
 
-- 
2.45.2

