On 2020-06-04 18:56, Daejun Park wrote:
> +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
> +                                          struct ufshpb_subregion *srgn)
> +{
> +     struct ufshpb_req *map_req;
> +     struct request *req;
> +     struct bio *bio;
> +
> +     map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
> +     if (!map_req)
> +             return NULL;
> +
> +     req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
> +                           REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
> +     if (IS_ERR(req))
> +             goto free_map_req;
> +
> +     bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
> +     if (!bio) {
> +             blk_put_request(req);
> +             goto free_map_req;
> +     }
> +
> +     map_req->hpb = hpb;
> +     map_req->req = req;
> +     map_req->bio = bio;
> +
> +     map_req->rgn_idx = srgn->rgn_idx;
> +     map_req->srgn_idx = srgn->srgn_idx;
> +     map_req->mctx = srgn->mctx;
> +     map_req->lun = hpb->lun;
> +
> +     return map_req;
> +free_map_req:
> +     kmem_cache_free(hpb->map_req_cache, map_req);
> +     return NULL;
> +}

Will blk_get_request() fail if all tags have been allocated, or will it
block until a tag becomes available? Can that cause a deadlock or an
infinite loop?
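
If I remember correctly, blk_get_request() without BLK_MQ_REQ_NOWAIT
sleeps until a tag frees up, so if this path can run while all tags are
held by requests that depend on it making progress, nothing will ever
complete. Passing BLK_MQ_REQ_NOWAIT would at least make tag exhaustion
an explicit error (untested sketch):

        req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
                              REQ_OP_SCSI_IN,
                              BLK_MQ_REQ_PREEMPT | BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(req))
                /* ERR_PTR(-EWOULDBLOCK) if no tag is available */
                goto free_map_req;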

> +static inline void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
> +                                        int srgn_idx, int srgn_mem_size)
> +{
> +     cdb[0] = UFSHPB_READ_BUFFER;
> +     cdb[1] = UFSHPB_READ_BUFFER_ID;
> +
> +     put_unaligned_be32(srgn_mem_size, &cdb[5]);
> +     /* cdb[5] = 0x00; */
> +     put_unaligned_be16(rgn_idx, &cdb[2]);
> +     put_unaligned_be16(srgn_idx, &cdb[4]);
> +
> +     cdb[9] = 0x00;
> +}

So the put_unaligned_be32(srgn_mem_size, &cdb[5]) comes first because
the put_unaligned_be16(srgn_idx, &cdb[4]) overwrites byte cdb[5]? That
is really ugly. Please use put_unaligned_be24() instead if that is what
you meant and keep the put_*() calls in increasing cdb offset order.
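
In other words, something like this (untested; assumes the allocation
length really is the 24-bit field at bytes 6..8):

        cdb[0] = UFSHPB_READ_BUFFER;
        cdb[1] = UFSHPB_READ_BUFFER_ID;
        put_unaligned_be16(rgn_idx, &cdb[2]);
        put_unaligned_be16(srgn_idx, &cdb[4]);
        put_unaligned_be24(srgn_mem_size, &cdb[6]);
        cdb[9] = 0x00;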

> +static int ufshpb_map_req_add_bio_page(struct ufshpb_lu *hpb,
> +                                    struct request_queue *q, struct bio *bio,
> +                                    struct ufshpb_map_ctx *mctx)
> +{
> +     int i, ret = 0;
> +
> +     for (i = 0; i < hpb->pages_per_srgn; i++) {
> +             ret = bio_add_pc_page(q, bio, mctx->m_page[i], PAGE_SIZE, 0);
> +             if (ret != PAGE_SIZE) {
> +                     dev_notice(&hpb->hpb_lu_dev,
> +                                "bio_add_pc_page fail %d\n", ret);
> +                     return -ENOMEM;
> +             }
> +     }
> +
> +     return 0;
> +}

Why bio_add_pc_page() instead of bio_add_page()?
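
These pages are allocated by the driver itself rather than mapped from
user space, so if the extra queue-limit checking that bio_add_pc_page()
performs is not needed here, the loop body could simply be (untested):

        if (bio_add_page(bio, mctx->m_page[i], PAGE_SIZE, 0) != PAGE_SIZE) {
                dev_notice(&hpb->hpb_lu_dev, "bio_add_page failed\n");
                return -ENOMEM;
        }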

> +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
> +                               struct ufshpb_req *map_req)
> +{
> +     struct request_queue *q;
> +     struct request *req;
> +     struct scsi_request *rq;
> +     int ret = 0;
> +
> +     q = hpb->sdev_ufs_lu->request_queue;
> +     ret = ufshpb_map_req_add_bio_page(hpb, q, map_req->bio,
> +                                       map_req->mctx);
> +     if (ret) {
> +             dev_notice(&hpb->hpb_lu_dev,
> +                        "map_req_add_bio_page fail %d - %d\n",
> +                        map_req->rgn_idx, map_req->srgn_idx);
> +             return ret;
> +     }
> +
> +     req = map_req->req;
> +
> +     blk_rq_append_bio(req, &map_req->bio);
> +     req->rq_flags |= RQF_QUIET;
> +     req->timeout = MAP_REQ_TIMEOUT;
> +     req->end_io_data = (void *)map_req;
> +
> +     rq = scsi_req(req);
> +     ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx,
> +                             map_req->srgn_idx, hpb->srgn_mem_size);
> +     rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
> +
> +     blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
> +
> +     atomic_inc(&hpb->stats.map_req_cnt);
> +     return 0;
> +}

Why RQF_QUIET?

Why a custom timeout instead of the SCSI LUN timeout?
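
If I read blk_add_timer() correctly, a request whose ->timeout is zero
falls back to q->rq_timeout, i.e. the per-LUN timeout that can be
configured through sysfs. So using the LUN timeout would just mean
dropping the assignment:

        req->rq_flags |= RQF_QUIET;
        /* no req->timeout assignment: blk_add_timer() falls back to
         * q->rq_timeout */
        req->end_io_data = map_req;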

Can this function be made asynchronous such that it does not have to be
executed in the context of a workqueue?
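
As far as I can tell, the only reason this has to run from a workqueue
is the set of sleeping allocations in ufshpb_get_map_req(). With
non-sleeping variants the whole submission path could run directly from
the caller's context -- an untested sketch:

        map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_NOWAIT);
        if (!map_req)
                return NULL;

        /* BLK_MQ_REQ_NOWAIT as suggested above */
        req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
                              REQ_OP_SCSI_IN,
                              BLK_MQ_REQ_PREEMPT | BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(req))
                goto free_map_req;

        bio = bio_alloc(GFP_NOWAIT, hpb->pages_per_srgn);

blk_execute_rq_nowait() itself is already asynchronous, so nothing else
in ufshpb_execute_map_req() should need to sleep.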

Thanks,

Bart.
