Re: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-13 Thread Bart Van Assche
On 2020-06-11 20:37, Daejun Park wrote:
>>> +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
>>> + struct ufshpb_req *map_req)
>>> +{
>>> +   struct request_queue *q;
>>> +   struct request *req;
>>> +   struct scsi_request *rq;
>>> +   int ret = 0;
>>> +
>>> +   q = hpb->sdev_ufs_lu->request_queue;
>>> +   ret = ufshpb_map_req_add_bio_page(hpb, q, map_req->bio,
>>> + map_req->mctx);
>>> +   if (ret) {
>>> +   dev_notice(&hpb->hpb_lu_dev,
>>> +  "map_req_add_bio_page fail %d - %d\n",
>>> +  map_req->rgn_idx, map_req->srgn_idx);
>>> +   return ret;
>>> +   }
>>> +
>>> +   req = map_req->req;
>>> +
>>> +   blk_rq_append_bio(req, &map_req->bio);
>>> +   req->rq_flags |= RQF_QUIET;
>>> +   req->timeout = MAP_REQ_TIMEOUT;
>>> +   req->end_io_data = (void *)map_req;
>>> +
>>> +   rq = scsi_req(req);
>>> +   ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx,
>>> +   map_req->srgn_idx, hpb->srgn_mem_size);
>>> +   rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
>>> +
>>> +   blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
>>> +
>>> +   atomic_inc(&hpb->stats.map_req_cnt);
>>> +   return 0;
>>> +}
>> 
>> Why a custom timeout instead of the SCSI LUN timeout?
>
> There was no suitable timeout value to use. I've included sd.h, so I'll
> use sd_timeout.

Wouldn't that be a layering violation? The UFS driver is a SCSI LLD
driver and the sd driver is a SCSI ULD. A SCSI LLD must not make any
assumptions about which ULD driver has been attached.

How about leaving req->timeout zero such that blk_add_timer() sets it?
blk_add_timer() is called by blk_mq_start_request(). From blk_add_timer():

if (!req->timeout)
req->timeout = q->rq_timeout;
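
As a sketch (the per-queue default below is illustrative, not taken from
the patch), the LLD could set the queue-wide timeout once and leave the
per-request value alone:

	/* e.g. at HPB LU initialization; 30 seconds is a made-up default */
	blk_queue_rq_timeout(q, 30 * HZ);

	/* per request: leave zero so that blk_add_timer() falls back to
	 * q->rq_timeout */
	req->timeout = 0;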

Thanks,

Bart.


Re: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-11 Thread Daejun Park
> > +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
> > +struct ufshpb_subregion *srgn)
> > +{
> > +   struct ufshpb_req *map_req;
> > +   struct request *req;
> > +   struct bio *bio;
> > +
> > +   map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
> > +   if (!map_req)
> > +   return NULL;
> > +
> > +   req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
> > + REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
> > +   if (IS_ERR(req))
> > +   goto free_map_req;
> > +
> > +   bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
> > +   if (!bio) {
> > +   blk_put_request(req);
> > +   goto free_map_req;
> > +   }
> > +
> > +   map_req->hpb = hpb;
> > +   map_req->req = req;
> > +   map_req->bio = bio;
> > +
> > +   map_req->rgn_idx = srgn->rgn_idx;
> > +   map_req->srgn_idx = srgn->srgn_idx;
> > +   map_req->mctx = srgn->mctx;
> > +   map_req->lun = hpb->lun;
> > +
> > +   return map_req;
> > +free_map_req:
> > +   kmem_cache_free(hpb->map_req_cache, map_req);
> > +   return NULL;
> > +}

> Will blk_get_request() fail if all tags have been allocated? Can that
> cause a deadlock or infinite loop?
If the worker fails to receive a tag, it stops and exits. The remaining
lists are processed again at the next run of the work. Therefore, no
deadlock or infinite loop occurs.
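
As a rough sketch of that flow (simplified: the handler name is assumed,
ordering and locking are omitted; this is not the patch code):

static void ufshpb_map_work_handler(struct work_struct *work)
{
	struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
	struct ufshpb_subregion *srgn, *next;

	list_for_each_entry_safe(srgn, next, &hpb->lh_act_srgn, list_act_srgn) {
		struct ufshpb_req *map_req = ufshpb_get_map_req(hpb, srgn);

		if (!map_req)
			return;	/* no free tag: stop here; the rest stays
				 * queued for the next run of the work */
		list_del_init(&srgn->list_act_srgn);
		ufshpb_execute_map_req(hpb, map_req);
	}
}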

> > +static inline void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
> > +  int srgn_idx, int srgn_mem_size)
> > +{
> > +   cdb[0] = UFSHPB_READ_BUFFER;
> > +   cdb[1] = UFSHPB_READ_BUFFER_ID;
> > +
> > +   put_unaligned_be32(srgn_mem_size, &cdb[5]);
> > +   /* cdb[5] = 0x00; */
> > +   put_unaligned_be16(rgn_idx, &cdb[2]);
> > +   put_unaligned_be16(srgn_idx, &cdb[4]);
> > +
> > +   cdb[9] = 0x00;
> > +}

> So the put_unaligned_be32(srgn_mem_size, &cdb[5]) comes first because
> the put_unaligned_be16(srgn_idx, &cdb[4]) overwrites byte cdb[5]? That
> is really ugly. Please use put_unaligned_be24() instead if that is what
> you meant and keep the put_*() calls in increasing cdb offset order.
OK, I will.
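
For example, a sketch of the reordered CDB setup, assuming srgn_mem_size
always fits in 24 bits (which the overwrite in the current code already
implies): rgn_idx lands in cdb[2..3], srgn_idx in cdb[4..5], and the
allocation length in cdb[6..8]:

	cdb[0] = UFSHPB_READ_BUFFER;
	cdb[1] = UFSHPB_READ_BUFFER_ID;
	put_unaligned_be16(rgn_idx, &cdb[2]);
	put_unaligned_be16(srgn_idx, &cdb[4]);
	put_unaligned_be24(srgn_mem_size, &cdb[6]);
	cdb[9] = 0x00;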

> > +static int ufshpb_map_req_add_bio_page(struct ufshpb_lu *hpb,
> > +  struct request_queue *q, struct bio *bio,
> > +  struct ufshpb_map_ctx *mctx)
> > +{
> > +   int i, ret = 0;
> > +
> > +   for (i = 0; i < hpb->pages_per_srgn; i++) {
> > +   ret = bio_add_pc_page(q, bio, mctx->m_page[i], PAGE_SIZE, 0);
> > +   if (ret != PAGE_SIZE) {
> > +   dev_notice(&hpb->hpb_lu_dev,
> > +  "bio_add_pc_page fail %d\n", ret);
> > +   return -ENOMEM;
> > +   }
> > +   }
> > +
> > +   return 0;
> > +}

> Why bio_add_pc_page() instead of bio_add_page()?
Since this map request is created below the block layer and it is a
passthrough command, I think bio_add_pc_page() is a more suitable API than
bio_add_page(). If bio_add_page() were used in a SCSI LLD, the block layer
checks that examine the max segment size would not be performed.
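
To illustrate the difference (a sketch, not the patch code):

	/* bio_add_pc_page() enforces the queue's DMA limits and reports
	 * how many bytes were actually accepted: */
	if (bio_add_pc_page(q, bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		return -ENOMEM;	/* e.g. max_segment_size would be exceeded */

	/* bio_add_page() performs no queue-limit checking, so a bio built
	 * here in the LLD could violate the controller's constraints. */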

> > +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
> > + struct ufshpb_req *map_req)
> > +{
> > +   struct request_queue *q;
> > +   struct request *req;
> > +   struct scsi_request *rq;
> > +   int ret = 0;
> > +
> > +   q = hpb->sdev_ufs_lu->request_queue;
> > +   ret = ufshpb_map_req_add_bio_page(hpb, q, map_req->bio,
> > + map_req->mctx);
> > +   if (ret) {
> > +   dev_notice(&hpb->hpb_lu_dev,
> > +  "map_req_add_bio_page fail %d - %d\n",
> > +  map_req->rgn_idx, map_req->srgn_idx);
> > +   return ret;
> > +   }
> > +
> > +   req = map_req->req;
> > +
> > +   blk_rq_append_bio(req, &map_req->bio);
> > +   req->rq_flags |= RQF_QUIET;
> > +   req->timeout = MAP_REQ_TIMEOUT;
> > +   req->end_io_data = (void *)map_req;
> > +
> > +   rq = scsi_req(req);
> > +   ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx,
> > +   map_req->srgn_idx, hpb->srgn_mem_size);
> > +   rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
> > +
> > +   blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
> > +
> > +   atomic_inc(&hpb->stats.map_req_cnt);
> > +   return 0;
> > +}

> Why RQF_QUIET?
I referred to the scsi_execute() function. I will delete the needless flag.

> Why a custom timeout instead of the SCSI LUN timeout?
There was no suitable timeout value to use. I've included sd.h, so I'll
use sd_timeout.

> Can this function be made asynchronous such that it does not have to be
> executed on the context of a workqueue?
If this code didn't run in a workqueue, the map-related work would have to
be handled in interrupt context. Using a workqueue also allows the
active/inactive requests to the UFS device to be issued in a batched
manner, avoiding frequent requests.
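
This matches the flow already visible in the patch: the completion path
only records the device's recommendations and kicks the worker, which
then drains the whole list in one pass. Roughly:

	/* ISR side: record the hint, no I/O from interrupt context */
	spin_lock(&hpb->rsp_list_lock);
	ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
	spin_unlock(&hpb->rsp_list_lock);
	queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);

	/* worker side: issue the batched HPB READ BUFFER requests via
	 * blk_execute_rq_nowait(), as in ufshpb_execute_map_req() above */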

Thanks,

Daejun.




Re: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-10 Thread Bart Van Assche
On 2020-06-04 18:56, Daejun Park wrote:
> +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
> +  struct ufshpb_subregion *srgn)
> +{
> + struct ufshpb_req *map_req;
> + struct request *req;
> + struct bio *bio;
> +
> + map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
> + if (!map_req)
> + return NULL;
> +
> + req = blk_get_request(hpb->sdev_ufs_lu->request_queue,
> +   REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
> + if (IS_ERR(req))
> + goto free_map_req;
> +
> + bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn);
> + if (!bio) {
> + blk_put_request(req);
> + goto free_map_req;
> + }
> +
> + map_req->hpb = hpb;
> + map_req->req = req;
> + map_req->bio = bio;
> +
> + map_req->rgn_idx = srgn->rgn_idx;
> + map_req->srgn_idx = srgn->srgn_idx;
> + map_req->mctx = srgn->mctx;
> + map_req->lun = hpb->lun;
> +
> + return map_req;
> +free_map_req:
> + kmem_cache_free(hpb->map_req_cache, map_req);
> + return NULL;
> +}

Will blk_get_request() fail if all tags have been allocated? Can that
cause a deadlock or infinite loop?

> +static inline void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
> +int srgn_idx, int srgn_mem_size)
> +{
> + cdb[0] = UFSHPB_READ_BUFFER;
> + cdb[1] = UFSHPB_READ_BUFFER_ID;
> +
> + put_unaligned_be32(srgn_mem_size, &cdb[5]);
> + /* cdb[5] = 0x00; */
> + put_unaligned_be16(rgn_idx, &cdb[2]);
> + put_unaligned_be16(srgn_idx, &cdb[4]);
> +
> + cdb[9] = 0x00;
> +}

So the put_unaligned_be32(srgn_mem_size, &cdb[5]) comes first because
the put_unaligned_be16(srgn_idx, &cdb[4]) overwrites byte cdb[5]? That
is really ugly. Please use put_unaligned_be24() instead if that is what
you meant and keep the put_*() calls in increasing cdb offset order.

> +static int ufshpb_map_req_add_bio_page(struct ufshpb_lu *hpb,
> +struct request_queue *q, struct bio *bio,
> +struct ufshpb_map_ctx *mctx)
> +{
> + int i, ret = 0;
> +
> + for (i = 0; i < hpb->pages_per_srgn; i++) {
> + ret = bio_add_pc_page(q, bio, mctx->m_page[i], PAGE_SIZE, 0);
> + if (ret != PAGE_SIZE) {
> + dev_notice(&hpb->hpb_lu_dev,
> +"bio_add_pc_page fail %d\n", ret);
> + return -ENOMEM;
> + }
> + }
> +
> + return 0;
> +}

Why bio_add_pc_page() instead of bio_add_page()?

> +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
> +   struct ufshpb_req *map_req)
> +{
> + struct request_queue *q;
> + struct request *req;
> + struct scsi_request *rq;
> + int ret = 0;
> +
> + q = hpb->sdev_ufs_lu->request_queue;
> + ret = ufshpb_map_req_add_bio_page(hpb, q, map_req->bio,
> +   map_req->mctx);
> + if (ret) {
> + dev_notice(&hpb->hpb_lu_dev,
> +"map_req_add_bio_page fail %d - %d\n",
> +map_req->rgn_idx, map_req->srgn_idx);
> + return ret;
> + }
> +
> + req = map_req->req;
> +
> + blk_rq_append_bio(req, &map_req->bio);
> + req->rq_flags |= RQF_QUIET;
> + req->timeout = MAP_REQ_TIMEOUT;
> + req->end_io_data = (void *)map_req;
> +
> + rq = scsi_req(req);
> + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx,
> + map_req->srgn_idx, hpb->srgn_mem_size);
> + rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
> +
> + blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn);
> +
> + atomic_inc(&hpb->stats.map_req_cnt);
> + return 0;
> +}

Why RQF_QUIET?

Why a custom timeout instead of the SCSI LUN timeout?

Can this function be made asynchronous such that it does not have to be
executed on the context of a workqueue?

Thanks,

Bart.


RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-09 Thread Daejun Park
> The spec does not define what the host should do in this case,
> e.g. when the device informs it that the entire db is no longer valid.
> What are you planning to do?
In the JEDEC spec, there is no description of what the driver should do.
So I will just inform the user that an "HPB reset" happened, with a kernel
message.
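
Something like the following sketch (the case label and message are
assumptions, following the reviewer's "oper 0x2" note):

	case HPB_RSP_DEV_RESET:	/* operation 0x2; name assumed */
		dev_warn(&hpb->hpb_lu_dev,
			 "UFS device reported HPB reset\n");
		break;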

Thanks,
Daejun


RE: RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-09 Thread Daejun Park
> This is not a concern, just a question.
> If a map request started while runtime/system suspend, can you share its flow?
When suspended, the worker is canceled. It then simply processes the
pending active/inactive lists after resume.
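
Sketched out (the suspend side is an assumption; the resume side matches
the hunk quoted elsewhere in this thread):

	/* suspend: stop the worker; pending list entries are kept */
	cancel_work_sync(&hpb->map_work);

	/* resume: re-arm the worker if anything is still pending */
	ufshpb_set_state(hpb, HPB_PRESENT);
	if (!ufshpb_is_empty_rsp_lists(hpb))
		queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);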

Thanks,
Daejun


RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-09 Thread Avri Altman
> > > dev_info(&hpb->hpb_lu_dev, "ufshpb resume");
> > > ufshpb_set_state(hpb, HPB_PRESENT);
> > > +   if (!ufshpb_is_empty_rsp_lists(hpb))
> > > +   queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);
> > Ahha - so you are using the ufs driver pm flows to poll your work queue.
> > Why aren't the device recommendations enough?
> I don't understand this comment. The code resumes the map_work that was
> stopped by PM while a map request was in progress.
> Please explain your concerns.
This is not a concern, just a question.
If a map request started while runtime/system suspend, can you share its flow?



RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-09 Thread Avri Altman
> > > +   switch (rsp_field->hpb_type) {
> > > +   case HPB_RSP_REQ_REGION_UPDATE:
> > > +   WARN_ON(data_seg_len != DEV_DATA_SEG_LEN);
> > > +   ufshpb_rsp_req_region_update(hpb, rsp_field);
> > > +   break;
> > What about hpb dev reset - oper 0x2?
> Yes, I will change.
The spec does not define what the host should do in this case,
e.g. when the device informs it that the entire db is no longer valid.
What are you planning to do?



Re: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-08 Thread Bart Van Assche
On 2020-06-06 11:26, Avri Altman wrote:
>> +   data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
>> +   & MASK_RSP_UPIU_DATA_SEG_LEN;
> get_unaligned instead of be32_to_cpu ?

Since sparse checks that the argument of be32_to_cpu() has type __be32
and since no such check is performed for get_unaligned_*(), please keep
the be32_to_cpu() call.
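
A small illustration of the difference (hdr is a hypothetical pointer to
a response header; run sparse via `make C=1`):

	struct resp_header { __be32 dword_2; } *hdr; /* assume it is set */
	u32 plain = 0;

	u32 a = be32_to_cpu(hdr->dword_2);  /* ok: argument has type __be32 */
	u32 b = be32_to_cpu(plain);         /* sparse warns: not __be32 */
	u32 c = get_unaligned_be32(&hdr->dword_2); /* takes void *, so no
						      endianness type check */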

Thanks,

Bart.


RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-08 Thread Daejun Park
> > The data structure for map data request and L2P map uses mempool API,
> > minimizing allocation overhead while avoiding static allocation.
> Maybe one or two more sentences to explain the L2P framework:
> Each hpb lun maintains 2 "to-do" lists: 
>  - hpb->lh_inact_rgn - regions to be inactivated, and 
>  - hpb->lh_act_srgn - subregions to be activated
> Those lists are being checked on every resume and completion interrupt.
OK, I will add more description of the L2P framework.

> > 
> > Signed-off-by: Daejun Park 
> > ---
> > +   for (i = 0; i < hpb->pages_per_srgn; i++) {
> > +   mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
> > +   GFP_KERNEL);
> > +   memset(page_address(mctx->m_page[i]), 0, PAGE_SIZE);
> Better move this memset after if (!mctx->m_page[i]).
> And maybe use clear_page instead?
OK, I will change the code.
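
That is, roughly (following both suggestions):

	for (i = 0; i < hpb->pages_per_srgn; i++) {
		mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
						GFP_KERNEL);
		if (!mctx->m_page[i]) {
			for (j = 0; j < i; j++)
				mempool_free(mctx->m_page[j],
					     ufshpb_drv.ufshpb_page_pool);
			goto release_ppn_dirty;
		}
		/* zero only after the NULL check */
		clear_page(page_address(mctx->m_page[i]));
	}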

> > +   if (!mctx->m_page[i]) {
> > +   for (j = 0; j < i; j++)
> > +   mempool_free(mctx->m_page[j],
> > +ufshpb_drv.ufshpb_page_pool);
> > +   goto release_ppn_dirty;
> > +   }
> > +static inline int ufshpb_add_region(struct ufshpb_lu *hpb,
> > +   struct ufshpb_region *rgn)
> > +{
> Maybe better describe what this function does - ufshpb_get_rgn_map_ctx ?
Yes, I think "ufshpb_get_rgn_map_ctx" is a better name.

> > +   if (!list_empty(&rgn->list_lru_rgn)) {
> > +   if (ufshpb_check_issue_state_srgns(hpb, rgn)) {
> So if one of its subregions has an inflight map request - you add it to the 
> "starved" list?
> Why call it starved?
"starved list" was wrong name. I will change it to "postponed_evict_list".

> > +* Since the region state change occurs only in the hpb task-work,
> > +* the state of the region cannot be HPB_RGN_INACTIVE at this point.
> > +* The region state must be changed in the hpb task-work
> I think that you called this worker map_work?
Yes, "the hpb task-work" will be changed to the map_work.

> > +   spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
> > +   ret = ufshpb_add_region(hpb, rgn);
> If this is not an active region: although the device indicated to
> activate a specific subregion, you are activating all the subregions
> of that region.
> You should elaborate on that in your commit log,
> and explain why this is the correct activation course.
Yes, I'm going to change the code to activate only the subregions that are
in "activate" state.

> get_unaligned instead of be16_to_cpu ?
Yes, I will change.

> > +
> > +   if (!data_seg_len) {
> data_seg_len should be DEV_DATA_SEG_LEN, and you should also check 
> HPB_UPDATE_ALERT,
> which you might want to do here and not in ufshpb_may_field_valid
Yes, I will change.

> > +   switch (rsp_field->hpb_type) {
> > +   case HPB_RSP_REQ_REGION_UPDATE:
> > +   WARN_ON(data_seg_len != DEV_DATA_SEG_LEN);
> > +   ufshpb_rsp_req_region_update(hpb, rsp_field);
> > +   break;
> What about hpb dev reset - oper 0x2?
Yes, I will change.

> > +static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
> > +  struct ufshpb_region *rgn,
> > +  struct ufshpb_subregion *srgn)
> > +{
> > +   if (!list_empty(&rgn->list_inact_rgn))
> > +   return;
> > +
> > +   if (!list_empty(&srgn->list_act_srgn)) {
> > +   list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
> Why is this needed?
> Why updating this subregion position?
The "ufshpb_add_active_list()" is called from 
"ufshpb_run_active_subregion_list()" to retry activating subregion that failed 
to activate.
Therefore, it requeues the subregion to activate region list head.
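
In other words (a short illustration of why list_move() rather than
list_move_tail() is used here):

	/* re-insert at the head so the failed subregion is retried first */
	list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
	/* list_move_tail() would instead queue it behind all newly added
	 * entries */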

> > @@ -195,8 +1047,15 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba
> > *hba, struct ufshpb_lu *hpb)
> >  release_srgn_table:
> > for (i = 0; i < rgn_idx; i++) {
> > rgn = rgn_table + i;
> > -   if (rgn->srgn_tbl)
> > +   if (rgn->srgn_tbl) {
> > +   for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt;
> > +srgn_idx++) {
> > +   srgn = rgn->srgn_tbl + srgn_idx;
> > +   if (srgn->mctx)
> How is it even possible that on init there is an active subregion?
> ufshpb_init_pinned_active_region does its own cleanup.
I will fix the duplicated cleanup code.

> > +   hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
> > + sizeof(struct page *) * hpb->pages_per_srgn,
> > + 0, 0, NULL);
> What is the advantage in using an array of page pointers,
> Instead of a single pointer to pages_per_srgn?
To minimize the memory fragmentation problem, I used an array of pointers
to individual pages rather than a single contiguous allocation.
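
As a sketch of the contrast (the kmalloc() alternative is hypothetical,
shown only for comparison):

	/* chosen: order-0 allocations, one page at a time */
	mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
					GFP_KERNEL);

	/* alternative: one physically contiguous buffer, which needs a
	 * high-order allocation and fails more easily when memory is
	 * fragmented */
	void *buf = kmalloc(hpb->pages_per_srgn * PAGE_SIZE, GFP_KERNEL);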


RE: [RFC PATCH 4/5] scsi: ufs: L2P map management for HPB read

2020-06-06 Thread Avri Altman
> 
> A pinned region is a pre-set regions on the UFS device that is always
> activate-state and
This sentence got cut off

> 
> The data structure for map data request and L2P map uses mempool API,
> minimizing allocation overhead while avoiding static allocation.

Maybe one or two more sentences to explain the L2P framework:
Each hpb lun maintains 2 "to-do" lists: 
 - hpb->lh_inact_rgn - regions to be inactivated, and 
 - hpb->lh_act_srgn - subregions to be activated
Those lists are being checked on every resume and completion interrupt.

> 
> Signed-off-by: Daejun Park 
> ---
> +   for (i = 0; i < hpb->pages_per_srgn; i++) {
> +   mctx->m_page[i] = mempool_alloc(ufshpb_drv.ufshpb_page_pool,
> +   GFP_KERNEL);
> +   memset(page_address(mctx->m_page[i]), 0, PAGE_SIZE);
Better move this memset after if (!mctx->m_page[i]).
And maybe use clear_page instead?

> +   if (!mctx->m_page[i]) {
> +   for (j = 0; j < i; j++)
> +   mempool_free(mctx->m_page[j],
> +ufshpb_drv.ufshpb_page_pool);
> +   goto release_ppn_dirty;
> +   }


> +static inline int ufshpb_add_region(struct ufshpb_lu *hpb,
> +   struct ufshpb_region *rgn)
> +{
Maybe better describe what this function does - ufshpb_get_rgn_map_ctx ?

> +
> +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region
> *rgn)
> +{
> +   unsigned long flags;
> +   int ret = 0;
> +
> +   spin_lock_irqsave(&hpb->hpb_state_lock, flags);
> +   if (rgn->rgn_state == HPB_RGN_PINNED) {
> +   dev_warn(&hpb->hpb_lu_dev,
> +"pinned region cannot drop-out. region %d\n",
> +rgn->rgn_idx);
> +   goto out;
> +   }
> +
> +   if (!list_empty(&rgn->list_lru_rgn)) {
> +   if (ufshpb_check_issue_state_srgns(hpb, rgn)) {
So if one of its subregions has an inflight map request - you add it to the 
"starved" list?
Why call it starved?


> +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
> +   struct ufshpb_region *rgn,
> +   struct ufshpb_subregion *srgn)
> +{
> +   struct ufshpb_req *map_req;
> +   unsigned long flags;
> +   int ret = 0;
> +
> +   spin_lock_irqsave(&hpb->hpb_state_lock, flags);
> +   /*
> +* Since the region state change occurs only in the hpb task-work,
> +* the state of the region cannot be HPB_RGN_INACTIVE at this point.
> +* The region state must be changed in the hpb task-work
I think that you called this worker map_work?


> +   spin_unlock_irqrestore(&hpb->hpb_state_lock, flags);
> +   ret = ufshpb_add_region(hpb, rgn);
If this is not an active region: although the device indicated to
activate a specific subregion, you are activating all the subregions
of that region.
You should elaborate on that in your commit log,
and explain why this is the correct activation course.

> +   /*
> +* If the active region and the inactive region are the same,
> +* we will inactivate this region.
> +* The device could check this (region inactivated) and
> +* will respond with the proper active region information
> +*/
> +   spin_lock(&hpb->rsp_list_lock);
> +   for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
> +   rgn_idx = be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
> +   srgn_idx = be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
get_unaligned instead of be16_to_cpu ?

> +
> +   dev_dbg(>hpb_lu_dev, "activate(%d) region %d - %d\n",
> +   i, rgn_idx, srgn_idx);
> +   ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
> +   atomic_inc(&hpb->stats.rb_active_cnt);
> +   }
> +
> +   for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
> +   rgn_idx = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
> +   dev_dbg(>hpb_lu_dev, "inactivate(%d) region %d\n",
> +   i, rgn_idx);
> +   ufshpb_update_inactive_info(hpb, rgn_idx);
> +   atomic_inc(&hpb->stats.rb_inactive_cnt);
> +   }
> +   spin_unlock(&hpb->rsp_list_lock);
> +
> +   dev_dbg(>hpb_lu_dev, "Noti: #ACT %u #INACT %u\n",
> +   rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
> +
> +   queue_work(ufshpb_drv.ufshpb_wq, &hpb->map_work);
> +}
> +
> +/* routine : isr (ufs) */
> +static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
> +{
> +   struct ufshpb_lu *hpb;
> +   struct ufshpb_rsp_field *rsp_field;
> +   int data_seg_len, ret;
> +
> +   data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
> +   & MASK_RSP_UPIU_DATA_SEG_LEN;