In host mode, the host is expected to send HPB-WRITE-BUFFER with
buffer-id = 0x1 when it inactivates a region.

Use the map-requests pool as there is no point in assigning a
designated cache for umap-requests.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
Reviewed-by: Daejun Park <daejun7.park@samsung.com>
---
 drivers/scsi/ufs/ufshpb.c | 46 +++++++++++++++++++++++++++++++++------
 drivers/scsi/ufs/ufshpb.h |  1 +
 2 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index aefb6dc160ee..28aef5acffa2 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -691,7 +691,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 }
 
 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
-                                        int rgn_idx, enum req_opf dir)
+                                        int rgn_idx, enum req_opf dir,
+                                        bool atomic)
 {
        struct ufshpb_req *rq;
        struct request *req;
@@ -705,7 +706,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
        req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
                              BLK_MQ_REQ_NOWAIT);
 
-       if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
+       if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
                usleep_range(3000, 3100);
                goto retry;
        }
@@ -736,7 +737,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
        struct ufshpb_req *map_req;
        struct bio *bio;
 
-       map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN);
+       map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false);
        if (!map_req)
                return NULL;
 
@@ -914,6 +915,7 @@ static int ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 
        blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
 
+       hpb->stats.umap_req_cnt++;
        return 0;
 }
 
@@ -1091,12 +1093,13 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
 }
 
 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
-                                struct ufshpb_region *rgn)
+                                struct ufshpb_region *rgn,
+                                bool atomic)
 {
        struct ufshpb_req *umap_req;
        int rgn_idx = rgn ? rgn->rgn_idx : 0;
 
-       umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT);
+       umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic);
        if (!umap_req)
                return -ENOMEM;
 
@@ -1110,13 +1113,19 @@ static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
        return -EAGAIN;
 }
 
+static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
+                                       struct ufshpb_region *rgn)
+{
+       return ufshpb_issue_umap_req(hpb, rgn, true);
+}
+
 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
 {
-       return ufshpb_issue_umap_req(hpb, NULL);
+       return ufshpb_issue_umap_req(hpb, NULL, false);
 }
 
 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
-                                 struct ufshpb_region *rgn)
+                                struct ufshpb_region *rgn)
 {
        struct victim_select_info *lru_info;
        struct ufshpb_subregion *srgn;
@@ -1151,6 +1160,14 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
                        goto out;
                }
 
+               if (hpb->is_hcm) {
+                       spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+                       ret = ufshpb_issue_umap_single_req(hpb, rgn);
+                       spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+                       if (ret)
+                               goto out;
+               }
+
                __ufshpb_evict_region(hpb, rgn);
        }
 out:
@@ -1285,6 +1302,18 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
                                "LRU full (%d), choose victim %d\n",
                                atomic_read(&lru_info->active_cnt),
                                victim_rgn->rgn_idx);
+
+                       if (hpb->is_hcm) {
+                               spin_unlock_irqrestore(&hpb->rgn_state_lock,
+                                                      flags);
+                               ret = ufshpb_issue_umap_single_req(hpb,
+                                                               victim_rgn);
+                               spin_lock_irqsave(&hpb->rgn_state_lock,
+                                                 flags);
+                               if (ret)
+                                       goto out;
+                       }
+
                        __ufshpb_evict_region(hpb, victim_rgn);
                }
 
@@ -1856,6 +1885,7 @@ ufshpb_sysfs_attr_show_func(rb_noti_cnt);
 ufshpb_sysfs_attr_show_func(rb_active_cnt);
 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
 ufshpb_sysfs_attr_show_func(map_req_cnt);
+ufshpb_sysfs_attr_show_func(umap_req_cnt);
 
 static struct attribute *hpb_dev_stat_attrs[] = {
        &dev_attr_hit_cnt.attr,
@@ -1864,6 +1894,7 @@ static struct attribute *hpb_dev_stat_attrs[] = {
        &dev_attr_rb_active_cnt.attr,
        &dev_attr_rb_inactive_cnt.attr,
        &dev_attr_map_req_cnt.attr,
+       &dev_attr_umap_req_cnt.attr,
        NULL,
 };
 
@@ -1988,6 +2019,7 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb)
        hpb->stats.rb_active_cnt = 0;
        hpb->stats.rb_inactive_cnt = 0;
        hpb->stats.map_req_cnt = 0;
+       hpb->stats.umap_req_cnt = 0;
 }
 
 static void ufshpb_param_init(struct ufshpb_lu *hpb)
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index 87495e59fcf1..1ea58c17a4de 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -191,6 +191,7 @@ struct ufshpb_stats {
        u64 rb_inactive_cnt;
        u64 map_req_cnt;
        u64 pre_req_cnt;
+       u64 umap_req_cnt;
 };
 
 struct ufshpb_lu {
-- 
2.25.1

Reply via email to