FROMLIST: scsi: ufs: Region inactivation in HCM

In host mode, the host is expected to send HPB-WRITE-BUFFER with
buffer-id = 0x1 when it inactivates a region.
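
For reference, the CDB in question looks roughly like the sketch below.
Only the buffer-id (0x1) and the region index are given by this patch;
the helper name, the opcode macros, and put_unaligned_be16() are
assumptions modeled on the surrounding ufshpb driver:

	/*
	 * Sketch: HPB-WRITE-BUFFER CDB for single-region inactivation.
	 * The macro names are assumed from the driver; buffer-id 0x1 is
	 * the value mandated above.
	 */
	static void ufshpb_set_unmap_cmd(unsigned char *cdb,
					 struct ufshpb_region *rgn)
	{
		cdb[0] = UFSHPB_WRITE_BUFFER;	/* HPB-WRITE-BUFFER opcode */
		cdb[1] = UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID; /* buffer-id 0x1 */
		put_unaligned_be16(rgn->rgn_idx, &cdb[2]); /* region to drop */
		cdb[9] = 0x00;			/* CONTROL */
	}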

Use the map-requests pool as there is no point in assigning a
designated cache for umap-requests.
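
Concretely, both request types are carved from the same slab cache
inside ufshpb_get_req(); a minimal sketch of that allocation step,
assuming the driver's hpb->map_req_cache field (not part of this diff):

	/*
	 * Allocation sketch: umap requests reuse the map-request slab
	 * cache rather than getting a dedicated one. map_req_cache is
	 * assumed from the surrounding driver.
	 */
	rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
	if (!rq)
		return NULL;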

Bug: 183467926
Bug: 170940265
Bug: 183454255

Link: https://lore.kernel.org/lkml/20210607061401.58884-7-avri.altman@wdc.com/
Signed-off-by: Avri Altman <avri.altman@wdc.com>
Change-Id: I1a6696b38d4abfb4d9fbe44e84016a6238825125
---
 drivers/scsi/ufs/ufshpb.c | 47 +++++++++++++++++++++++++++++++++-------
 drivers/scsi/ufs/ufshpb.h |  1 +
 2 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -693,7 +693,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 }
 
 static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
-					 int rgn_idx, enum req_opf dir)
+					 int rgn_idx, enum req_opf dir,
+					 bool atomic)
 {
 	struct ufshpb_req *rq;
 	struct request *req;
@@ -707,7 +708,7 @@ retry:
 	req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
 			      BLK_MQ_REQ_NOWAIT);
 
-	if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
+	if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
 		usleep_range(3000, 3100);
 		goto retry;
 	}
@@ -738,7 +739,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
 	struct ufshpb_req *map_req;
 	struct bio *bio;
 
-	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN);
+	map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false);
 	if (!map_req)
 		return NULL;
 
@@ -915,6 +916,8 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
 	rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
 
 	blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn);
+
+	hpb->stats.umap_req_cnt++;
 }
 
 static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
@@ -1091,12 +1094,13 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
 }
 
 static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
-				 struct ufshpb_region *rgn)
+				 struct ufshpb_region *rgn,
+				 bool atomic)
 {
 	struct ufshpb_req *umap_req;
 	int rgn_idx = rgn ? rgn->rgn_idx : 0;
 
-	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT);
+	umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic);
 	if (!umap_req)
 		return -ENOMEM;
 
@@ -1105,9 +1109,15 @@ static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
 	return 0;
 }
 
+static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
+					struct ufshpb_region *rgn)
+{
+	return ufshpb_issue_umap_req(hpb, rgn, true);
+}
+
 static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb)
 {
-	return ufshpb_issue_umap_req(hpb, NULL);
+	return ufshpb_issue_umap_req(hpb, NULL, false);
 }
 
 static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
@@ -1145,6 +1155,14 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
goto out; goto out;
} }
if (hpb->is_hcm) {
spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
ret = ufshpb_issue_umap_single_req(hpb, rgn);
spin_lock_irqsave(&hpb->rgn_state_lock, flags);
if (ret)
goto out;
}
__ufshpb_evict_region(hpb, rgn); __ufshpb_evict_region(hpb, rgn);
} }
out: out:
@@ -1279,6 +1297,18 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
 				"LRU full (%d), choose victim %d\n",
 				atomic_read(&lru_info->active_cnt),
 				victim_rgn->rgn_idx);
+
+			if (hpb->is_hcm) {
+				spin_unlock_irqrestore(&hpb->rgn_state_lock,
+						       flags);
+				ret = ufshpb_issue_umap_single_req(hpb,
+								victim_rgn);
+				spin_lock_irqsave(&hpb->rgn_state_lock,
+						  flags);
+				if (ret)
+					goto out;
+			}
+
 			__ufshpb_evict_region(hpb, victim_rgn);
 		}
 
@@ -1849,6 +1879,7 @@ ufshpb_sysfs_attr_show_func(rb_noti_cnt);
 ufshpb_sysfs_attr_show_func(rb_active_cnt);
 ufshpb_sysfs_attr_show_func(rb_inactive_cnt);
 ufshpb_sysfs_attr_show_func(map_req_cnt);
+ufshpb_sysfs_attr_show_func(umap_req_cnt);
 
 static struct attribute *hpb_dev_stat_attrs[] = {
 	&dev_attr_hit_cnt.attr,
@@ -1857,6 +1888,7 @@ static struct attribute *hpb_dev_stat_attrs[] = {
 	&dev_attr_rb_active_cnt.attr,
 	&dev_attr_rb_inactive_cnt.attr,
 	&dev_attr_map_req_cnt.attr,
+	&dev_attr_umap_req_cnt.attr,
 	NULL,
 };
 
@@ -1982,6 +2014,7 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb)
 	hpb->stats.rb_active_cnt = 0;
 	hpb->stats.rb_inactive_cnt = 0;
 	hpb->stats.map_req_cnt = 0;
+	hpb->stats.umap_req_cnt = 0;
 }
 
 static void ufshpb_param_init(struct ufshpb_lu *hpb)

diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -191,6 +191,7 @@ struct ufshpb_stats {
 	u64 rb_inactive_cnt;
 	u64 map_req_cnt;
 	u64 pre_req_cnt;
+	u64 umap_req_cnt;
 };
 
 struct ufshpb_lu {