mmc: block: Add single read for 4k sector cards

[ Upstream commit b3fa3e6dccc465969721b8bd2824213bd235efeb ]

Cards with a 4k native sector size may only be read 4k-aligned;
accommodate this in the single-read recovery path and use it for
such cards.

Fixes: 81196976ed ("mmc: block: Add blk-mq support")
Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Avri Altman <avri.altman@wdc.com>
Link: https://lore.kernel.org/r/cf4f316274c5474586d0d99b17db4a4c@hyperstone.com
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
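
The core of the change below is the size of each recovery read:
instead of a hard-coded single 512-byte block, it uses the queue's
physical block size. A minimal standalone C sketch of that arithmetic
(illustration only, not kernel code; the sample values and the main()
harness are hypothetical):

```c
#include <stdio.h>

/*
 * Illustration only: MMC data transfers are counted in 512-byte
 * blocks, so a native sector size in bytes converts to a block
 * count by shifting right by 9 (512 == 1 << 9), mirroring the
 * patch's queue_physical_block_size(mq->queue) >> 9.
 */
int main(void)
{
	unsigned int native_sector_sizes[] = { 512, 4096 };

	for (unsigned int i = 0; i < 2; i++) {
		unsigned int bytes_per_read = native_sector_sizes[i];
		unsigned int blocks = bytes_per_read >> 9;

		printf("native sector %4u bytes -> %u block(s) per recovery read\n",
		       bytes_per_read, blocks);
	}
	return 0;
}
```

On a legacy card this degenerates to the old behaviour (one 512-byte
block per recovery read); on a 4k-native card each recovery read
covers eight blocks, keeping every access 4k-aligned.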
---
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -169,7 +169,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 			       unsigned int part_type);
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq);
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 
@@ -1247,7 +1247,7 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
 }
 
 static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
-			      int disable_multi, bool *do_rel_wr_p,
+			      int recovery_mode, bool *do_rel_wr_p,
 			      bool *do_data_tag_p)
 {
 	struct mmc_blk_data *md = mq->blkdata;
@@ -1311,12 +1311,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 			brq->data.blocks--;
 
 		/*
-		 * After a read error, we redo the request one sector
+		 * After a read error, we redo the request one (native) sector
 		 * at a time in order to accurately determine which
 		 * sectors can be read successfully.
 		 */
-		if (disable_multi)
-			brq->data.blocks = 1;
+		if (recovery_mode)
+			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;
 
 		/*
 		 * Some controllers have HW issues while operating
@@ -1533,7 +1533,7 @@ static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
-			       int disable_multi,
+			       int recovery_mode,
 			       struct mmc_queue *mq)
 {
 	u32 readcmd, writecmd;
@@ -1542,7 +1542,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	struct mmc_blk_data *md = mq->blkdata;
 	bool do_rel_wr, do_data_tag;
 
-	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);
+	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);
 
 	brq->mrq.cmd = &brq->cmd;
 
@@ -1633,7 +1633,7 @@ static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
 
 #define MMC_READ_SINGLE_RETRIES	2
 
-/* Single sector read during recovery */
+/* Single (native) sector read during recovery */
 static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
@@ -1641,6 +1641,7 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	blk_status_t error = BLK_STS_OK;
+	size_t bytes_per_read = queue_physical_block_size(mq->queue);
 
 	do {
 		u32 status;
@@ -1675,13 +1676,13 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 		else
 			error = BLK_STS_OK;
 
-	} while (blk_update_request(req, error, 512));
+	} while (blk_update_request(req, error, bytes_per_read));
 
 	return;
 
 error_exit:
 	mrq->data->bytes_xfered = 0;
-	blk_update_request(req, BLK_STS_IOERR, 512);
+	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
 	/* Let it try the remaining request again */
 	if (mqrq->retries > MMC_MAX_RETRIES - 1)
 		mqrq->retries = MMC_MAX_RETRIES - 1;
@@ -1822,10 +1823,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
 		return;
 	}
 
-	/* FIXME: Missing single sector read for large sector size */
-	if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
-	    brq->data.blocks > 1) {
-		/* Read one sector at a time */
+	if (rq_data_dir(req) == READ && brq->data.blocks >
+			queue_physical_block_size(mq->queue) >> 9) {
+		/* Read one (native) sector at a time */
 		mmc_blk_read_single(mq, req);
 		return;
 	}
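
For readers unfamiliar with blk_update_request() semantics, a
simplified model of the recovery loop in mmc_blk_read_single() may
help: each pass transfers one native sector and completes
bytes_per_read bytes of the request, recording success or failure per
sector, and blk_update_request() returns true while part of the
request is still outstanding. All names below are local to this
sketch, not kernel API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the actual MMC transfer; pretend sector 3 is bad. */
static bool read_native_sector(unsigned int sector)
{
	return sector != 3;
}

/*
 * Models blk_update_request(): account for 'step' bytes of the
 * request (successful or not) and return true while bytes remain.
 */
static bool update_request(unsigned int *remaining, unsigned int step,
			   bool ok, unsigned int sector)
{
	printf("sector %u: %s\n", sector, ok ? "ok" : "I/O error");
	*remaining -= (step < *remaining) ? step : *remaining;
	return *remaining > 0;
}

int main(void)
{
	unsigned int bytes_per_read = 4096;          /* 4k-native card */
	unsigned int remaining = 8 * bytes_per_read; /* 8-sector request */
	unsigned int sector = 0;
	bool ok;

	do {
		ok = read_native_sector(sector);
		sector++;
	} while (update_request(&remaining, bytes_per_read, ok, sector - 1));

	return 0;
}
```

The point of this structure, as in the patch, is that a bad sector
fails only its own bytes_per_read slice of the request; the loop
still walks the rest of the request to salvage every readable sector.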