Revert "BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths"

This reverts commit 7613068f95.

 Call trace:
  __switch_to+0x16c/0x2f8
  __schedule+0x4d0/0x94c
  schedule+0x80/0x100
  schedule_timeout+0x94/0x140
  io_schedule_timeout+0x48/0x70
  wait_for_common_io+0x80/0x108
  wait_for_completion_io_timeout+0x14/0x24
  blk_execute_rq+0xac/0x104
  __scsi_execute+0x104/0x1c8
  ufshcd_clear_ua_wlun+0x128/0x1cc
  ufshcd_err_handling_unprepare+0xd8/0x178
  ufshcd_err_handler+0x7d8/0x9d0
  process_one_work+0x218/0x634
  worker_thread+0x34c/0x588
  kthread+0x158/0x1b0
  ret_from_fork+0x10/0

Bug: 192095860
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
Change-Id: Ifaa166902daa9dbd340fab68987200c5bf3009d9
Author: Jaegeuk Kim
Date:   2021-06-25 16:06:54 -07:00
Commit: 46575badbb
Parent: 83d653257a

 2 changed files with 137 additions and 128 deletions
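The hunks below move the transfer-request send and completion paths back under the SCSI host lock. A condensed sketch of the ufshcd_queuecommand() send path this revert restores, reconstructed from the hunks themselves (not a verbatim copy of the driver):

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		break;
	default:
		/* bad host state: fail or requeue the command, still under the lock */
		goto out_compl_cmd;
	}
	ufshcd_send_command(hba, tag);	/* __set_bit() + doorbell write, lock still held */
	spin_unlock_irqrestore(hba->host->host_lock, flags);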

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -730,7 +730,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
  */
 static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 {
-	clear_bit(tag, &hba->outstanding_reqs);
+	__clear_bit(tag, &hba->outstanding_reqs);
 }
 
 /**
@@ -1956,19 +1956,15 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
 	bool queue_resume_work = false;
 	ktime_t curr_t = ktime_get();
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (!hba->clk_scaling.active_reqs++)
 		queue_resume_work = true;
 
-	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
 		return;
-	}
 
 	if (queue_resume_work)
 		queue_work(hba->clk_scaling.workq,
@@ -1984,26 +1980,21 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 		hba->clk_scaling.busy_start_t = curr_t;
 		hba->clk_scaling.is_busy_started = true;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
 {
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
-	unsigned long flags;
 
 	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->clk_scaling.active_reqs--;
 	if (!hba->outstanding_reqs && scaling->is_busy_started) {
 		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
 					scaling->busy_start_t));
 		scaling->busy_start_t = 0;
 		scaling->is_busy_started = false;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static inline int ufshcd_monitor_opcode2dir(u8 opcode)
@@ -2029,20 +2020,15 @@ static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
 static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
 		hba->monitor.busy_start_ts[dir] = ktime_get();
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
 		struct request *req = lrbp->cmd->request;
 		struct ufs_hba_monitor *m = &hba->monitor;
@@ -2066,7 +2052,6 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		/* Push forward the busy start of monitor */
 		m->busy_start_ts[dir] = now;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 
 /**
@@ -2078,7 +2063,6 @@ static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
 	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
-	unsigned long flags;
 
 	lrbp->issue_time_stamp = ktime_get();
 	lrbp->compl_time_stamp = ktime_set(0, 0);
@@ -2086,12 +2070,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 	trace_android_vh_ufs_send_command(hba, lrbp);
 	ufshcd_add_command_trace(hba, task_tag, "send");
 	ufshcd_clk_scaling_start_busy(hba);
+	__set_bit(task_tag, &hba->outstanding_reqs);
 	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
 		ufshcd_start_monitor(hba, lrbp);
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	/* Make sure that doorbell is committed immediately */
 	wmb();
 }
@@ -2655,6 +2637,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
 	struct ufshcd_lrb *lrbp;
 	struct ufs_hba *hba;
+	unsigned long flags;
 	int tag;
 	int err = 0;
 
@@ -2671,43 +2654,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	if (!down_read_trylock(&hba->clk_scaling_lock))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-	switch (hba->ufshcd_state) {
-	case UFSHCD_STATE_OPERATIONAL:
-	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
-		break;
-	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
-		/*
-		 * pm_runtime_get_sync() is used at error handling preparation
-		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
-		 * PM ops, it can never be finished if we let SCSI layer keep
-		 * retrying it, which gets err handler stuck forever. Neither
-		 * can we let the scsi cmd pass through, because UFS is in bad
-		 * state, the scsi cmd may eventually time out, which will get
-		 * err handler blocked for too long. So, just fail the scsi cmd
-		 * sent from PM ops, err handler can recover PM error anyways.
-		 */
-		if (hba->pm_op_in_progress) {
-			hba->force_reset = true;
-			set_host_byte(cmd, DID_BAD_TARGET);
-			cmd->scsi_done(cmd);
-			goto out;
-		}
-		fallthrough;
-	case UFSHCD_STATE_RESET:
-		err = SCSI_MLQUEUE_HOST_BUSY;
-		goto out;
-	case UFSHCD_STATE_ERROR:
-		set_host_byte(cmd, DID_ERROR);
-		cmd->scsi_done(cmd);
-		goto out;
-	default:
-		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
-				__func__, hba->ufshcd_state);
-		set_host_byte(cmd, DID_BAD_TARGET);
-		cmd->scsi_done(cmd);
-		goto out;
-	}
-
 	hba->req_abort_count = 0;
 
 	err = ufshcd_hold(hba, true);
@@ -2718,7 +2664,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
 		(hba->clk_gating.state != CLKS_ON));
 
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	lrbp = &hba->lrb[tag];
+	if (unlikely(lrbp->in_use)) {
 		if (hba->pm_op_in_progress)
 			set_host_byte(cmd, DID_BAD_TARGET);
 		else
@@ -2727,7 +2674,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
-	lrbp = &hba->lrb[tag];
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
 	lrbp->sense_bufflen = UFS_SENSE_SIZE;
@@ -2758,7 +2704,51 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	switch (hba->ufshcd_state) {
+	case UFSHCD_STATE_OPERATIONAL:
+	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+		break;
+	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+		/*
+		 * pm_runtime_get_sync() is used at error handling preparation
+		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
+		 * PM ops, it can never be finished if we let SCSI layer keep
+		 * retrying it, which gets err handler stuck forever. Neither
+		 * can we let the scsi cmd pass through, because UFS is in bad
+		 * state, the scsi cmd may eventually time out, which will get
+		 * err handler blocked for too long. So, just fail the scsi cmd
+		 * sent from PM ops, err handler can recover PM error anyways.
+		 */
+		if (hba->pm_op_in_progress) {
+			hba->force_reset = true;
+			set_host_byte(cmd, DID_BAD_TARGET);
+			goto out_compl_cmd;
+		}
+		fallthrough;
+	case UFSHCD_STATE_RESET:
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		goto out_compl_cmd;
+	case UFSHCD_STATE_ERROR:
+		set_host_byte(cmd, DID_ERROR);
+		goto out_compl_cmd;
+	default:
+		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+				__func__, hba->ufshcd_state);
+		set_host_byte(cmd, DID_BAD_TARGET);
+		goto out_compl_cmd;
+	}
 	ufshcd_send_command(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	goto out;
+
+out_compl_cmd:
+	scsi_dma_unmap(lrbp->cmd);
+	lrbp->cmd = NULL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_release(hba);
+	if (!err)
+		cmd->scsi_done(cmd);
 out:
 	up_read(&hba->clk_scaling_lock);
 	return err;
@@ -2913,6 +2903,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	int err;
 	int tag;
 	struct completion wait;
+	unsigned long flags;
 
 	down_read(&hba->clk_scaling_lock);
 
@@ -2932,30 +2923,34 @@
 	req->timeout = msecs_to_jiffies(2 * timeout);
 	blk_mq_start_request(req);
 
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	init_completion(&wait);
+	lrbp = &hba->lrb[tag];
+	if (unlikely(lrbp->in_use)) {
 		err = -EBUSY;
 		goto out;
 	}
 
-	init_completion(&wait);
-	lrbp = &hba->lrb[tag];
 	WARN_ON(lrbp->cmd);
 	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
 	if (unlikely(err))
-		goto out;
+		goto out_put_tag;
 
 	hba->dev_cmd.complete = &wait;
 
 	ufshcd_add_query_upiu_trace(hba, tag, "query_send");
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_send_command(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+out:
 	ufshcd_add_query_upiu_trace(hba, tag,
 			err ? "query_complete_err" : "query_complete");
 
-out:
+out_put_tag:
 	blk_put_request(req);
 out_unlock:
 	up_read(&hba->clk_scaling_lock);
@@ -5088,24 +5083,6 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	return result;
 }
 
-static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
-					u32 intr_mask)
-{
-	if (!ufshcd_is_auto_hibern8_supported(hba) ||
-	    !ufshcd_is_auto_hibern8_enabled(hba))
-		return false;
-
-	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
-		return false;
-
-	if (hba->active_uic_cmd &&
-	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
-	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
-		return false;
-
-	return true;
-}
-
 /**
  * ufshcd_uic_cmd_compl - handle completion of uic command
  * @hba: per adapter instance
@@ -5119,10 +5096,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 {
 	irqreturn_t retval = IRQ_NONE;
 
-	spin_lock(hba->host->host_lock);
-	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
-		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
-
 	if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
 		hba->active_uic_cmd->argument2 |=
 			ufshcd_get_uic_cmd_result(hba);
@@ -5143,7 +5116,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 	if (retval == IRQ_HANDLED)
 		ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
 					"complete");
-	spin_unlock(hba->host->host_lock);
 
 	return retval;
 }
@@ -5162,9 +5134,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 	bool update_scaling = false;
 
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
-		if (!test_and_clear_bit(index, &hba->outstanding_reqs))
-			continue;
 		lrbp = &hba->lrb[index];
+		lrbp->in_use = false;
 		lrbp->compl_time_stamp = ktime_get();
 		cmd = lrbp->cmd;
 		if (cmd) {
@@ -5180,7 +5151,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			lrbp->cmd = NULL;
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
-			ufshcd_release(hba);
+			__ufshcd_release(hba);
 			update_scaling = true;
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
 			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5192,9 +5163,14 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 				update_scaling = true;
 			}
 		}
-		if (update_scaling)
-			ufshcd_clk_scaling_update_busy(hba);
+		if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
+			hba->clk_scaling.active_reqs--;
 	}
+
+	/* clear corresponding bits of completed commands */
+	hba->outstanding_reqs ^= completed_reqs;
+
+	ufshcd_clk_scaling_update_busy(hba);
 }
 
 /**
@@ -5207,7 +5183,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
  */
 static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 {
-	unsigned long completed_reqs, flags;
+	unsigned long completed_reqs;
 	u32 tr_doorbell;
 
 	/* Resetting interrupt aggregation counters first and reading the
@@ -5221,10 +5197,8 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
 		ufshcd_reset_intr_aggr(hba);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	if (completed_reqs) {
 		__ufshcd_transfer_req_compl(hba, completed_reqs);
@@ -5982,11 +5956,13 @@ static void ufshcd_err_handler(struct work_struct *work)
 	ufshcd_set_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_err_handling_prepare(hba);
-	/* Complete requests that have door-bell cleared by h/w */
-	ufshcd_complete_requests(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
 		hba->ufshcd_state = UFSHCD_STATE_RESET;
+
+	/* Complete requests that have door-bell cleared by h/w */
+	ufshcd_complete_requests(hba);
+
 	/*
 	 * A full reset and restore might have happened after preparation
 	 * is finished, double check whether we should stop.
@@ -6069,11 +6045,12 @@ static void ufshcd_err_handler(struct work_struct *work)
 	}
 
 lock_skip_pending_xfer_clear:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
 	/* Complete the requests that are cleared by s/w */
 	ufshcd_complete_requests(hba);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->silence_err_logs = false;
 	if (err_xfer || err_tm) {
 		needs_reset = true;
 		goto do_reset;
@@ -6223,23 +6200,37 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
 	return retval;
 }
 
+static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
+					u32 intr_mask)
+{
+	if (!ufshcd_is_auto_hibern8_supported(hba) ||
+	    !ufshcd_is_auto_hibern8_enabled(hba))
+		return false;
+
+	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
+		return false;
+
+	if (hba->active_uic_cmd &&
+	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
+	    hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
+		return false;
+
+	return true;
+}
+
 /**
  * ufshcd_check_errors - Check for errors that need s/w attention
  * @hba: per-adapter instance
- * @intr_status: interrupt status generated by the controller
  *
  * Returns
  *  IRQ_HANDLED - If interrupt is valid
  *  IRQ_NONE - If invalid interrupt
  */
-static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
 {
 	bool queue_eh_work = false;
 	irqreturn_t retval = IRQ_NONE;
 
-	spin_lock(hba->host->host_lock);
-	hba->errors |= UFSHCD_ERROR_MASK & intr_status;
 	if (hba->errors & INT_FATAL_ERRORS) {
 		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
 				       hba->errors);
@@ -6296,9 +6287,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
 	 * itself without s/w intervention or errors that will be
 	 * handled by the SCSI core layer.
 	 */
-	hba->errors = 0;
-	hba->uic_error = 0;
-	spin_unlock(hba->host->host_lock);
 
 	return retval;
 }
@@ -6333,17 +6321,13 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
  */
 static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
 {
-	unsigned long flags;
 	struct request_queue *q = hba->tmf_queue;
 	struct ctm_info ci = {
 		.hba = hba,
+		.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
 	};
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
 	blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
 	return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -6360,12 +6344,17 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
 	irqreturn_t retval = IRQ_NONE;
 
+	hba->errors = UFSHCD_ERROR_MASK & intr_status;
+
+	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
+
+	if (hba->errors)
+		retval |= ufshcd_check_errors(hba);
+
 	if (intr_status & UFSHCD_UIC_MASK)
 		retval |= ufshcd_uic_cmd_compl(hba, intr_status);
 
-	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
-		retval |= ufshcd_check_errors(hba, intr_status);
-
 	if (intr_status & UTP_TASK_REQ_COMPL)
 		retval |= ufshcd_tmc_handler(hba);
@@ -6391,6 +6380,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 	struct ufs_hba *hba = __hba;
 	int retries = hba->nutrs;
 
+	spin_lock(hba->host->host_lock);
 	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	hba->ufs_stats.last_intr_status = intr_status;
 	hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6422,6 +6412,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
 		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
 	}
 
+	spin_unlock(hba->host->host_lock);
 	return retval;
 }
 
@@ -6598,6 +6589,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	int err = 0;
 	int tag;
 	struct completion wait;
+	unsigned long flags;
 	u8 upiu_flags;
 
 	down_read(&hba->clk_scaling_lock);
@@ -6610,13 +6602,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	tag = req->tag;
 	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	init_completion(&wait);
+	lrbp = &hba->lrb[tag];
+	if (unlikely(lrbp->in_use)) {
 		err = -EBUSY;
 		goto out;
 	}
 
-	init_completion(&wait);
-	lrbp = &hba->lrb[tag];
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = NULL;
 	lrbp->sense_bufflen = 0;
@@ -6654,8 +6646,10 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_send_command(hba, tag);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/*
 	 * ignore the returning value here - ufshcd_check_query_response is
 	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6774,6 +6768,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	u32 pos;
 	int err;
 	u8 resp = 0xF, lun;
+	unsigned long flags;
 
 	host = cmd->device->host;
 	hba = shost_priv(host);
@@ -6792,9 +6787,11 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 			err = ufshcd_clear_cmd(hba, pos);
 			if (err)
 				break;
-			__ufshcd_transfer_req_compl(hba, pos);
 		}
 	}
+	spin_lock_irqsave(host->host_lock, flags);
+	ufshcd_transfer_req_compl(hba);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 out:
 	hba->req_abort_count = 0;
@@ -6970,16 +6967,20 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	 * will fail, due to spec violation, scsi err handling next step
 	 * will be to send LU reset which, again, is a spec violation.
 	 * To avoid these unnecessary/illegal steps, first we clean up
-	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
-	 * then queue the eh_work and bail.
+	 * the lrb taken by this cmd and mark the lrb as in_use, then
+	 * queue the eh_work and bail.
 	 */
 	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
 		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
-		__ufshcd_transfer_req_compl(hba, (1UL << tag));
-		set_bit(tag, &hba->outstanding_reqs);
+
 		spin_lock_irqsave(host->host_lock, flags);
-		hba->force_reset = true;
-		ufshcd_schedule_eh_work(hba);
+		if (lrbp->cmd) {
+			__ufshcd_transfer_req_compl(hba, (1UL << tag));
+			__set_bit(tag, &hba->outstanding_reqs);
+			lrbp->in_use = true;
+			hba->force_reset = true;
+			ufshcd_schedule_eh_work(hba);
+		}
 		spin_unlock_irqrestore(host->host_lock, flags);
 		goto out;
 	}
@@ -6992,7 +6993,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	if (!err) {
 cleanup:
+		spin_lock_irqsave(host->host_lock, flags);
 		__ufshcd_transfer_req_compl(hba, (1UL << tag));
+		spin_unlock_irqrestore(host->host_lock, flags);
 
 out:
 		err = SUCCESS;
 	} else {
@@ -7022,15 +7025,19 @@ out:
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 {
 	int err;
+	unsigned long flags;
 
 	/*
 	 * Stop the host controller and complete the requests
 	 * cleared by h/w
 	 */
 	ufshcd_hba_stop(hba);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->silence_err_logs = true;
 	ufshcd_complete_requests(hba);
 	hba->silence_err_logs = false;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	/* scale up clocks to max frequency before full reinitialization */
 	ufshcd_set_clk_freq(hba, true);

diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -188,6 +188,7 @@ struct ufs_pm_lvl_states {
  * @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
  * @data_unit_num: the data unit number for the first block for inline crypto
  * @req_abort_skip: skip request abort task flag
+ * @in_use: indicates that this lrb is still in use
  */
 struct ufshcd_lrb {
 	struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -217,6 +218,7 @@ struct ufshcd_lrb {
 #endif
 
 	bool req_abort_skip;
+	bool in_use;
 };
 
 /**