@@ -45,6 +45,8 @@
 #include "ufs_quirks.h"
 #include "unipro.h"
 
+#define UFSHCD_REQ_SENSE_SIZE	18
+
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
 				 UFSHCD_ERROR_MASK)
@@ -57,15 +59,9 @@
 #define NOP_OUT_TIMEOUT    30 /* msecs */
 
 /* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
@@ -123,6 +119,7 @@ enum {
 	UFSHCD_STATE_RESET,
 	UFSHCD_STATE_ERROR,
 	UFSHCD_STATE_OPERATIONAL,
+	UFSHCD_STATE_EH_SCHEDULED,
 };
 
 /* UFSHCD error handling flags */
@@ -598,6 +595,20 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
 	return false;
 }
 
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -631,8 +642,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 	scsi_unblock_requests(hba->host);
 }
 
@@ -660,6 +670,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in ON state, the link could
+		 * still be in hibner8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->clk_gating.ungate_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
 		break;
 	case REQ_CLKS_OFF:
 		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -709,7 +734,14 @@ static void ufshcd_gate_work(struct work_struct *work)
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.is_suspended) {
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case save time by
+	 * skipping the gating work and exit after changing the clock
+	 * state to CLKS_ON.
+	 */
+	if (hba->clk_gating.is_suspended ||
+		(hba->clk_gating.state == REQ_CLKS_ON)) {
 		hba->clk_gating.state = CLKS_ON;
 		goto rel_lock;
 	}
@@ -731,10 +763,7 @@ static void ufshcd_gate_work(struct work_struct *work)
 		ufshcd_set_link_hibern8(hba);
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
+	ufshcd_suspend_clkscaling(hba);
 
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
@@ -878,6 +907,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 }
 
 /**
@@ -889,10 +920,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
 	int len;
 	if (lrbp->sense_buffer &&
 	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+		int len_to_copy;
+
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
 		memcpy(lrbp->sense_buffer,
 			lrbp->ucd_rsp_ptr->sr.sense_data,
-			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
 	}
 }
 
@@ -1088,7 +1123,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  *
  * Returns 0 in case of success, non-zero value in case of failure
  */
-static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	struct ufshcd_sg_entry *prd_table;
 	struct scatterlist *sg;
@@ -1102,8 +1137,13 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
 		return sg_segments;
 
 	if (sg_segments) {
-		lrbp->utr_descriptor_ptr->prd_table_length =
-				cpu_to_le16((u16) (sg_segments));
+		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+			lrbp->utr_descriptor_ptr->prd_table_length =
+				cpu_to_le16((u16)(sg_segments *
+					sizeof(struct ufshcd_sg_entry)));
+		else
+			lrbp->utr_descriptor_ptr->prd_table_length =
+				cpu_to_le16((u16) (sg_segments));
 
 		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
 
@@ -1410,6 +1450,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
 		break;
+	case UFSHCD_STATE_EH_SCHEDULED:
 	case UFSHCD_STATE_RESET:
 		err = SCSI_MLQUEUE_HOST_BUSY;
 		goto out_unlock;
@@ -1457,7 +1498,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
-	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
@@ -1465,15 +1506,18 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
 	ufshcd_comp_scsi_upiu(hba, lrbp);
 
-	err = ufshcd_map_sg(lrbp);
+	err = ufshcd_map_sg(hba, lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
 	ufshcd_send_command(hba, tag);
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1581,6 +1625,8 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
 			msecs_to_jiffies(max_timeout));
 
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->dev_cmd.complete = NULL;
 	if (likely(time_left)) {
@@ -1683,6 +1729,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
 	ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
@@ -1789,9 +1836,6 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
-		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
 	if (err) {
@@ -1861,8 +1905,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode, idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1961,8 +2005,8 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode, idn, index, err);
 		goto out_unlock;
 	}
 
@@ -2055,18 +2099,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
 					desc_id, desc_index, 0, desc_buf,
 					&buff_len);
 
-	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-	     ufs_query_desc_max_size[desc_id])
-	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-			__func__, desc_id, param_offset, buff_len, ret);
-		if (!ret)
-			ret = -EINVAL;
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+			__func__, desc_id, desc_index, param_offset, ret);
 
 		goto out;
 	}
 
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * While reading variable size descriptors (like string descriptor),
+	 * some UFS devices may report the "LENGTH" (field in "Transaction
+	 * Specific fields" of Query Response UPIU) same as what was requested
+	 * in Query Request UPIU instead of reporting the actual size of the
+	 * variable size descriptor.
+	 * Although it's safe to ignore the "LENGTH" field for variable size
+	 * descriptors as we can always derive the length of the descriptor from
+	 * the descriptor header fields. Hence this change impose the length
+	 * match check only for fixed size descriptors (for which we always
+	 * request the correct size as part of Query Request UPIU).
+	 */
+	if ((desc_id != QUERY_DESC_IDN_STRING) &&
+	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (is_kmalloc)
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
 out:
@@ -2088,7 +2155,18 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
 					 u8 *buf,
 					 u32 size)
 {
-	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+	int err = 0;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* Read descriptor*/
+		err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+		if (!err)
+			break;
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+	}
+
+	return err;
 }
 
 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -2320,12 +2398,21 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
 				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
 
 		/* Response upiu and prdt offset should be in double words */
-		utrdlp[i].response_upiu_offset =
+		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
+			utrdlp[i].response_upiu_offset =
+				cpu_to_le16(response_offset);
+			utrdlp[i].prd_table_offset =
+				cpu_to_le16(prdt_offset);
+			utrdlp[i].response_upiu_length =
+				cpu_to_le16(ALIGNED_UPIU_SIZE);
+		} else {
+			utrdlp[i].response_upiu_offset =
 				cpu_to_le16((response_offset >> 2));
-		utrdlp[i].prd_table_offset =
+			utrdlp[i].prd_table_offset =
 				cpu_to_le16((prdt_offset >> 2));
-		utrdlp[i].response_upiu_length =
+			utrdlp[i].response_upiu_length =
 				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
+		}
 
 		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
 		hba->lrb[i].ucd_req_ptr =
@@ -2429,10 +2516,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
-			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
-			retries);
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	return ret;
 }
@@ -2496,9 +2583,10 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
 				get, UIC_GET_ATTR_ID(attr_sel), ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
-			get, UIC_GET_ATTR_ID(attr_sel), retries);
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	if (mib_val && !ret)
 		*mib_val = uic_cmd.argument3;
@@ -2651,6 +2739,8 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 	int ret;
 	struct uic_command uic_cmd = {0};
 
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
+
 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 
@@ -2664,7 +2754,9 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 		 */
 		if (ufshcd_link_recovery(hba))
 			ret = -ENOLINK;
-	}
+	} else
+		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
+								POST_CHANGE);
 
 	return ret;
 }
@@ -2687,13 +2779,17 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 	struct uic_command uic_cmd = {0};
 	int ret;
 
+	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
+
 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	if (ret) {
 		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
-	}
+	} else
+		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
+								POST_CHANGE);
 
 	return ret;
 }
@@ -2725,8 +2821,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FASTAUTO_MODE;
-	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->pwr_tx = FAST_MODE;
+	pwr_info->pwr_rx = FAST_MODE;
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -2757,7 +2853,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 				__func__, pwr_info->gear_rx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_rx = SLOWAUTO_MODE;
+		pwr_info->pwr_rx = SLOW_MODE;
 	}
 
 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2770,7 +2866,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 				__func__, pwr_info->gear_tx);
 			return -EINVAL;
 		}
-		pwr_info->pwr_tx = SLOWAUTO_MODE;
+		pwr_info->pwr_tx = SLOW_MODE;
 	}
 
 	hba->max_pwr_info.is_valid = true;
@@ -3090,7 +3186,16 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 {
 	int ret;
 	int retries = DME_LINKSTARTUP_RETRIES;
+	bool link_startup_again = false;
 
+	/*
+	 * If UFS device isn't active then we will have to issue link startup
+	 * 2 times to make sure the device state move to active.
+	 */
+	if (!ufshcd_is_ufs_dev_active(hba))
+		link_startup_again = true;
+
+link_startup:
 	do {
 		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
 
@@ -3116,6 +3221,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
 		/* failed to get the link up... retire */
 		goto out;
 
+	if (link_startup_again) {
+		link_startup_again = false;
+		retries = DME_LINKSTARTUP_RETRIES;
+		goto link_startup;
+	}
+
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
 		ret = ufshcd_disable_device_tx_lcc(hba);
 		if (ret)
@@ -3181,16 +3292,24 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
 {
 	int ret = 0;
 	u8 lun_qdepth;
+	int retries;
 	struct ufs_hba *hba;
 
 	hba = shost_priv(sdev->host);
 
 	lun_qdepth = hba->nutrs;
-	ret = ufshcd_read_unit_desc_param(hba,
-					  ufshcd_scsi_to_upiu_lun(sdev->lun),
-					  UNIT_DESC_PARAM_LU_Q_DEPTH,
-					  &lun_qdepth,
-					  sizeof(lun_qdepth));
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* Read descriptor*/
+		ret = ufshcd_read_unit_desc_param(hba,
+					  ufshcd_scsi_to_upiu_lun(sdev->lun),
+					  UNIT_DESC_PARAM_LU_Q_DEPTH,
+					  &lun_qdepth,
+					  sizeof(lun_qdepth));
+		if (!ret || ret == -ENOTSUPP)
+			break;
+
+		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
+	}
 
 	/* Some WLUN doesn't support unit descriptor */
 	if (ret == -EOPNOTSUPP)
@@ -4097,6 +4216,17 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
 {
 	u32 reg;
 
+	/* PHY layer lane error */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	/* Ignore LINERESET indication, as this is not an error */
+	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK))
+		/*
+		 * To know whether this error is fatal or not, DB timeout
+		 * must be checked but this error is handled separately.
+		 */
+		dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
+
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
 	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
@@ -4158,7 +4288,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
 			/* block commands from scsi mid-layer */
 			scsi_block_requests(hba->host);
 
-			hba->ufshcd_state = UFSHCD_STATE_ERROR;
+			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
 			schedule_work(&hba->eh_work);
 		}
 	}
@@ -4311,6 +4441,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
 	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
+	ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+
 	/* send command to the controller */
 	__set_bit(free_slot, &hba->outstanding_tasks);
 
@@ -4318,6 +4450,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	wmb();
 
 	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -4722,6 +4856,24 @@ out:
 	return icc_level;
 }
 
+static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
+{
+	int ret = 0;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		/* write attribute */
+		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
+		if (!ret)
+			break;
+
+		dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+
 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
 	int ret;
@@ -4742,9 +4894,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
 			__func__, hba->init_prefetch_data.icc_level);
 
-	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
-		&hba->init_prefetch_data.icc_level);
+	ret = ufshcd_set_icc_levels_attr(hba,
+				 hba->init_prefetch_data.icc_level);
 
 	if (ret)
 		dev_err(hba->dev,
@@ -4965,6 +5116,76 @@ out:
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
+ * for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	u32 pa_tactivate_us, peer_pa_tactivate_us;
+	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &peer_granularity);
+	if (ret)
+		goto out;
+
+	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+			__func__, granularity);
+		return -EINVAL;
+	}
+
+	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+			__func__, peer_granularity);
+		return -EINVAL;
+	}
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+				  &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+	peer_pa_tactivate_us = peer_pa_tactivate *
+			     gran_to_us_table[peer_granularity - 1];
+
+	if (pa_tactivate_us > peer_pa_tactivate_us) {
+		u32 new_peer_pa_tactivate;
+
+		new_peer_pa_tactivate = pa_tactivate_us /
+				      gran_to_us_table[peer_granularity - 1];
+		new_peer_pa_tactivate++;
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+					  new_peer_pa_tactivate);
+	}
+
+out:
+	return ret;
+}
+
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +5196,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 		/* set 1ms timeout for PA_TACTIVATE */
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+		ufshcd_quirk_tune_host_pa_tactivate(hba);
 }
 
 /**
@@ -5027,9 +5251,11 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 			__func__);
 	} else {
 		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-		if (ret)
+		if (ret) {
 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 					__func__, ret);
+			goto out;
+		}
 	}
 
 	/* set the state as operational after switching to desired gear */
@@ -5062,8 +5288,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 		hba->is_init_prefetch = true;
 
 	/* Resume devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 
 out:
 	/*
@@ -5389,6 +5614,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	if (!head || list_empty(head))
 		goto out;
 
+	ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
+	if (ret)
+		return ret;
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
@@ -5410,7 +5639,10 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 		}
 	}
 
-	ret = ufshcd_vops_setup_clocks(hba, on);
+	ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
+	if (ret)
+		return ret;
+
 out:
 	if (ret) {
 		list_for_each_entry(clki, head, list) {
@@ -5500,8 +5732,6 @@ static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
 	if (!hba->vops)
 		return;
 
-	ufshcd_vops_setup_clocks(hba, false);
-
 	ufshcd_vops_setup_regulators(hba, false);
 
 	ufshcd_vops_exit(hba);
@@ -5564,6 +5794,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
 	if (hba->is_powered) {
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
+		ufshcd_suspend_clkscaling(hba);
 		ufshcd_setup_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
@@ -5577,19 +5808,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
 				0,
 				0,
 				0,
-				SCSI_SENSE_BUFFERSIZE,
+				UFSHCD_REQ_SENSE_SIZE,
 				0};
 	char *buffer;
 	int ret;
 
-	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-				SCSI_SENSE_BUFFERSIZE, NULL,
+				UFSHCD_REQ_SENSE_SIZE, NULL,
 				msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
 	if (ret)
 		pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5766,7 +5997,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 			!hba->dev_info.is_lu_power_on_wp) {
 		ret = ufshcd_setup_vreg(hba, true);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 		if (!ret && !ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 			if (ret)
@@ -5775,6 +6005,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
 			if (ret)
 				goto vccq_lpm;
 		}
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 	}
 	goto out;
 
@@ -5839,6 +6070,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	ufshcd_hold(hba, false);
 	hba->clk_gating.is_suspended = true;
 
+	ufshcd_suspend_clkscaling(hba);
+
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
 		goto disable_clks;
@@ -5846,12 +6079,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
 	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
 	    (req_link_state == hba->uic_link_state))
-		goto out;
+		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
 	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
 		ret = -EINVAL;
-		goto out;
+		goto enable_gating;
 	}
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5887,15 +6120,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 			ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
-	/*
-	 * The clock scaling needs access to controller registers. Hence, Wait
-	 * for pending clock scaling work to be done before clocks are
-	 * turned off.
-	 */
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
 	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
 	 * vendor specific host controller register space call them before the
@@ -5905,10 +6129,6 @@ disable_clks:
 	if (ret)
 		goto set_link_active;
 
-	ret = ufshcd_vops_setup_clocks(hba, false);
-	if (ret)
-		goto vops_resume;
-
 	if (!ufshcd_is_link_active(hba))
 		ufshcd_setup_clocks(hba, false);
 	else
@@ -5925,9 +6145,8 @@ disable_clks:
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
-vops_resume:
-	ufshcd_vops_resume(hba, pm_op);
 set_link_active:
+	ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
 	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
 		ufshcd_set_link_active(hba);
@@ -5937,6 +6156,7 @@ set_dev_active:
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
+	ufshcd_resume_clkscaling(hba);
 	hba->clk_gating.is_suspended = false;
 	ufshcd_release(hba);
 out:
@@ -6015,8 +6235,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		ufshcd_urgent_bkops(hba);
 	hba->clk_gating.is_suspended = false;
 
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	ufshcd_resume_clkscaling(hba);
 
 	/* Schedule clock gating in case of no access to UFS device yet */
 	ufshcd_release(hba);
@@ -6030,6 +6249,7 @@ disable_vreg:
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
+	ufshcd_suspend_clkscaling(hba);
 	ufshcd_setup_clocks(hba, false);
 out:
 	hba->pm_op_in_progress = 0;
@@ -6052,16 +6272,13 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 	if (!hba || !hba->is_powered)
 		return 0;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		if (hba->rpm_lvl == hba->spm_lvl)
-			/*
-			 * There is possibility that device may still be in
-			 * active state during the runtime suspend.
-			 */
-			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
-				goto out;
+	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+	     hba->curr_dev_pwr_mode) &&
+	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+	     hba->uic_link_state))
+		goto out;
 
+	if (pm_runtime_suspended(hba->dev)) {
 		/*
 		 * UFS device and/or UFS link low power states during runtime
 		 * suspend seems to be different than what is expected during
@@ -6092,7 +6309,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
 		/*
 		 * Let the runtime resume take care of resuming
 		 * if runtime suspended.
@@ -6113,7 +6333,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
 		return 0;
 
 	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -6143,10 +6366,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
 		return 0;
-	else
-		return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+	return ufshcd_resume(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -6198,11 +6424,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba, true);
 
-	scsi_host_put(hba->host);
-
 	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_remove_device(hba->devfreq);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -6324,15 +6546,47 @@ static int ufshcd_devfreq_target(struct device *dev,
 {
 	int err = 0;
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	bool release_clk_hold = false;
+	unsigned long irq_flags;
 
 	if (!ufshcd_is_clkscaling_enabled(hba))
 		return -EINVAL;
 
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return 0;
+	}
+
+	if (ufshcd_is_clkgating_allowed(hba) &&
+	    (hba->clk_gating.state != CLKS_ON)) {
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			/* hold the vote until the scaling work is completed */
+			hba->clk_gating.active_reqs++;
+			release_clk_hold = true;
+			hba->clk_gating.state = CLKS_ON;
+		} else {
+			/*
+			 * Clock gating work seems to be running in parallel
+			 * hence skip scaling work to avoid deadlock between
+			 * current scaling work and gating work.
+			 */
+			spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
 	if (*freq == UINT_MAX)
 		err = ufshcd_scale_clks(hba, true);
 	else if (*freq == 0)
 		err = ufshcd_scale_clks(hba, false);
 
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (release_clk_hold)
+		__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
 	return err;
 }
 
@@ -6498,7 +6752,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	}
 
 	if (ufshcd_is_clkscaling_enabled(hba)) {
-		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+		hba->devfreq = devm_devfreq_add_device(dev, &ufs_devfreq_profile,
 						   "simple_ondemand", NULL);
 		if (IS_ERR(hba->devfreq)) {
 			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
@@ -6507,18 +6761,19 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 			goto out_remove_scsi_host;
 		}
 		/* Suspend devfreq until the UFS device is detected */
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
+		ufshcd_suspend_clkscaling(hba);
 	}
 
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
 	/*
-	 * The device-initialize-sequence hasn't been invoked yet.
-	 * Set the device to power-off state
+	 * We are assuming that device wasn't put in sleep/power-down
+	 * state exclusively during the boot stage before kernel.
+	 * This assumption helps avoid doing link startup twice during
+	 * ufshcd_probe_hba().
 	 */
-	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_ufs_dev_active(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
 
@@ -6530,7 +6785,6 @@ exit_gating:
 	ufshcd_exit_clk_gating(hba);
 out_disable:
 	hba->is_irq_enabled = false;
-	scsi_host_put(host);
 	ufshcd_hba_exit(hba);
 out_error:
 	return err;