Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual suspects: lpfc, qla2xxx, bnx2fc,
  qedf, hpsa, hisi_sas, smartpqi, cxlflash, aacraid, csiostor along
  with a host of minor and miscellaneous changes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (276 commits)
  qla2xxx: Fix NVMe entry_type for iocb packet on BE system
  scsi: qla2xxx: avoid unused-function warning
  scsi: snic: fix a couple of spelling mistakes/typos
  scsi: qla2xxx: fix a bunch of typos and spelling mistakes
  scsi: lpfc: don't double count abort errors
  scsi: lpfc: spin_lock_irq() is not nestable
  scsi: hisi_sas: optimise DMA slot memory
  scsi: ibmvfc: constify dev_pm_ops structures.
  scsi: ibmvscsi: constify dev_pm_ops structures.
  scsi: cxlflash: Update debug prints in reset handlers
  scsi: cxlflash: Update send_tmf() parameters
  scsi: cxlflash: Avoid double free of character device
  scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state
  scsi: ses: do not add a device to an enclosure if enclosure_add_links() fails.
  scsi: ufs: flush eh_work when eh_work scheduled.
  scsi: qla2xxx: Protect access to qpair members with qpair->qp_lock
  scsi: sun_esp: fix device reference leaks
  scsi: fnic: changing queue command to return result DID_IMM_RETRY when rport is init
  scsi: fnic: correct speed display and add support for 25,40 and 100G
  scsi: fnic: added timestamp reporting in fnic debug stats
  ...
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -756,6 +756,7 @@ struct lpfc_hba {
 	uint8_t  nvmet_support;	/* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
+	uint32_t initial_imax;
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
@@ -777,6 +778,7 @@ struct lpfc_hba {
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_task_mgmt_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_auto_imax;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_fcp_io_channel;
@@ -913,16 +915,16 @@ struct lpfc_hba {
 	/*
 	 * stat counters
 	 */
-	uint64_t fc4ScsiInputRequests;
-	uint64_t fc4ScsiOutputRequests;
-	uint64_t fc4ScsiControlRequests;
-	uint64_t fc4ScsiIoCmpls;
-	uint64_t fc4NvmeInputRequests;
-	uint64_t fc4NvmeOutputRequests;
-	uint64_t fc4NvmeControlRequests;
-	uint64_t fc4NvmeIoCmpls;
-	uint64_t fc4NvmeLsRequests;
-	uint64_t fc4NvmeLsCmpls;
+	atomic_t fc4ScsiInputRequests;
+	atomic_t fc4ScsiOutputRequests;
+	atomic_t fc4ScsiControlRequests;
+	atomic_t fc4ScsiIoCmpls;
+	atomic_t fc4NvmeInputRequests;
+	atomic_t fc4NvmeOutputRequests;
+	atomic_t fc4NvmeControlRequests;
+	atomic_t fc4NvmeIoCmpls;
+	atomic_t fc4NvmeLsRequests;
+	atomic_t fc4NvmeLsCmpls;
 
 	uint64_t bg_guard_err_cnt;
 	uint64_t bg_apptag_err_cnt;
@@ -1050,6 +1052,7 @@ struct lpfc_hba {
 
 	uint8_t  temp_sensor_support;
 	/* Fields used for heart beat. */
+	unsigned long last_eqdelay_time;
 	unsigned long last_completion_time;
 	unsigned long skipped_hb;
 	struct timer_list hb_tmofunc;
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,9 +148,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
+	struct lpfc_nodelist *ndlp;
 	struct nvme_fc_remote_port *nrport;
 	uint64_t data1, data2, data3, tot;
 	char *statep;
 	int len = 0;
 
@@ -171,7 +171,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	else
 		statep = "INIT";
 	len += snprintf(buf + len, PAGE_SIZE - len,
-			"NVME Target: Enabled State %s\n",
+			"NVME Target Enabled State %s\n",
 			statep);
 	len += snprintf(buf + len, PAGE_SIZE - len,
 			"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
@@ -245,11 +245,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			atomic_read(&tgtp->xmt_abort_rsp),
 			atomic_read(&tgtp->xmt_abort_rsp_error));
 
+	spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	tot = phba->sli4_hba.nvmet_xri_cnt -
+		(phba->sli4_hba.nvmet_ctx_get_cnt +
+		phba->sli4_hba.nvmet_ctx_put_cnt);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
 	len += snprintf(buf + len, PAGE_SIZE - len,
-			"IO_CTX: %08x outstanding %08x total %x",
-			phba->sli4_hba.nvmet_ctx_cnt,
+			"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+			"CTX Outstanding %08llx\n",
+			phba->sli4_hba.nvmet_xri_cnt,
 			phba->sli4_hba.nvmet_io_wait_cnt,
-			phba->sli4_hba.nvmet_io_wait_total);
+			phba->sli4_hba.nvmet_io_wait_total,
+			tot);
 
 	len += snprintf(buf+len, PAGE_SIZE-len, "\n");
 	return len;
@@ -265,7 +275,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
 
-	spin_lock_irq(shost->host_lock);
 	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -281,9 +290,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			wwn_to_u64(vport->fc_nodename.u.wwn),
 			localport->port_id, statep);
 
-	list_for_each_entry(rport, &lport->rport_list, list) {
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!ndlp->nrport)
+			continue;
+
 		/* local short-hand pointer. */
-		nrport = rport->remoteport;
+		nrport = ndlp->nrport->remoteport;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -311,25 +323,23 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
 				nrport->port_id);
 
-		switch (nrport->port_role) {
-		case FC_PORT_ROLE_NVME_INITIATOR:
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
 			len += snprintf(buf + len, PAGE_SIZE - len,
 					"INITIATOR ");
-			break;
-		case FC_PORT_ROLE_NVME_TARGET:
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
 			len += snprintf(buf + len, PAGE_SIZE - len,
 					"TARGET ");
-			break;
-		case FC_PORT_ROLE_NVME_DISCOVERY:
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
 			len += snprintf(buf + len, PAGE_SIZE - len,
-					"DISCOVERY ");
-			break;
-		default:
+					"DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
 			len += snprintf(buf + len, PAGE_SIZE - len,
-					"UNKNOWN_ROLE x%x",
+					"UNKNOWN ROLE x%x",
 					nrport->port_role);
-			break;
-		}
 
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
 		/* Terminate the string. */
 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -338,19 +348,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 
 	len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"LS: Xmt %016llx Cmpl %016llx\n",
-			phba->fc4NvmeLsRequests,
-			phba->fc4NvmeLsCmpls);
+			"LS: Xmt %016x Cmpl %016x\n",
+			atomic_read(&phba->fc4NvmeLsRequests),
+			atomic_read(&phba->fc4NvmeLsCmpls));
 
+	tot = atomic_read(&phba->fc4NvmeIoCmpls);
+	data1 = atomic_read(&phba->fc4NvmeInputRequests);
+	data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+	data3 = atomic_read(&phba->fc4NvmeControlRequests);
 	len += snprintf(buf+len, PAGE_SIZE-len,
 			"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-			phba->fc4NvmeInputRequests,
-			phba->fc4NvmeOutputRequests,
-			phba->fc4NvmeControlRequests);
+			data1, data2, data3);
 
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			" Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+			" Cmpl %016llx Outstanding %016llx\n",
+			tot, (data1 + data2 + data3) - tot);
 	return len;
 }
@@ -1342,6 +1354,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 			goto board_mode_out;
 		}
 		wait_for_completion(&online_compl);
+		if (status)
+			status = -EIO;
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
@@ -3198,9 +3212,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 
 	shost = lpfc_shost_from_vport(vport);
 	spin_lock_irq(shost->host_lock);
-	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
-		if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->rport)
 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+	}
 	spin_unlock_irq(shost->host_lock);
 }
@@ -4467,9 +4484,11 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	phba->cfg_fcp_imax = (uint32_t)val;
+	phba->initial_imax = phba->cfg_fcp_imax;
 
 	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
-		lpfc_modify_hba_eq_delay(phba, i);
+		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 val);
 
 	return strlen(buf);
 }
@@ -4524,6 +4543,16 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
 		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/*
+ * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
+ *       0       No auto_imax support
+ *       1       auto imax on
+ * Auto imax will change the value of fcp_imax on a per EQ basis, using
+ * the EQ Delay Multiplier, depending on the activity for that EQ.
+ * Value range [0,1]. Default value is 1.
+ */
+LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+
 /**
  * lpfc_state_show - Display current driver CPU affinity
  * @dev: class converted to a Scsi_host structure.
@@ -5150,6 +5179,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_task_mgmt_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_nvme_oas,
+	&dev_attr_lpfc_auto_imax,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
 	&dev_attr_lpfc_fcp_io_channel,
@@ -6168,6 +6198,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+	lpfc_auto_imax_init(phba, lpfc_auto_imax);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
@@ -6212,6 +6243,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 		phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
 	}
 
+	if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
+		phba->cfg_auto_imax = 0;
+	phba->initial_imax = phba->cfg_fcp_imax;
+
 	/* A value of 0 means use the number of CPUs found in the system */
 	if (phba->cfg_fcp_io_channel == 0)
 		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -503,26 +503,23 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
 
 			/*
-			 * This NPortID was previously a FCP target,
+			 * This NPortID was previously a FCP/NVMe target,
 			 * Don't even bother to send GFF_ID.
 			 */
 			ndlp = lpfc_findnode_did(vport, Did);
-			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
-				ndlp->nlp_fc4_type = fc4_type;
-
-			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
-				ndlp->nlp_fc4_type = fc4_type;
-
-				if (ndlp->nlp_type & NLP_FCP_TARGET)
-					lpfc_setup_disc_node(vport, Did);
-
-				else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
-					 0, Did) == 0)
-					vport->num_disc_nodes++;
-
-				else
-					lpfc_setup_disc_node(vport, Did);
-			}
+			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+			    (ndlp->nlp_type &
+			    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
+				if (fc4_type == FC_TYPE_FCP)
+					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+				if (fc4_type == FC_TYPE_NVME)
+					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+				lpfc_setup_disc_node(vport, Did);
+			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+				   0, Did) == 0)
+				vport->num_disc_nodes++;
+			else
+				lpfc_setup_disc_node(vport, Did);
 		} else {
 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
 				"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -323,7 +323,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 		raw_index = phba->hbq_get[i];
 		getidx = le32_to_cpu(raw_index);
 		len += snprintf(buf+len, size-len,
-			"entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+			"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
 			hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
 			hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
@@ -550,8 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_nodelist *ndlp;
 	unsigned char *statep;
 	struct nvme_fc_local_port *localport;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_remote_port *nrport;
 
@@ -623,6 +621,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 					ndlp->nlp_sid);
 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 			len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+		if (ndlp->nlp_type & NLP_NVME_TARGET)
+			len += snprintf(buf + len,
+					size - len, "NVME_TGT sid:%d ",
+					NLP_NO_SID);
+		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+			len += snprintf(buf + len,
+					size - len, "NVME_INITIATOR ");
 		len += snprintf(buf+len, size-len, "usgmap:%x ",
 				ndlp->nlp_usg_map);
 		len += snprintf(buf+len, size-len, "refcnt:%x",
@@ -660,7 +665,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		goto out_exit;
 
 	spin_lock_irq(shost->host_lock);
-	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -673,9 +677,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			localport->port_id, statep);
 
 	len += snprintf(buf + len, size - len, "\tRport List:\n");
-	list_for_each_entry(rport, &lport->rport_list, list) {
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
-		nrport = rport->remoteport;
+		if (!ndlp->nrport)
+			continue;
+
+		nrport = ndlp->nrport->remoteport;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -698,26 +705,23 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 				nrport->port_name);
 		len += snprintf(buf + len, size - len, "WWNN x%llx ",
 				nrport->node_name);
-		switch (nrport->port_role) {
-		case FC_PORT_ROLE_NVME_INITIATOR:
+
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
 			len += snprintf(buf + len, size - len,
-					"NVME INITIATOR ");
-			break;
-		case FC_PORT_ROLE_NVME_TARGET:
+					"INITIATOR ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
 			len += snprintf(buf + len, size - len,
-					"NVME TARGET ");
-			break;
-		case FC_PORT_ROLE_NVME_DISCOVERY:
+					"TARGET ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
 			len += snprintf(buf + len, size - len,
-					"NVME DISCOVERY ");
-			break;
-		default:
+					"DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
 			len += snprintf(buf + len, size - len,
 					"UNKNOWN ROLE x%x",
 					nrport->port_role);
-			break;
-		}
 
 		/* Terminate the string. */
 		len += snprintf(buf + len, size - len, "\n");
 	}
@@ -746,6 +750,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	uint64_t tot, data1, data2, data3;
 	int len = 0;
 	int cnt;
 
@@ -843,11 +848,21 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		}
 
+		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		tot = phba->sli4_hba.nvmet_xri_cnt -
+			(phba->sli4_hba.nvmet_ctx_get_cnt +
+			phba->sli4_hba.nvmet_ctx_put_cnt);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
 		len += snprintf(buf + len, size - len,
-				"IO_CTX: %08x outstanding %08x total %08x\n",
-				phba->sli4_hba.nvmet_ctx_cnt,
+				"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+				"CTX Outstanding %08llx\n",
+				phba->sli4_hba.nvmet_xri_cnt,
 				phba->sli4_hba.nvmet_io_wait_cnt,
-				phba->sli4_hba.nvmet_io_wait_total);
+				phba->sli4_hba.nvmet_io_wait_total,
+				tot);
 	} else {
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 			return len;
@@ -856,18 +871,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				"\nNVME Lport Statistics\n");
 
 		len += snprintf(buf + len, size - len,
-				"LS: Xmt %016llx Cmpl %016llx\n",
-				phba->fc4NvmeLsRequests,
-				phba->fc4NvmeLsCmpls);
+				"LS: Xmt %016x Cmpl %016x\n",
+				atomic_read(&phba->fc4NvmeLsRequests),
+				atomic_read(&phba->fc4NvmeLsCmpls));
+
+		tot = atomic_read(&phba->fc4NvmeIoCmpls);
+		data1 = atomic_read(&phba->fc4NvmeInputRequests);
+		data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+		data3 = atomic_read(&phba->fc4NvmeControlRequests);
 
 		len += snprintf(buf + len, size - len,
 				"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-				phba->fc4NvmeInputRequests,
-				phba->fc4NvmeOutputRequests,
-				phba->fc4NvmeControlRequests);
+				data1, data2, data3);
 
 		len += snprintf(buf + len, size - len,
-				" Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+				" Cmpl %016llx Outstanding %016llx\n",
+				tot, (data1 + data2 + data3) - tot);
 	}
 
 	return len;
@@ -3229,9 +3248,9 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
 
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
-			"bs:x%x proc:x%llx]\n",
+			"bs:x%x proc:x%llx eqd %d]\n",
 			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
-			(unsigned long long)qp->q_cnt_4);
+			(unsigned long long)qp->q_cnt_4, qp->q_mode);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2168,6 +2168,19 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			 ndlp->nlp_fc4_type, ndlp->nlp_DID);
 		return 1;
 	}
 
+	/* SLI3 ports don't support NVME. If this rport is a strict NVME
+	 * FC4 type, implicitly LOGO.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3 &&
+	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+				 ndlp->nlp_type);
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+		return 1;
+	}
+
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, elscmd);
 	if (!elsiocb)
@@ -2268,7 +2281,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* The driver supports 2 FC4 types. Make sure
 	 * a PRLI is issued for all types before exiting.
 	 */
-	if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
 		goto send_next_prli;
 
 	return 0;
@@ -3332,6 +3346,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	 */
 	switch (stat.un.b.lsRjtRsnCode) {
 	case LSRJT_UNABLE_TPC:
+		/* The driver has a VALID PLOGI but the rport has
+		 * rejected the PRLI - can't do it now. Delay
+		 * for 1 second and try again - don't care about
+		 * the explanation.
+		 */
+		if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+			delay = 1000;
+			maxretry = lpfc_max_els_tries + 1;
+			retry = 1;
+			break;
+		}
+
+		/* Legacy bug fix code for targets with PLOGI delays. */
 		if (stat.un.b.lsRjtRsnCodeExp ==
 		    LSEXP_CMD_IN_PROGRESS) {
 			if (cmd == ELS_CMD_PLOGI) {
@@ -3350,9 +3377,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				retry = 1;
 				break;
 			}
-		if ((cmd == ELS_CMD_PLOGI) ||
-		    (cmd == ELS_CMD_PRLI) ||
-		    (cmd == ELS_CMD_NVMEPRLI)) {
+		if (cmd == ELS_CMD_PLOGI) {
 			delay = 1000;
 			maxretry = lpfc_max_els_tries + 1;
 			retry = 1;
@@ -5678,27 +5703,13 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
-	if (beacon->lcb_frequency == 0) {
+	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
+	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
-	if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
-	    (beacon->lcb_type != LPFC_LCB_AMBER)) {
-		rjt_err = LSRJT_CMD_UNSUPPORTED;
-		goto rjt;
-	}
-	if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
-	    (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
-		rjt_err = LSRJT_CMD_UNSUPPORTED;
-		goto rjt;
-	}
-	if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
-	    (beacon->lcb_type != LPFC_LCB_GREEN) &&
-	    (beacon->lcb_type != LPFC_LCB_AMBER)) {
-		rjt_err = LSRJT_CMD_UNSUPPORTED;
-		goto rjt;
-	}
-	if (be16_to_cpu(beacon->lcb_duration) != 0) {
+	if (beacon->lcb_sub_command == LPFC_LCB_ON &&
+	    be16_to_cpu(beacon->lcb_duration) != 0) {
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4167,14 +4167,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_unregister_remote_port(ndlp);
 	}
 
 	/* Notify the NVME transport of this rport's loss on the
 	 * Initiator. For NVME Target, should upcall transport
 	 * in the else clause when API available.
 	 */
 	if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
 		vport->phba->nport_event_cnt++;
-		lpfc_nvme_unregister_port(vport, ndlp);
+		if (vport->phba->nvmet_support == 0)
+			/* Start devloss */
+			lpfc_nvme_unregister_port(vport, ndlp);
+		else
+			/* NVMET has no upcall. */
+			lpfc_nlp_put(ndlp);
 	}
 }
@@ -4182,8 +4182,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	if (new_state == NLP_STE_MAPPED_NODE ||
 	    new_state == NLP_STE_UNMAPPED_NODE) {
-		if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
-		    (ndlp->nlp_DID == Fabric_DID)) {
+		if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+		    ndlp->nlp_DID == Fabric_DID ||
+		    ndlp->nlp_DID == NameServer_DID ||
+		    ndlp->nlp_DID == FDMI_DID) {
 			vport->phba->nport_event_cnt++;
 			/*
 			 * Tell the fc transport about the port, if we haven't
@@ -4192,7 +4194,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_register_remote_port(vport, ndlp);
 		}
 		/* Notify the NVME transport of this new rport. */
-		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
 			if (vport->phba->nvmet_support == 0) {
 				/* Register this rport with the transport.
 				 * Initiators take the NDLP ref count in
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -197,6 +197,7 @@ struct lpfc_sli_intf {
 
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST	651042
+#define LPFC_DMULT_MAX		1023
 
 /* Configuration of Interrupts / sec for entire HBA port */
 #define LPFC_MIN_IMAX		5000
@@ -657,6 +658,15 @@ struct lpfc_register {
 #define LPFC_CTL_PORT_ER1_OFFSET	0x40C
 #define LPFC_CTL_PORT_ER2_OFFSET	0x410
 
+#define LPFC_CTL_PORT_EQ_DELAY_OFFSET	0x418
+#define lpfc_sliport_eqdelay_delay_SHIFT	16
+#define lpfc_sliport_eqdelay_delay_MASK		0xffff
+#define lpfc_sliport_eqdelay_delay_WORD		word0
+#define lpfc_sliport_eqdelay_id_SHIFT		0
+#define lpfc_sliport_eqdelay_id_MASK		0xfff
+#define lpfc_sliport_eqdelay_id_WORD		word0
+#define LPFC_SEC_TO_USEC		1000000
+
 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
  * reside in BAR 2.
  */
@@ -3258,6 +3268,10 @@ struct lpfc_sli4_parameters {
 #define cfg_xib_SHIFT		4
 #define cfg_xib_MASK		0x00000001
 #define cfg_xib_WORD		word19
+#define cfg_eqdr_SHIFT		8
+#define cfg_eqdr_MASK		0x00000001
+#define cfg_eqdr_WORD		word19
+#define LPFC_NODELAY_MAX_IO	32
 };
 
 #define LPFC_SET_UE_RECOVERY	0x10
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1249,6 +1249,12 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
+	struct lpfc_queue *qp;
+	unsigned long time_elapsed;
+	uint32_t tick_cqe, max_cqe, val;
+	uint64_t tot, data1, data2, data3;
+	struct lpfc_register reg_data;
+	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -1263,6 +1269,98 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return;
 
+	if (phba->cfg_auto_imax) {
+		if (!phba->last_eqdelay_time) {
+			phba->last_eqdelay_time = jiffies;
+			goto skip_eqdelay;
+		}
+		time_elapsed = jiffies - phba->last_eqdelay_time;
+		phba->last_eqdelay_time = jiffies;
+
+		tot = 0xffff;
+		/* Check outstanding IO count */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			if (phba->nvmet_support) {
+				spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+				spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+				tot = phba->sli4_hba.nvmet_xri_cnt -
+					(phba->sli4_hba.nvmet_ctx_get_cnt +
+					phba->sli4_hba.nvmet_ctx_put_cnt);
+				spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+				spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+			} else {
+				tot = atomic_read(&phba->fc4NvmeIoCmpls);
+				data1 = atomic_read(
+					&phba->fc4NvmeInputRequests);
+				data2 = atomic_read(
+					&phba->fc4NvmeOutputRequests);
+				data3 = atomic_read(
+					&phba->fc4NvmeControlRequests);
+				tot = (data1 + data2 + data3) - tot;
+			}
+		}
+
+		/* Interrupts per sec per EQ */
+		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
+
+		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
+		max_cqe = time_elapsed * tick_cqe;
+
+		for (i = 0; i < phba->io_channel_irqs; i++) {
+			/* Fast-path EQ */
+			qp = phba->sli4_hba.hba_eq[i];
+			if (!qp)
+				continue;
+
+			/* Use no EQ delay if we don't have many outstanding
+			 * IOs, or if we are only processing 1 CQE/ISR or less.
+			 * Otherwise, assume we can process up to lpfc_fcp_imax
+			 * interrupts per HBA.
+			 */
+			if (tot < LPFC_NODELAY_MAX_IO ||
+			    qp->EQ_cqe_cnt <= max_cqe)
+				val = 0;
+			else
+				val = phba->cfg_fcp_imax;
+
+			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+				/* Use EQ Delay Register method */
+
+				/* Convert for EQ Delay register */
+				if (val) {
+					/* First, interrupts per sec per EQ */
+					val = phba->cfg_fcp_imax /
+						phba->io_channel_irqs;
+
+					/* us delay between each interrupt */
+					val = LPFC_SEC_TO_USEC / val;
+				}
+				if (val != qp->q_mode) {
+					reg_data.word0 = 0;
+					bf_set(lpfc_sliport_eqdelay_id,
+					       &reg_data, qp->queue_id);
+					bf_set(lpfc_sliport_eqdelay_delay,
+					       &reg_data, val);
+					writel(reg_data.word0, eqdreg);
+				}
+			} else {
+				/* Use mbox command method */
+				if (val != qp->q_mode)
+					lpfc_modify_hba_eq_delay(phba, i,
+								 1, val);
+			}
+
+			/*
+			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
+			 * between interrupts for EQDR.
+			 */
+			qp->q_mode = val;
+			qp->EQ_cqe_cnt = 0;
+		}
+	}
+
+skip_eqdelay:
 	spin_lock_irq(&phba->pport->work_port_lock);
 
 	if (time_after(phba->last_completion_time +
@@ -2707,13 +2805,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 			lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 
-			if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
-				/* Remove the NVME transport reference now and
-				 * continue to remove the node.
-				 */
-				lpfc_nlp_put(ndlp);
-			}
-
 			lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RM);
 		}
@@ -3392,7 +3483,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 
 	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
 	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
 		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
@@ -3596,14 +3686,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *mboxq;
 	MAILBOX_t *mb;
 
-	if (phba->sli_rev < LPFC_SLI_REV4) {
-		/* Reset the port first */
-		lpfc_sli_brdrestart(phba);
-		rc = lpfc_sli_chipset_init(phba);
-		if (rc)
-			return (uint64_t)-1;
-	}
-
 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 						GFP_KERNEL);
 	if (!mboxq)
@@ -3757,8 +3839,19 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	int i;
 	uint64_t wwn;
 	bool use_no_reset_hba = false;
+	int rc;
 
-	wwn = lpfc_get_wwpn(phba);
+	if (lpfc_no_hba_reset_cnt) {
+		if (phba->sli_rev < LPFC_SLI_REV4 &&
+		    dev == &phba->pcidev->dev) {
+			/* Reset the port first */
+			lpfc_sli_brdrestart(phba);
+			rc = lpfc_sli_chipset_init(phba);
+			if (rc)
+				return NULL;
+		}
+		wwn = lpfc_get_wwpn(phba);
+	}
 
 	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
 		if (wwn == lpfc_no_hba_reset[i]) {
@@ -5837,7 +5930,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
 
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -5846,7 +5940,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
 	/*
@@ -6731,6 +6826,16 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	phba->fc_arbtov = FF_DEF_ARBTOV;
 
 	atomic_set(&phba->sdev_cnt, 0);
+	atomic_set(&phba->fc4ScsiInputRequests, 0);
+	atomic_set(&phba->fc4ScsiOutputRequests, 0);
+	atomic_set(&phba->fc4ScsiControlRequests, 0);
+	atomic_set(&phba->fc4ScsiIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeInputRequests, 0);
+	atomic_set(&phba->fc4NvmeOutputRequests, 0);
+	atomic_set(&phba->fc4NvmeControlRequests, 0);
+	atomic_set(&phba->fc4NvmeIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeLsRequests, 0);
+	atomic_set(&phba->fc4NvmeLsCmpls, 0);
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		return -ENODEV;
@@ -7247,6 +7352,9 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
+		phba->sli4_hba.u.if_type2.EQDregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PORT_EQ_DELAY_OFFSET;
 		phba->sli4_hba.u.if_type2.ERR1regaddr =
 			phba->sli4_hba.conf_regs_memmap_p +
 			LPFC_CTL_PORT_ER1_OFFSET;
@@ -8773,7 +8881,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	}
 
 	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
-		lpfc_modify_hba_eq_delay(phba, qidx);
+		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 phba->cfg_fcp_imax);
 
 	return 0;
@@ -9655,6 +9764,7 @@ static int
 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 {
 	int vectors, rc, index;
+	char *name;
 
 	/* Set up MSI-X multi-message vectors */
 	vectors = phba->io_channel_irqs;
@@ -9673,9 +9783,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 
 	/* Assign MSI-X vectors to interrupt handlers */
 	for (index = 0; index < vectors; index++) {
-		memset(&phba->sli4_hba.handler_name[index], 0, 16);
-		snprintf((char *)&phba->sli4_hba.handler_name[index],
-			 LPFC_SLI4_HANDLER_NAME_SZ,
+		name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
+		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
 
 		phba->sli4_hba.hba_eq_hdl[index].idx = index;
@@ -9684,12 +9794,12 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		if (phba->cfg_fof && (index == (vectors - 1)))
 			rc = request_irq(pci_irq_vector(phba->pcidev, index),
 				 &lpfc_sli4_fof_intr_handler, 0,
-				 (char *)&phba->sli4_hba.handler_name[index],
+				 name,
 				 &phba->sli4_hba.hba_eq_hdl[index]);
 		else
 			rc = request_irq(pci_irq_vector(phba->pcidev, index),
 				 &lpfc_sli4_hba_intr_handler, 0,
-				 (char *)&phba->sli4_hba.handler_name[index],
+				 name,
 				 &phba->sli4_hba.hba_eq_hdl[index]);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -10241,6 +10351,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
 		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
 
+	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
+		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
+
 	/* Make sure that sge_supp_len can be handled by the driver */
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -186,13 +186,12 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 
 	/* Remove this rport from the lport's list - memory is owned by the
 	 * transport. Remove the ndlp reference for the NVME transport before
-	 * calling state machine to remove the node, this is devloss = 0
-	 * semantics.
+	 * calling state machine to remove the node.
 	 */
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
 			"6146 remoteport delete complete %p\n",
 			remoteport);
-	list_del(&rport->list);
 	ndlp->nrport = NULL;
 	lpfc_nlp_put(ndlp);
 
 rport_err:
@@ -212,7 +211,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
 	struct lpfc_dmabuf *buf_ptr;
 	struct lpfc_nodelist *ndlp;
 
-	vport->phba->fc4NvmeLsCmpls++;
+	atomic_inc(&vport->phba->fc4NvmeLsCmpls);
 
 	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
 	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -479,7 +478,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
 			       pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
 			       &pnvme_lsreq->rspdma);
 
-	vport->phba->fc4NvmeLsRequests++;
+	atomic_inc(&vport->phba->fc4NvmeLsRequests);
 
 	/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
 	 * This code allows it all to work.
@@ -774,7 +773,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
 				 wcqe);
 		return;
 	}
-	phba->fc4NvmeIoCmpls++;
+	atomic_inc(&phba->fc4NvmeIoCmpls);
 
 	nCmd = lpfc_ncmd->nvmeCmd;
 	rport = lpfc_ncmd->nrport;
@@ -999,7 +998,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 				bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 				       NVME_WRITE_CMD);
 
-				phba->fc4NvmeOutputRequests++;
+				atomic_inc(&phba->fc4NvmeOutputRequests);
 			} else {
 				/* Word 7 */
 				bf_set(wqe_cmnd, &wqe->generic.wqe_com,
@@ -1020,7 +1019,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 				bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 				       NVME_READ_CMD);
 
-				phba->fc4NvmeInputRequests++;
+				atomic_inc(&phba->fc4NvmeInputRequests);
 			}
 		} else {
 			/* Word 4 */
@@ -1041,7 +1040,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			/* Word 11 */
 			bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
 
-			phba->fc4NvmeControlRequests++;
+			atomic_inc(&phba->fc4NvmeControlRequests);
 		}
 		/*
 		 * Finish initializing those WQE fields that are independent
@@ -1362,6 +1361,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	return 0;
 
  out_free_nvme_buf:
+	if (lpfc_ncmd->nvmeCmd->sg_cnt) {
+		if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
+			atomic_dec(&phba->fc4NvmeOutputRequests);
+		else
+			atomic_dec(&phba->fc4NvmeInputRequests);
+	} else
+		atomic_dec(&phba->fc4NvmeControlRequests);
 	lpfc_release_nvme_buf(phba, lpfc_ncmd);
  out_fail:
 	return ret;
@@ -1421,7 +1427,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_vport *vport;
 	struct lpfc_hba *phba;
-	struct lpfc_nodelist *ndlp;
 	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvme_buf *lpfc_nbuf;
 	struct lpfc_iocbq *abts_buf;
@@ -1443,38 +1448,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 			 pnvme_rport->port_id,
 			 pnvme_fcreq);
 
-	/*
-	 * Catch race where our node has transitioned, but the
-	 * transport is still transitioning.
-	 */
-	ndlp = rport->ndlp;
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
-				 "6054 rport %p, ndlp %p, DID x%06x ndlp "
-				 " not ready.\n",
-				 rport, ndlp, pnvme_rport->port_id);
-
-		ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
-		if (!ndlp) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
-					 "6055 Could not find node for "
-					 "DID %x\n",
-					 pnvme_rport->port_id);
-			return;
-		}
-	}
-
-	/* The remote node has to be ready to send an abort. */
-	if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
-	    !(ndlp->nlp_type & NLP_NVME_TARGET)) {
-		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
-				 "6048 rport %p, DID x%06x not ready for "
-				 "IO. State x%x, Type x%x\n",
-				 rport, pnvme_rport->port_id,
-				 ndlp->nlp_state, ndlp->nlp_type);
-		return;
-	}
-
 	/* If the hba is getting reset, this flag is set. It is
 	 * cleared when the reset is complete and rings reestablished.
 	 */
@@ -1535,7 +1508,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 
 	lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
 			 nvmereq_wqe->sli4_xritag,
-			 nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
+			 nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
 
 	/* Outstanding abort is in progress */
 	if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -2208,7 +2181,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		lport = (struct lpfc_nvme_lport *)localport->private;
 		vport->localport = localport;
 		lport->vport = vport;
-		INIT_LIST_HEAD(&lport->rport_list);
 		vport->nvmei_support = 1;
 		len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
 		vport->phba->total_nvme_bufs += len;
@@ -2233,7 +2205,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
 	int ret;
 
 	if (vport->nvmei_support == 0)
@@ -2246,19 +2217,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6011 Destroying NVME localport %p\n",
 			 localport);
-	list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
-		/* The last node ref has to get released now before the rport
-		 * private memory area is released by the transport.
-		 */
-		list_del(&rport->list);
-
-		init_completion(&rport->rport_unreg_done);
-		ret = nvme_fc_unregister_remoteport(rport->remoteport);
-		if (ret)
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-					 "6008 rport fail destroy %x\n", ret);
-		wait_for_completion_timeout(&rport->rport_unreg_done, 5);
-	}
 
 	/* lport's rport list is clear. Unregister
 	 * lport and release resources.
@@ -2340,99 +2298,68 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	localport = vport->localport;
 	lport = (struct lpfc_nvme_lport *)localport->private;
 
-	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
-
-		/* The driver isn't expecting the rport wwn to change
-		 * but it might get a different DID on a different
-		 * fabric.
+	/* NVME rports are not preserved across devloss.
+	 * Just register this instance. Note, rpinfo->dev_loss_tmo
+	 * is left 0 to indicate accept transport defaults. The
+	 * driver communicates port role capabilities consistent
+	 * with the PRLI response data.
+	 */
+	memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
+	rpinfo.port_id = ndlp->nlp_DID;
+	if (ndlp->nlp_type & NLP_NVME_TARGET)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
+	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
+
+	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+		rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+	ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
+	if (!ret) {
+		/* If the ndlp already has an nrport, this is just
+		 * a resume of the existing rport. Else this is a
+		 * new rport.
 		 */
-		list_for_each_entry(rport, &lport->rport_list, list) {
-			if (rport->remoteport->port_name !=
-			    wwn_to_u64(ndlp->nlp_portname.u.wwn))
-				continue;
-			lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
-					 "6035 lport %p, found matching rport "
-					 "at wwpn 0x%llx, Data: x%x x%x x%x "
-					 "x%06x\n",
-					 lport,
-					 rport->remoteport->port_name,
-					 rport->remoteport->port_id,
-					 rport->remoteport->port_role,
+		rport = remote_port->private;
+		if (ndlp->nrport == rport) {
+			lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+					 LOG_NVME_DISC,
+					 "6014 Rebinding lport to "
+					 "rport wwpn 0x%llx, "
+					 "Data: x%x x%x x%x x%06x\n",
+					 remote_port->port_name,
+					 remote_port->port_id,
+					 remote_port->port_role,
 					 ndlp->nlp_type,
 					 ndlp->nlp_DID);
-			remote_port = rport->remoteport;
-			if ((remote_port->port_id == 0) &&
-			    (remote_port->port_role ==
-			     FC_PORT_ROLE_NVME_DISCOVERY)) {
-				remote_port->port_id = ndlp->nlp_DID;
-				remote_port->port_role &=
-					~FC_PORT_ROLE_NVME_DISCOVERY;
-				if (ndlp->nlp_type & NLP_NVME_TARGET)
-					remote_port->port_role |=
-						FC_PORT_ROLE_NVME_TARGET;
-				if (ndlp->nlp_type & NLP_NVME_INITIATOR)
-					remote_port->port_role |=
-						FC_PORT_ROLE_NVME_INITIATOR;
-
-				lpfc_printf_vlog(ndlp->vport, KERN_INFO,
-						 LOG_NVME_DISC,
-						 "6014 Rebinding lport to "
-						 "rport wwpn 0x%llx, "
-						 "Data: x%x x%x x%x x%06x\n",
-						 remote_port->port_name,
-						 remote_port->port_id,
-						 remote_port->port_role,
-						 ndlp->nlp_type,
-						 ndlp->nlp_DID);
-			}
-			return 0;
-		}
-
-		/* NVME rports are not preserved across devloss.
-		 * Just register this instance.
-		 */
-		rpinfo.port_id = ndlp->nlp_DID;
-		rpinfo.port_role = 0;
-		if (ndlp->nlp_type & NLP_NVME_TARGET)
-			rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
-		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
-			rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
-		rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
-		rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-		ret = nvme_fc_register_remoteport(localport, &rpinfo,
-						  &remote_port);
-		if (!ret) {
-			rport = remote_port->private;
+		} else {
+			/* New rport. */
 			rport->remoteport = remote_port;
 			rport->lport = lport;
 			rport->ndlp = lpfc_nlp_get(ndlp);
 			if (!rport->ndlp)
 				return -1;
 			ndlp->nrport = rport;
-			INIT_LIST_HEAD(&rport->list);
-			list_add_tail(&rport->list, &lport->rport_list);
 			lpfc_printf_vlog(vport, KERN_INFO,
 					 LOG_NVME_DISC | LOG_NODE,
-					 "6022 Binding new rport to lport %p "
-					 "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
-					 "DID x%06x Role x%x\n",
+					 "6022 Binding new rport to "
+					 "lport %p Rport WWNN 0x%llx, "
+					 "Rport WWPN 0x%llx DID "
+					 "x%06x Role x%x\n",
 					 lport,
 					 rpinfo.node_name, rpinfo.port_name,
 					 rpinfo.port_id, rpinfo.port_role);
-		} else {
-			lpfc_printf_vlog(vport, KERN_ERR,
-					 LOG_NVME_DISC | LOG_NODE,
-					 "6031 RemotePort Registration failed "
-					 "err: %d, DID x%06x\n",
-					 ret, ndlp->nlp_DID);
 		}
 	} else {
-		ret = -EINVAL;
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
-				 "6027 Unknown nlp_type x%x on DID x%06x "
-				 "ndlp %p. Not Registering nvme rport\n",
-				 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
+		lpfc_printf_vlog(vport, KERN_ERR,
+				 LOG_NVME_DISC | LOG_NODE,
+				 "6031 RemotePort Registration failed "
+				 "err: %d, DID x%06x\n",
+				 ret, ndlp->nlp_DID);
 	}
 
 	return ret;
 #else
 	return 0;
@@ -2460,7 +2387,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport;
 	struct nvme_fc_remote_port *remoteport;
-	unsigned long wait_tmo;
 
 	localport = vport->localport;
 
@@ -2491,6 +2417,10 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	 */
 	if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
 		init_completion(&rport->rport_unreg_done);
+
+		/* No concern about the role change on the nvme remoteport.
+		 * The transport will update it.
+		 */
 		ret = nvme_fc_unregister_remoteport(remoteport);
 		if (ret != 0) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
@@ -2499,17 +2429,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 					 ret, remoteport->port_state);
 		}
 
-		/* Wait for the driver's delete completion routine to finish
-		 * before proceeding. This guarantees the transport and driver
-		 * have completed the unreg process.
-		 */
-		wait_tmo = msecs_to_jiffies(5000);
-		ret = wait_for_completion_timeout(&rport->rport_unreg_done,
-						  wait_tmo);
-		if (ret == 0) {
-			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
-					 "6169 Unreg nvme wait timeout\n");
-		}
 	}
 	return;
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -35,13 +35,11 @@ struct lpfc_nvme_qhandle {
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct list_head rport_list;
 	struct completion lport_unreg_done;
 	/* Add stats counters here */
 };
 
 struct lpfc_nvme_rport {
-	struct list_head list;
 	struct lpfc_nvme_lport *lport;
 	struct nvme_fc_remote_port *remoteport;
 	struct lpfc_nodelist *ndlp;
@@ -112,6 +112,15 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
|
||||
status = bf_get(lpfc_wcqe_c_status, wcqe);
|
||||
result = wcqe->parameter;
|
||||
ctxp = cmdwqe->context2;
|
||||
|
||||
if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6410 NVMET LS cmpl state mismatch IO x%x: "
|
||||
"%d %d\n",
|
||||
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
||||
}
|
||||
|
||||
if (!phba->targetport)
|
||||
goto out;
|
||||
|
||||
@@ -123,15 +132,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
|
||||
|
||||
out:
|
||||
ctxp = cmdwqe->context2;
|
||||
rsp = &ctxp->ctx.ls_req;
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
|
||||
ctxp->oxid, status, result);
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
||||
"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
|
||||
ctxp, status, result);
|
||||
"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
|
||||
status, result, ctxp->oxid);
|
||||
|
||||
lpfc_nlp_put(cmdwqe->context1);
|
||||
cmdwqe->context2 = NULL;
|
||||
@@ -162,7 +170,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
|
||||
struct lpfc_nvmet_tgtport *tgtp;
|
||||
struct fc_frame_header *fc_hdr;
|
||||
struct rqb_dmabuf *nvmebuf;
|
||||
struct lpfc_dmabuf *hbufp;
|
||||
uint32_t *payload;
|
||||
uint32_t size, oxid, sid, rc;
|
||||
unsigned long iflag;
|
||||
@@ -173,11 +180,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
|
||||
ctxp->txrdy = NULL;
|
||||
ctxp->txrdy_phys = 0;
|
||||
}
|
||||
|
||||
if (ctxp->state == LPFC_NVMET_STE_FREE) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6411 NVMET free, already free IO x%x: %d %d\n",
|
||||
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
||||
}
|
||||
ctxp->state = LPFC_NVMET_STE_FREE;
|
||||
|
||||
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
|
||||
if (phba->sli4_hba.nvmet_io_wait_cnt) {
|
||||
hbufp = &nvmebuf->hbuf;
|
||||
list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
|
||||
nvmebuf, struct rqb_dmabuf,
|
||||
hbuf.list);
|
||||
@@ -193,7 +205,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
|
||||
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
||||
|
||||
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
|
||||
memset(ctxp, 0, sizeof(ctxp->ctx));
|
||||
ctxp->wqeq = NULL;
|
||||
ctxp->txrdy = NULL;
|
||||
ctxp->offset = 0;
|
||||
@@ -256,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
|
||||
|
||||
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
|
||||
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
|
||||
list_add_tail(&ctx_buf->list,
|
||||
&phba->sli4_hba.lpfc_nvmet_ctx_list);
|
||||
phba->sli4_hba.nvmet_ctx_cnt++;
|
||||
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
|
||||
&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
|
||||
phba->sli4_hba.nvmet_ctx_put_cnt++;
|
||||
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -580,8 +591,17 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
|
||||
int rc;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
|
||||
"6023 %s: Entrypoint ctx %p %p\n", __func__,
|
||||
ctxp, tgtport);
|
||||
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
|
||||
|
||||
if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
|
||||
(ctxp->entry_cnt != 1)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6412 NVMET LS rsp state mismatch "
|
||||
"oxid x%x: %d %d\n",
|
||||
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
|
||||
}
|
||||
ctxp->state = LPFC_NVMET_STE_LS_RSP;
|
||||
ctxp->entry_cnt++;
|
||||
|
||||
nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
|
||||
rsp->rsplen);
|
||||
@@ -751,15 +771,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
|
||||
unsigned long flags;
|
||||
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
||||
"6103 Abort op: oxri x%x flg x%x cnt %d\n",
|
||||
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
|
||||
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
|
||||
ctxp->oxid, ctxp->flag, ctxp->state);
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
|
||||
"xri x%x flg x%x cnt x%x\n",
|
||||
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
|
||||
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
|
||||
ctxp->oxid, ctxp->flag, ctxp->state);
|
||||
|
||||
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
|
||||
ctxp->entry_cnt++;
|
||||
|
||||
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
||||
|
||||
/* Since iaab/iaar are NOT set, we need to check
|
||||
@@ -770,12 +789,17 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
|
||||
return;
|
||||
}
|
||||
ctxp->flag |= LPFC_NVMET_ABORT_OP;
|
||||
if (ctxp->flag & LPFC_NVMET_IO_INP)
|
||||
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
||||
ctxp->oxid);
|
||||
else
|
||||
|
||||
/* An state of LPFC_NVMET_STE_RCV means we have just received
|
||||
* the NVME command and have not started processing it.
|
||||
* (by issuing any IO WQEs on this exchange yet)
|
||||
*/
|
||||
if (ctxp->state == LPFC_NVMET_STE_RCV)
|
||||
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
||||
ctxp->oxid);
|
||||
else
|
||||
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
|
||||
ctxp->oxid);
|
||||
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
||||
}
|
||||
|
||||
@@ -790,6 +814,13 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
bool aborting = false;

if (ctxp->state != LPFC_NVMET_STE_DONE &&
ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}

spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
@@ -828,37 +859,55 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

void
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;

list_for_each_entry_safe(
ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
spin_lock_irqsave(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;

spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
flags);
spin_unlock(&phba->sli4_hba.sgl_list_lock);

lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;

spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock(&phba->sli4_hba.sgl_list_lock);

lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}

int
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -891,6 +940,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
ctx_buf->context->state = LPFC_NVMET_STE_FREE;

ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
@@ -926,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
spin_lock(&phba->sli4_hba.nvmet_io_lock);
spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
spin_unlock(&phba->sli4_hba.nvmet_io_lock);
&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
}
phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}

@@ -1103,7 +1153,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6318 XB aborted %x flg x%x (%x)\n",
"6318 XB aborted oxid %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1253,7 +1303,8 @@ dropit:
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->wqeq = NULL;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->state = LPFC_NVMET_STE_LS_RCV;
ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;

lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
@@ -1268,8 +1319,8 @@ dropit:
payload, size);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
"%08x %08x %08x\n", __func__, ctxp, size, rc,
"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
"%08x %08x %08x\n", size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));

@@ -1337,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
goto dropit;
}

spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
if (phba->sli4_hba.nvmet_ctx_cnt) {
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
if (phba->sli4_hba.nvmet_ctx_get_cnt) {
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
phba->sli4_hba.nvmet_ctx_cnt--;
phba->sli4_hba.nvmet_ctx_get_cnt--;
} else {
spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
if (phba->sli4_hba.nvmet_ctx_put_cnt) {
list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
phba->sli4_hba.nvmet_ctx_get_cnt =
phba->sli4_hba.nvmet_ctx_put_cnt;
phba->sli4_hba.nvmet_ctx_put_cnt = 0;
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);

list_remove_head(
&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
phba->sli4_hba.nvmet_ctx_get_cnt--;
} else {
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
}
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);

fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -1383,7 +1452,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);

ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
memset(ctxp, 0, sizeof(ctxp->ctx));
if (ctxp->state != LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -1547,9 +1620,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,

if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6104 lpfc_nvmet_prep_ls_wqe: link err: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6104 NVMET prep LS wqe: link err: "
"NPORT x%x oxid:x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1557,9 +1630,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6105 NVMET prep LS wqe: No WQE: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1568,9 +1641,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6106 NVMET prep LS wqe: No ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
goto nvme_wqe_free_wqeq_exit;
}
ctxp->wqeq = nvmewqe;
@@ -1642,9 +1715,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

/* Xmit NVME response to remote NPORT <did> */
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6039 Xmit NVME LS response to remote "
"6039 Xmit NVMET LS response to remote "
"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
rspsize);
@@ -1676,9 +1749,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,

if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
"NPORT x%x oxid:x%x\n", ctxp->sid,
ctxp->oxid);
"6107 NVMET prep FCP wqe: link err:"
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1687,17 +1760,18 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6108 NVMET prep FCP wqe: no ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
"NPORT x%x oxid:x%x cnt %d\n",
ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
ctxp->sid, ctxp->oxid, ctxp->state,
phba->cfg_nvme_seg_cnt);
return NULL;
}

@@ -1708,9 +1782,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6110 lpfc_nvmet_prep_fcp_wqe: No "
"WQE: NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6110 NVMET prep FCP wqe: No "
"WQE: NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
ctxp->wqeq = nvmewqe;
@@ -1722,13 +1796,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Sanity check */
if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
((ctxp->state == LPFC_NVMET_STE_DATA) &&
(ctxp->entry_cnt > 1))) {
(ctxp->state == LPFC_NVMET_STE_DATA)) {
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6111 Wrong state %s: %d cnt %d\n",
__func__, ctxp->state, ctxp->entry_cnt);
"6111 Wrong state NVMET FCP: %d cnt %d\n",
ctxp->state, ctxp->entry_cnt);
return NULL;
}

@@ -1832,7 +1905,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
}
ctxp->state = LPFC_NVMET_STE_DATA;
break;

case NVMET_FCOP_WRITEDATA:
@@ -1923,7 +1995,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = 0;
sgl++;
ctxp->state = LPFC_NVMET_STE_DATA;
atomic_inc(&tgtp->xmt_fcp_write);
break;

@@ -1980,7 +2051,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
FCP_COMMAND_TRSP);
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
ctxp->state = LPFC_NVMET_STE_RSP;

if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
/* Good response - all zero's on wire */
@@ -2029,6 +2099,8 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl++;
ctxp->offset += cnt;
}
ctxp->state = LPFC_NVMET_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
}

@@ -2124,10 +2196,6 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

if (!ctxp) {
/* if context is clear, related io already complete */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2137,6 +2205,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2206,17 +2278,32 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

if (ctxp) {
cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
} else
if (!ctxp) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6415 NVMET LS Abort No ctx: WCQE: "
"%08x %08x %08x %08x\n",
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

lpfc_sli_release_iocbq(phba, cmdwqe);
return;
}

if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6416 NVMET LS abort cmpl state mismatch: "
"oxid x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
}

static int
@@ -2240,7 +2327,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

@@ -2250,7 +2337,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,

abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;

/*
* Since we zero the whole WQE, we need to ensure we set the WQE fields
@@ -2338,7 +2424,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

@@ -2351,7 +2437,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
@@ -2437,6 +2523,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -2471,6 +2558,15 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}

if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
rc = WQE_BUSY;
goto aerr;
}
ctxp->state = LPFC_NVMET_STE_ABORT;
ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
goto aerr;
@@ -2487,10 +2583,9 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}

aerr:
atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
return 1;
@@ -2507,12 +2602,24 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
unsigned long flags;
int rc;

if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
(ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
ctxp->entry_cnt++;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6418 NVMET LS abort state mismatch "
"IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
}

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@@ -2523,7 +2630,10 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;

lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
rc = WQE_BUSY;
goto out;
}

spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
@@ -2535,13 +2645,13 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}

out:
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
kfree(ctxp);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 0;
}

@@ -93,12 +93,14 @@ struct lpfc_nvmet_rcv_ctx {
uint16_t cpu;
uint16_t state;
/* States */
#define LPFC_NVMET_STE_FREE 0
#define LPFC_NVMET_STE_RCV 1
#define LPFC_NVMET_STE_DATA 2
#define LPFC_NVMET_STE_ABORT 3
#define LPFC_NVMET_STE_RSP 4
#define LPFC_NVMET_STE_DONE 5
#define LPFC_NVMET_STE_LS_RCV 1
#define LPFC_NVMET_STE_LS_ABORT 2
#define LPFC_NVMET_STE_LS_RSP 3
#define LPFC_NVMET_STE_RCV 4
#define LPFC_NVMET_STE_DATA 5
#define LPFC_NVMET_STE_ABORT 6
#define LPFC_NVMET_STE_DONE 7
#define LPFC_NVMET_STE_FREE 0xff
uint16_t flag;
#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */

@@ -3931,7 +3931,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;

phba->fc4ScsiIoCmpls++;
atomic_inc(&phba->fc4ScsiIoCmpls);

/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
@@ -4250,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4ScsiOutputRequests++;
atomic_inc(&phba->fc4ScsiOutputRequests);
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4ScsiInputRequests++;
atomic_inc(&phba->fc4ScsiInputRequests);
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ScsiControlRequests++;
atomic_inc(&phba->fc4ScsiControlRequests);
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4640,7 +4640,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
(uint32_t)
(cmnd->request->timeout / 1000));

switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
case WRITE_DATA:
atomic_dec(&phba->fc4ScsiOutputRequests);
break;
case READ_DATA:
atomic_dec(&phba->fc4ScsiInputRequests);
break;
default:
atomic_dec(&phba->fc4ScsiControlRequests);
}
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {

@@ -968,6 +968,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
list_remove_head(lpfc_els_sgl_list, sglq,
struct lpfc_sglq, list);
if (sglq == start_sglq) {
list_add_tail(&sglq->list, lpfc_els_sgl_list);
sglq = NULL;
break;
} else
@@ -4302,7 +4303,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)

/* Perform FCoE PCI function reset before freeing queue memory */
rc = lpfc_pci_function_reset(phba);
lpfc_sli4_queue_destroy(phba);

/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -4427,6 +4427,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
pci_disable_pcie_error_reporting(phba->pcidev);

lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);

return rc;
}
@@ -6926,18 +6927,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
cnt = phba->cfg_iocb_cnt * 1024;
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}

lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
@@ -6958,18 +6947,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}

cnt = phba->cfg_iocb_cnt * 1024;
}

if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2820 initialize iocb list %d total %d\n",
"2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6301 Failed to init iocb list.\n");
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
}

if (phba->nvmet_support)
lpfc_nvmet_create_targetport(phba);

if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
@@ -7512,7 +7507,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
mbx->mbxCommand, phba->pport->port_state,
mbx->mbxCommand,
phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);

psli->slistat.mbox_busy++;
@@ -7564,7 +7560,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
mbx->mbxCommand, phba->pport->port_state,
mbx->mbxCommand,
phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);

if (mbx->mbxCommand != MBX_HEARTBEAT) {
@@ -10950,6 +10947,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
struct lpfc_sli_ring *pring_s4;
IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
int i;
@@ -11003,8 +11001,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,

/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocb, 0);
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
if (!pring_s4)
continue;
ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
} else
ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
errcnt++;
@@ -13256,6 +13261,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13466,6 +13472,7 @@ process_cq:
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
cq->assoc_qp->EQ_cqe_cnt += ecount;

/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13547,6 +13554,9 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
return;
}

/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.fof_eq;

/* Process all the entries to the OAS CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
@@ -13557,6 +13567,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
cq->assoc_qp->EQ_cqe_cnt += ecount;

/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13617,7 +13628,6 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)

/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
eq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13729,7 +13739,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)

/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
fpeq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13988,14 +13997,15 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
uint32_t numq, uint32_t imax)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
struct lpfc_queue *eq;
int cnt, rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
uint32_t result;
uint32_t result, val;
int qidx;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
@@ -14014,22 +14024,45 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;

/* Calculate delay multiplier from maximum interrupt per second */
result = phba->cfg_fcp_imax / phba->io_channel_irqs;
result = imax / phba->io_channel_irqs;
if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/result - 1;
if (dmult > LPFC_DMULT_MAX)
dmult = LPFC_DMULT_MAX;

cnt = 0;
for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
eq = phba->sli4_hba.hba_eq[qidx];
if (!eq)
continue;
eq->q_mode = imax;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)

/* q_mode is only used for auto_imax */
if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
/* Use EQ Delay Register method for q_mode */

/* Convert for EQ Delay register */
val = phba->cfg_fcp_imax;
if (val) {
/* First, interrupts per sec per EQ */
val = phba->cfg_fcp_imax /
phba->io_channel_irqs;

/* us delay between each interrupt */
val = LPFC_SEC_TO_USEC / val;
}
eq->q_mode = val;
} else {
eq->q_mode = imax;
}

if (cnt >= numq)
break;
}
eq_delay->u.request.num_eq = cnt;
@@ -16126,9 +16159,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}

static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;

/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@@ -16203,22 +16233,18 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}

lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
"2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
(fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
(fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
"Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
fc_hdr->fh_type, be32_to_cpu(header[0]),
be32_to_cpu(header[1]), be32_to_cpu(header[2]),
be32_to_cpu(header[3]), be32_to_cpu(header[4]),
be32_to_cpu(header[5]), be32_to_cpu(header[6]));
fc_hdr->fh_r_ctl, fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
be32_to_cpu(header[4]), be32_to_cpu(header[5]),
be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2539 Dropped frame rctl:%s type:%s\n",
lpfc_rctl_names[fc_hdr->fh_r_ctl],
lpfc_type_names[fc_hdr->fh_type]);
"2539 Dropped frame rctl:x%x type:x%x\n",
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
return 1;
}

@@ -321,6 +321,7 @@ struct lpfc_sli {
#define LPFC_MENLO_MAINT 0x1000 /* need for menlo fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */

struct lpfc_sli_ring *sli3_ring;

|
@@ -168,7 +168,7 @@ struct lpfc_queue {
|
||||
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
|
||||
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
|
||||
|
||||
uint16_t sgl_list_cnt;
|
||||
uint32_t q_mode;
|
||||
uint16_t db_format;
|
||||
#define LPFC_DB_RING_FORMAT 0x01
|
||||
#define LPFC_DB_LIST_FORMAT 0x02
|
||||
@@ -181,7 +181,7 @@ struct lpfc_queue {
|
||||
/* defines for EQ stats */
|
||||
#define EQ_max_eqe q_cnt_1
|
||||
#define EQ_no_entry q_cnt_2
|
||||
#define EQ_badstate q_cnt_3
|
||||
#define EQ_cqe_cnt q_cnt_3
|
||||
#define EQ_processed q_cnt_4
|
||||
|
||||
/* defines for CQ stats */
|
||||
@@ -407,8 +407,10 @@ struct lpfc_max_cfg_param {
|
||||
|
||||
struct lpfc_hba;
|
||||
/* SLI4 HBA multi-fcp queue handler struct */
|
||||
#define LPFC_SLI4_HANDLER_NAME_SZ 16
|
||||
struct lpfc_hba_eq_hdl {
|
||||
uint32_t idx;
|
||||
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
|
||||
struct lpfc_hba *phba;
|
||||
atomic_t hba_eq_in_use;
|
||||
struct cpumask *cpumask;
|
||||
@@ -480,7 +482,6 @@ struct lpfc_sli4_lnk_info {
|
||||
|
||||
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
|
||||
LPFC_FOF_IO_CHAN_NUM)
|
||||
#define LPFC_SLI4_HANDLER_NAME_SZ 16
|
||||
|
||||
/* Used for IRQ vector to CPU mapping */
|
||||
struct lpfc_vector_map_info {
|
||||
@@ -522,6 +523,7 @@ struct lpfc_sli4_hba {
|
||||
#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
|
||||
#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
|
||||
#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
|
||||
void __iomem *EQDregaddr;
|
||||
} if_type2;
|
||||
} u;
|
||||
|
||||
@@ -548,7 +550,6 @@ struct lpfc_sli4_hba {
|
||||
uint32_t ue_to_rp;
|
||||
struct lpfc_register sli_intf;
|
||||
struct lpfc_pc_sli4_params pc_sli4_params;
|
||||
uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
|
||||
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
|
||||
|
||||
/* Pointers to the constructed SLI4 queues */
|
||||
@@ -620,7 +621,8 @@ struct lpfc_sli4_hba {
|
||||
uint16_t scsi_xri_start;
|
||||
uint16_t els_xri_cnt;
|
||||
uint16_t nvmet_xri_cnt;
|
||||
uint16_t nvmet_ctx_cnt;
|
||||
uint16_t nvmet_ctx_get_cnt;
|
||||
uint16_t nvmet_ctx_put_cnt;
|
||||
uint16_t nvmet_io_wait_cnt;
|
||||
uint16_t nvmet_io_wait_total;
|
||||
struct list_head lpfc_els_sgl_list;
|
||||
@@ -629,7 +631,8 @@ struct lpfc_sli4_hba {
|
||||
struct list_head lpfc_abts_nvmet_ctx_list;
|
||||
struct list_head lpfc_abts_scsi_buf_list;
|
||||
struct list_head lpfc_abts_nvme_buf_list;
|
||||
struct list_head lpfc_nvmet_ctx_list;
|
||||
struct list_head lpfc_nvmet_ctx_get_list;
|
||||
struct list_head lpfc_nvmet_ctx_put_list;
|
||||
struct list_head lpfc_nvmet_io_wait_list;
|
||||
struct lpfc_sglq **lpfc_sglq_active_list;
|
||||
struct list_head lpfc_rpi_hdr_list;
|
||||
@@ -661,7 +664,8 @@ struct lpfc_sli4_hba {
|
||||
spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
spinlock_t sgl_list_lock; /* list of aborted els IOs */
|
||||
spinlock_t nvmet_io_lock;
|
||||
spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
|
||||
spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
|
||||
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
|
||||
uint32_t physical_port;
|
||||
|
||||
@@ -755,7 +759,8 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
|
||||
uint32_t);
|
||||
void lpfc_sli4_queue_free(struct lpfc_queue *);
|
||||
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
|
||||
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
|
||||
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
|
||||
uint32_t numq, uint32_t imax);
|
||||
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
|
||||
struct lpfc_queue *, uint32_t, uint32_t);
|
||||
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
|
||||
|
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/

#define LPFC_DRIVER_VERSION "11.2.0.14"
#define LPFC_DRIVER_VERSION "11.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"

/* Used for SLI 2/3 */