Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This is mostly update of the usual drivers: qla2xxx, ufs, smartpqi,
  lpfc, hisi_sas, qedf, mpt3sas; plus a whole load of minor updates.

  The only core change this time around is the addition of request
  batching for virtio. Since batching requires an additional flag to
  use, it should be invisible to the rest of the drivers"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (264 commits)
  scsi: hisi_sas: Fix the conflict between device gone and host reset
  scsi: hisi_sas: Add BIST support for phy loopback
  scsi: hisi_sas: Add hisi_sas_debugfs_alloc() to centralise allocation
  scsi: hisi_sas: Remove some unused function arguments
  scsi: hisi_sas: Remove redundant work declaration
  scsi: hisi_sas: Remove hisi_sas_hw.slot_complete
  scsi: hisi_sas: Assign NCQ tag for all NCQ commands
  scsi: hisi_sas: Update all the registers after suspend and resume
  scsi: hisi_sas: Retry 3 times TMF IO for SAS disks when init device
  scsi: hisi_sas: Remove sleep after issue phy reset if sas_smp_phy_control() fails
  scsi: hisi_sas: Directly return when running I_T_nexus reset if phy disabled
  scsi: hisi_sas: Use true/false as input parameter of sas_phy_reset()
  scsi: hisi_sas: add debugfs auto-trigger for internal abort time out
  scsi: virtio_scsi: unplug LUNs when events missed
  scsi: scsi_dh_rdac: zero cdb in send_mode_select()
  scsi: fcoe: fix null-ptr-deref Read in fc_release_transport
  scsi: ufs-hisi: use devm_platform_ioremap_resource() to simplify code
  scsi: ufshcd: use devm_platform_ioremap_resource() to simplify code
  scsi: hisi_sas: use devm_platform_ioremap_resource() to simplify code
  scsi: ufs: Use kmemdup in ufshcd_read_string_desc()
  ...
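The request-batching change referenced above hangs off blk-mq's existing
batching interfaces. The following is a hedged sketch only, not the actual
virtio_scsi patch: the mydrv_* names are hypothetical, while the .commit_rqs
hook and the bd->last flag are the real blk-mq interfaces the SCSI core
change builds on. A driver that batches its doorbell kicks looks roughly
like this:

  /*
   * Illustrative sketch (hypothetical driver). A queued request is posted
   * to the hardware ring immediately, but the expensive doorbell/kick is
   * deferred until the last request of a batch.
   */
  #include <linux/blk-mq.h>

  /* Hypothetical hardware helpers, assumed to exist for this sketch. */
  static void mydrv_post_to_ring(void *hw, struct request *rq);
  static void mydrv_kick_hw(void *hw);

  static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
  {
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	mydrv_post_to_ring(hctx->driver_data, rq);

	/*
	 * bd->last is false while more requests from the same batch are
	 * still coming; skip the kick until the final one arrives.
	 */
	if (bd->last)
		mydrv_kick_hw(hctx->driver_data);
	return BLK_STS_OK;
  }

  /* Invoked by blk-mq if a batch ends without a bd->last dispatch. */
  static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
  {
	mydrv_kick_hw(hctx->driver_data);
  }

  static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	.commit_rqs	= mydrv_commit_rqs,
  };

Only a driver that opts in to a commit_rqs-style handler sees the new
behavior, which is why the pull message can say the change is invisible to
the rest of the drivers.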
@@ -51,6 +51,8 @@ struct lpfc_sli2_slim;
 					   cmnd for menlo needs nearly twice as for firmware
 					   downloads using bsg */
 
+#define LPFC_DEFAULT_XPSGL_SIZE	256
+#define LPFC_MAX_SG_TABLESIZE	0xffff
 #define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
 #define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
 #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd */
@@ -732,14 +734,13 @@ struct lpfc_hba {
 #define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
 #define HBA_DEVLOSS_TMO		0x2000 /* HBA in devloss timeout */
 #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */
-#define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */
+#define HBA_IOQ_FLUSH		0x8000 /* FCP/NVME I/O queues being flushed */
 #define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */
 #define HBA_RECOVERABLE_UE	0x20000 /* Firmware supports recoverable UE */
 #define HBA_FORCED_LINK_SPEED	0x40000 /*
					 * Firmware supports Forced Link Speed
					 * capability
					 */
-#define HBA_NVME_IOQ_FLUSH	0x80000 /* NVME IO queues flushed. */
+#define HBA_FLOGI_ISSUED	0x100000 /* FLOGI was issued */
 
	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
@@ -795,10 +796,12 @@ struct lpfc_hba {
	uint8_t  mds_diags_support;
	uint8_t  bbcredit_support;
	uint8_t  enab_exp_wqcq_pages;
+	u8	 nsler; /* Firmware supports FC-NVMe-2 SLER */
 
	/* HBA Config Parameters */
	uint32_t cfg_ack0;
	uint32_t cfg_xri_rebalancing;
+	uint32_t cfg_xpsgl;
	uint32_t cfg_enable_npiv;
	uint32_t cfg_enable_rrq;
	uint32_t cfg_topology;
@@ -905,6 +908,7 @@ struct lpfc_hba {
	wait_queue_head_t work_waitq;
	struct task_struct *worker_thread;
	unsigned long data_flags;
+	uint32_t border_sge_num;
 
	uint32_t hbq_in_use;		/* HBQs in use flag */
	uint32_t hbq_count;		/* Count of configured HBQs */
@@ -987,6 +991,7 @@ struct lpfc_hba {
	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
	struct dma_pool *txrdy_payload_pool;
+	struct dma_pool *lpfc_cmd_rsp_buf_pool;
	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
	mempool_t *mbox_mem_pool;
@@ -1034,8 +1039,6 @@ struct lpfc_hba {
	struct dentry *debug_hbqinfo;
	struct dentry *debug_dumpHostSlim;
	struct dentry *debug_dumpHBASlim;
-	struct dentry *debug_dumpData;   /* BlockGuard BPL */
-	struct dentry *debug_dumpDif;    /* BlockGuard BPL */
	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
@@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
	lpfc_vpd_t *vp = &phba->vpd;
 
	lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
-	return scnprintf(buf, PAGE_SIZE, "%s\n", hdw);
+	return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
+			 vp->rev.smRev, vp->rev.smFwRev);
 }
 
 /**
@@ -3682,8 +3683,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
		if (rport)
			remoteport = rport->remoteport;
		spin_unlock(&vport->phba->hbalock);
-		if (remoteport)
-			nvme_fc_set_remoteport_devloss(rport->remoteport,
+		if (rport && remoteport)
+			nvme_fc_set_remoteport_devloss(remoteport,
						       vport->cfg_devloss_tmo);
 #endif
 }
@@ -5467,15 +5468,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
 * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
 * For the Initiator (I), enabling this parameter means that an NVMET
 * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
-* processed by the initiator for subsequent NVME FCP IO. For the target
-* function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
-* driver parameter as the target function's first burst size returned to the
-* initiator in the target's NVME PRLI response. Parameter supported on physical
-* port only - no NPIV support.
+* processed by the initiator for subsequent NVME FCP IO.
+* Currently, this feature is not supported on the NVME target
 * Value range is [0,1]. Default value is 0 (disabled).
 */
 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
-	     "Enable First Burst feature on I and T functions.");
+	     "Enable First Burst feature for NVME Initiator.");
 
 /*
 # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
@@ -5927,7 +5925,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
 *       1  = MDS Diagnostics enabled
 * Value range is [0,1]. Default value is 0.
 */
-LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
+LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
 
 /*
 * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
@@ -6859,10 +6857,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget)
 static void
 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
 {
+	struct lpfc_rport_data *rdata = rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct lpfc_nvme_rport *nrport = NULL;
+#endif
+
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
+
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+		dev_info(&rport->dev, "Cannot find remote node to "
+			 "set rport dev loss tmo, port_id x%x\n",
+			 rport->port_id);
+		return;
+	}
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	nrport = lpfc_ndlp_get_nrport(ndlp);
+
+	if (nrport && nrport->remoteport)
+		nvme_fc_set_remoteport_devloss(nrport->remoteport,
+					       rport->dev_loss_tmo);
+#endif
 }
 
 /**
@@ -7058,6 +7077,21 @@ struct fc_function_template lpfc_vport_transport_functions = {
	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
 };
 
+/**
+ * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE
+ * Mode
+ * @phba: lpfc_hba pointer.
+ **/
+static void
+lpfc_get_hba_function_mode(struct lpfc_hba *phba)
+{
+	/* If it's a SkyHawk FCoE adapter */
+	if (phba->pcidev->device == PCI_DEVICE_ID_SKYHAWK)
+		phba->hba_flag |= HBA_FCOE_MODE;
+	else
+		phba->hba_flag &= ~HBA_FCOE_MODE;
+}
+
 /**
  * lpfc_get_cfgparam - Used during probe_one to init the adapter structure
  * @phba: lpfc_hba pointer.
@@ -7114,8 +7148,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
	else
		phba->cfg_poll = lpfc_poll;
 
-	if (phba->cfg_enable_bg)
+	/* Get the function mode */
+	lpfc_get_hba_function_mode(phba);
+
+	/* BlockGuard allowed for FC only. */
+	if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"0581 BlockGuard feature not supported\n");
+		/* If set, clear the BlockGuard support param */
+		phba->cfg_enable_bg = 0;
+	} else if (phba->cfg_enable_bg) {
		phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
+	}
 
	lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
@@ -7175,16 +7219,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
	lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
	lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
 
-
-	/* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
-	 * accommodate 512K and 1M IOs in a single nvme buf and supply
-	 * enough NVME LS iocb buffers for larger connectivity counts.
-	 */
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
-		phba->cfg_iocb_cnt = 5;
-	}
-
	return;
 }
@@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
-						"found for iocbq 0x%p\n",
+						"found for iocbq x%px\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
@@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job)
	return 0; /* call job done later */
 
 job_error:
-	if (dd_data != NULL)
-		kfree(dd_data);
-
+	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
 }
@@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
 
-	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
@@ -5451,7 +5448,9 @@ ras_job_error:
	bsg_reply->result = rc;
 
	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
	return rc;
 }
@@ -5530,8 +5529,9 @@ ras_job_error:
	bsg_reply->result = rc;
 
	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
	return rc;
 }
@@ -5591,7 +5591,9 @@ ras_job_error:
	bsg_reply->result = rc;
 
	/* complete the job back to userspace */
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
	return rc;
 }
@@ -5673,7 +5675,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
 
 ras_job_error:
	bsg_reply->result = rc;
-	bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
 
	return rc;
 }
@@ -5744,8 +5748,9 @@ lpfc_get_trunk_info(struct bsg_job *job)
		phba->sli4_hba.link_state.logical_speed / 1000;
 job_error:
	bsg_reply->result = rc;
-	bsg_job_done(job, bsg_reply->result,
-		     bsg_reply->reply_payload_rcv_len);
+	if (!rc)
+		bsg_job_done(job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
	return rc;
 
 }
@@ -326,7 +326,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
 void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);
 void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
-void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
+void lpfc_sli_flush_io_rings(struct lpfc_hba *phba);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
			     struct lpfc_dmabuf *);
 struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *,
@@ -433,16 +433,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t,
 int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t,
				   uint16_t *, uint16_t *);
 
-/* externs BlockGuard */
-extern char *_dump_buf_data;
-extern unsigned long _dump_buf_data_order;
-extern char *_dump_buf_dif;
-extern unsigned long _dump_buf_dif_order;
-extern spinlock_t _dump_buf_lock;
-extern int _dump_buf_done;
-extern spinlock_t pgcnt_lock;
-extern unsigned int pgcnt;
-
 /* Interface exported by fabric iocb scheduler */
 void lpfc_fabric_abort_nport(struct lpfc_nodelist *);
 void lpfc_fabric_abort_hba(struct lpfc_hba *);
@@ -595,6 +585,7 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
			 struct lpfc_sli4_hdw_queue *qp);
 void lpfc_nvme_cmd_template(void);
 void lpfc_nvmet_cmd_template(void);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
 extern int lpfc_no_hba_reset_cnt;
@@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
	struct lpfc_nodelist *ndlp;
 
	if ((vport->port_type != LPFC_NPIV_PORT) ||
+	    (fc4_type == FC_TYPE_FCP) ||
	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
 
		ndlp = lpfc_setup_disc_node(vport, Did);
@@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0238 Process x%06x NameServer Rsp "
-				 "Data: x%x x%x x%x x%x\n", Did,
+				 "Data: x%x x%x x%x x%x x%x\n", Did,
				 ndlp->nlp_flag, ndlp->nlp_fc4_type,
-				 vport->fc_flag,
+				 ndlp->nlp_state, vport->fc_flag,
				 vport->fc_rscn_id_cnt);
+
+		/* if ndlp needs to be discovered and prior
+		 * state of ndlp hit devloss, change state to
+		 * allow rediscovery.
+		 */
+		if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
+		    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
+			lpfc_nlp_set_state(vport, ndlp,
+					   NLP_STE_NPR_NODE);
+		}
	} else {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0239 Skip x%06x NameServer Rsp "
-				 "Data: x%x x%x\n", Did,
-				 vport->fc_flag,
-				 vport->fc_rscn_id_cnt);
+				 "Data: x%x x%x %p\n",
+				 Did, vport->fc_flag,
+				 vport->fc_rscn_id_cnt, ndlp);
	}
 } else {
	if (!(vport->fc_flag & FC_RSCN_MODE) ||
@@ -751,9 +762,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		if (CTrsp->CommandResponse.bits.CmdRsp ==
		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "0208 NameServer Rsp Data: x%x x%x\n",
+					 "0208 NameServer Rsp Data: x%x x%x "
+					 "sz x%x\n",
					 vport->fc_flag,
-					 CTreq->un.gid.Fc4Type);
+					 CTreq->un.gid.Fc4Type,
+					 irsp->un.genreq64.bdl.bdeSize);
 
			lpfc_ns_rsp(vport,
				    outp,
@@ -814,6 +827,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		}
		vport->gidft_inp--;
	}
+
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+			 "4216 GID_FT cmpl inp %d disc %d\n",
+			 vport->gidft_inp, vport->num_disc_nodes);
+
	/* Link up / RSCN discovery */
	if ((vport->num_disc_nodes == 0) &&
	    (vport->gidft_inp == 0)) {
@@ -1209,14 +1227,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
-					 "3064 Setting ndlp %p, DID x%06x with "
-					 "FC4 x%08x, Data: x%08x x%08x\n",
+					 "3064 Setting ndlp x%px, DID x%06x "
+					 "with FC4 x%08x, Data: x%08x x%08x "
+					 "%d\n",
					 ndlp, did, ndlp->nlp_fc4_type,
-					 FC_TYPE_FCP, FC_TYPE_NVME);
-			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+					 FC_TYPE_FCP, FC_TYPE_NVME,
+					 ndlp->nlp_state);
 
-			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-			lpfc_issue_els_prli(vport, ndlp, 0);
+			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
+			    ndlp->nlp_fc4_type) {
+				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_PRLI_ISSUE);
+				lpfc_issue_els_prli(vport, ndlp, 0);
+			} else if (!ndlp->nlp_fc4_type) {
+				/* If fc4 type is still unknown, then LOGO */
+				lpfc_printf_vlog(vport, KERN_INFO,
+						 LOG_DISCOVERY,
+						 "6443 Sending LOGO ndlp x%px,"
+						 "DID x%06x with fc4_type: "
+						 "x%08x, state: %d\n",
+						 ndlp, did, ndlp->nlp_fc4_type,
+						 ndlp->nlp_state);
+				lpfc_issue_els_logo(vport, ndlp, 0);
+				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+				lpfc_nlp_set_state(vport, ndlp,
+						   NLP_STE_NPR_NODE);
+			}
		}
	} else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
@@ -2515,7 +2553,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 
	hsp = (struct serv_parm *)&vport->fc_sparam;
-	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			 (uint32_t) hsp->cmn.bbRcvSizeLsb;
	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
	size = FOURBYTES + sizeof(uint32_t);
@@ -361,7 +361,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
				len += scnprintf(buf+len, size-len,
-					"Buf%d: %p %06x\n", i,
+					"Buf%d: x%px %06x\n", i,
					hbq_buf->dbuf.virt, hbq_buf->tag);
				found = 1;
				break;
@@ -416,8 +416,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
 
		len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
-		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
-		spin_lock(&qp->abts_nvme_buf_list_lock);
+		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		spin_lock(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);
		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
@@ -430,8 +429,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
			qp->abts_nvme_io_bufs, out);
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->io_buf_list_get_lock);
-		spin_unlock(&qp->abts_nvme_buf_list_lock);
-		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
+		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
 
		lpfc_debugfs_last_xripool++;
		if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
@@ -533,9 +531,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
			continue;
		pbl_pool = &multixri_pool->pbl_pool;
		pvt_pool = &multixri_pool->pvt_pool;
-		txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
-		if (qp->nvme_wq)
-			txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
+		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
 
		scnprintf(tmp, sizeof(tmp),
			  "%03d: %4d %4d %4d %4d | %10d %10d ",
@@ -2166,89 +2162,6 @@ out:
	return rc;
 }
 
-static int
-lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
-{
-	struct lpfc_debug *debug;
-	int rc = -ENOMEM;
-
-	if (!_dump_buf_data)
-		return -EBUSY;
-
-	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
-	if (!debug)
-		goto out;
-
-	/* Round to page boundary */
-	pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
-	       __func__, _dump_buf_data);
-	debug->buffer = _dump_buf_data;
-	if (!debug->buffer) {
-		kfree(debug);
-		goto out;
-	}
-
-	debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT;
-	file->private_data = debug;
-
-	rc = 0;
-out:
-	return rc;
-}
-
-static int
-lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
-{
-	struct lpfc_debug *debug;
-	int rc = -ENOMEM;
-
-	if (!_dump_buf_dif)
-		return -EBUSY;
-
-	debug = kmalloc(sizeof(*debug), GFP_KERNEL);
-	if (!debug)
-		goto out;
-
-	/* Round to page boundary */
-	pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
-	       __func__, _dump_buf_dif, file);
-	debug->buffer = _dump_buf_dif;
-	if (!debug->buffer) {
-		kfree(debug);
-		goto out;
-	}
-
-	debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT;
-	file->private_data = debug;
-
-	rc = 0;
-out:
-	return rc;
-}
-
-static ssize_t
-lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
-	size_t nbytes, loff_t *ppos)
-{
-	/*
-	 * The Data/DIF buffers only save one failing IO
-	 * The write op is used as a reset mechanism after an IO has
-	 * already been saved to the next one can be saved
-	 */
-	spin_lock(&_dump_buf_lock);
-
-	memset((void *)_dump_buf_data, 0,
-	       ((1 << PAGE_SHIFT) << _dump_buf_data_order));
-	memset((void *)_dump_buf_dif, 0,
-	       ((1 << PAGE_SHIFT) << _dump_buf_dif_order));
-
-	_dump_buf_done = 0;
-
-	spin_unlock(&_dump_buf_lock);
-
-	return nbytes;
-}
-
 static ssize_t
 lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
			  size_t nbytes, loff_t *ppos)
@@ -2461,17 +2374,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file)
	return 0;
 }
 
-static int
-lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
-{
-	struct lpfc_debug *debug = file->private_data;
-
-	debug->buffer = NULL;
-	kfree(debug);
-
-	return 0;
-}
-
 /**
  * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics
  * @file: The file pointer to read from.
@@ -3786,23 +3688,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
	int qidx;
 
	for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-		qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
+		qp = phba->sli4_hba.hdwq[qidx].io_wq;
		if (qp->assoc_qid != cq_id)
			continue;
		*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
		if (*len >= max_cnt)
			return 1;
	}
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-			qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
-			if (qp->assoc_qid != cq_id)
-				continue;
-			*len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
-			if (*len >= max_cnt)
-				return 1;
-		}
-	}
	return 0;
 }
@@ -3868,9 +3760,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
	struct lpfc_queue *qp;
	int rc;
 
-	qp = phba->sli4_hba.hdwq[eqidx].fcp_cq;
+	qp = phba->sli4_hba.hdwq[eqidx].io_cq;
 
-	*len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+	*len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len);
 
	/* Reset max counter */
	qp->CQ_max_cqe = 0;
@@ -3878,28 +3770,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
	if (*len >= max_cnt)
		return 1;
 
-	rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+	rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len,
				   max_cnt, qp->queue_id);
	if (rc)
		return 1;
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		qp = phba->sli4_hba.hdwq[eqidx].nvme_cq;
-
-		*len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
-
-		/* Reset max counter */
-		qp->CQ_max_cqe = 0;
-
-		if (*len >= max_cnt)
-			return 1;
-
-		rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
-					   max_cnt, qp->queue_id);
-		if (rc)
-			return 1;
-	}
-
	if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) {
		/* NVMET CQset */
		qp = phba->sli4_hba.nvmet_cqset[eqidx];
@@ -4348,7 +4223,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
		if (phba->sli4_hba.hdwq) {
			for (qidx = 0; qidx < phba->cfg_hdw_queue;
								qidx++) {
-				qp = phba->sli4_hba.hdwq[qidx].fcp_cq;
+				qp = phba->sli4_hba.hdwq[qidx].io_cq;
				if (qp && qp->queue_id == queid) {
					/* Sanity check */
					rc = lpfc_idiag_que_param_check(
@@ -4360,22 +4235,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
				}
			}
		}
-		/* NVME complete queue */
-		if (phba->sli4_hba.hdwq) {
-			qidx = 0;
-			do {
-				qp = phba->sli4_hba.hdwq[qidx].nvme_cq;
-				if (qp && qp->queue_id == queid) {
-					/* Sanity check */
-					rc = lpfc_idiag_que_param_check(
-						qp, index, count);
-					if (rc)
-						goto error_out;
-					idiag.ptr_private = qp;
-					goto pass_check;
-				}
-			} while (++qidx < phba->cfg_hdw_queue);
-		}
		goto error_out;
		break;
	case LPFC_IDIAG_MQ:
@@ -4419,20 +4278,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
		if (phba->sli4_hba.hdwq) {
-			/* FCP/SCSI work queue */
			for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-				qp = phba->sli4_hba.hdwq[qidx].fcp_wq;
-				if (qp && qp->queue_id == queid) {
-					/* Sanity check */
-					rc = lpfc_idiag_que_param_check(
-						qp, index, count);
-					if (rc)
-						goto error_out;
-					idiag.ptr_private = qp;
-					goto pass_check;
-				}
-			}
-			/* NVME work queue */
-			for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
-				qp = phba->sli4_hba.hdwq[qidx].nvme_wq;
+				qp = phba->sli4_hba.hdwq[qidx].io_wq;
				if (qp && qp->queue_id == queid) {
					/* Sanity check */
					rc = lpfc_idiag_que_param_check(
@@ -5508,26 +5354,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = {
	.release =      lpfc_debugfs_release,
 };
 
-#undef lpfc_debugfs_op_dumpData
-static const struct file_operations lpfc_debugfs_op_dumpData = {
-	.owner =        THIS_MODULE,
-	.open =         lpfc_debugfs_dumpData_open,
-	.llseek =       lpfc_debugfs_lseek,
-	.read =         lpfc_debugfs_read,
-	.write =        lpfc_debugfs_dumpDataDif_write,
-	.release =      lpfc_debugfs_dumpDataDif_release,
-};
-
-#undef lpfc_debugfs_op_dumpDif
-static const struct file_operations lpfc_debugfs_op_dumpDif = {
-	.owner =        THIS_MODULE,
-	.open =         lpfc_debugfs_dumpDif_open,
-	.llseek =       lpfc_debugfs_lseek,
-	.read =         lpfc_debugfs_read,
-	.write =        lpfc_debugfs_dumpDataDif_write,
-	.release =      lpfc_debugfs_dumpDataDif_release,
-};
-
 #undef lpfc_debugfs_op_dif_err
 static const struct file_operations lpfc_debugfs_op_dif_err = {
	.owner =        THIS_MODULE,
@@ -5924,20 +5750,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
	} else
		phba->debug_dumpHostSlim = NULL;
 
-	/* Setup dumpData */
-	snprintf(name, sizeof(name), "dumpData");
-	phba->debug_dumpData =
-		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
-				    phba->hba_debugfs_root,
-				    phba, &lpfc_debugfs_op_dumpData);
-
-	/* Setup dumpDif */
-	snprintf(name, sizeof(name), "dumpDif");
-	phba->debug_dumpDif =
-		debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
-				    phba->hba_debugfs_root,
-				    phba, &lpfc_debugfs_op_dumpDif);
-
	/* Setup DIF Error Injections */
	snprintf(name, sizeof(name), "InjErrLBA");
	phba->debug_InjErrLBA =
@@ -6315,12 +6127,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
	debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
	phba->debug_dumpHostSlim = NULL;
 
-	debugfs_remove(phba->debug_dumpData); /* dumpData */
-	phba->debug_dumpData = NULL;
-
-	debugfs_remove(phba->debug_dumpDif); /* dumpDif */
-	phba->debug_dumpDif = NULL;
-
	debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
	phba->debug_InjErrLBA = NULL;
 
@@ -6442,12 +6248,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
	lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
 
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-		lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-			lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
-	}
+		lpfc_debug_dump_wq(phba, DUMP_IO, idx);
 
	lpfc_debug_dump_hdr_rq(phba);
	lpfc_debug_dump_dat_rq(phba);
@@ -6459,12 +6260,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
	lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
 
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-		lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		for (idx = 0; idx < phba->cfg_hdw_queue; idx++)
-			lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
-	}
+		lpfc_debug_dump_cq(phba, DUMP_IO, idx);
 
	/*
	 * Dump Event Queues (EQs)
@@ -291,8 +291,7 @@ struct lpfc_idiag {
 #define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192
 
 enum {
-	DUMP_FCP,
-	DUMP_NVME,
+	DUMP_IO,
	DUMP_MBX,
	DUMP_ELS,
	DUMP_NVMELS,
@@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
	struct lpfc_queue *wq;
	char *qtypestr;
 
-	if (qtype == DUMP_FCP) {
-		wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
-		qtypestr = "FCP";
-	} else if (qtype == DUMP_NVME) {
-		wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
-		qtypestr = "NVME";
+	if (qtype == DUMP_IO) {
+		wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+		qtypestr = "IO";
	} else if (qtype == DUMP_MBX) {
		wq = phba->sli4_hba.mbx_wq;
		qtypestr = "MBX";
@@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
	} else
		return;
 
-	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+	if (qtype == DUMP_IO)
		pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
			qtypestr, wqidx, wq->queue_id);
	else
@@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
	char *qtypestr;
	int eqidx;
 
-	/* fcp/nvme wq and cq are 1:1, thus same indexes */
+	/* io wq and cq are 1:1, thus same indexes */
	eq = NULL;
 
-	if (qtype == DUMP_FCP) {
-		wq = phba->sli4_hba.hdwq[wqidx].fcp_wq;
-		cq = phba->sli4_hba.hdwq[wqidx].fcp_cq;
-		qtypestr = "FCP";
-	} else if (qtype == DUMP_NVME) {
-		wq = phba->sli4_hba.hdwq[wqidx].nvme_wq;
-		cq = phba->sli4_hba.hdwq[wqidx].nvme_cq;
-		qtypestr = "NVME";
+	if (qtype == DUMP_IO) {
+		wq = phba->sli4_hba.hdwq[wqidx].io_wq;
+		cq = phba->sli4_hba.hdwq[wqidx].io_cq;
+		qtypestr = "IO";
	} else if (qtype == DUMP_MBX) {
		wq = phba->sli4_hba.mbx_wq;
		cq = phba->sli4_hba.mbx_cq;
@@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
		eq = phba->sli4_hba.hdwq[0].hba_eq;
	}
 
-	if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+	if (qtype == DUMP_IO)
		pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
			"->EQ[Idx:%d|Qid:%d]:\n",
			qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
@@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
	int wq_idx;
 
	for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
-		if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid)
+		if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid)
			break;
	if (wq_idx < phba->cfg_hdw_queue) {
-		pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq);
-		return;
-	}
-
-	for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++)
-		if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid)
-			break;
-	if (wq_idx < phba->cfg_hdw_queue) {
-		pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq);
+		pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq);
		return;
	}
 
@@ -654,22 +637,12 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
	int cq_idx;
 
	for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
-		if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid)
+		if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)
			break;
 
	if (cq_idx < phba->cfg_hdw_queue) {
-		pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq);
-		return;
-	}
-
-	for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)
-		if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid)
-			break;
-
-	if (cq_idx < phba->cfg_hdw_queue) {
-		pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
-		lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq);
+		pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+		lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);
		return;
	}
 
@@ -112,6 +112,8 @@ struct lpfc_nodelist {
	uint8_t		nlp_retry;		/* used for ELS retries */
	uint8_t		nlp_fcp_info;		/* class info, bits 0-3 */
 #define NLP_FCP_2_DEVICE	0x10		/* FCP-2 device */
+	u8		nlp_nvme_info;		/* NVME NSLER Support */
+#define NLP_NVME_NSLER		0x1		/* NVME NSLER device */
 
	uint16_t	nlp_usg_map;	/* ndlp management usage bitmap */
 #define NLP_USG_NODE_ACT_BIT	0x1	/* Indicate ndlp is actively used */
@@ -157,6 +159,7 @@ struct lpfc_node_rrq {
 /* Defines for nlp_flag (uint32) */
 #define NLP_IGNR_REG_CMPL  0x00000001 /* Rcvd rscn before we cmpl reg login */
 #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_RELEASE_RPI    0x00000004 /* Release RPI to free pool */
 #define NLP_SUPPRESS_RSP   0x00000010 /* Remote NPort supports suppress rsp */
 #define NLP_PLOGI_SND      0x00000020 /* sent PLOGI request for this entry */
 #define NLP_PRLI_SND       0x00000040 /* sent PRLI request for this entry */
@@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi:
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;
 
-		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
-				 "0150 FLOGI failure Status:x%x/x%x "
-				 "xri x%x TMO:x%x\n",
-				 irsp->ulpStatus, irsp->un.ulpWord[4],
-				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
-
		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;
 
+		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+				 "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
+				 irsp->ulpStatus, irsp->un.ulpWord[4],
+				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
+
		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
@@ -1206,6 +1207,39 @@ out:
	lpfc_els_free_iocb(phba, cmdiocb);
 }
 
+/**
+ * lpfc_cmpl_els_link_down - Completion callback function for ELS command
+ * aborted during a link down
+ * @phba: pointer to lpfc hba data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @rspiocb: pointer to lpfc response iocb data structure.
+ *
+ */
+static void
+lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+			struct lpfc_iocbq *rspiocb)
+{
+	IOCB_t *irsp;
+	uint32_t *pcmd;
+	uint32_t cmd;
+
+	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
+	cmd = *pcmd;
+	irsp = &rspiocb->iocb;
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+			"6445 ELS completes after LINK_DOWN: "
+			" Status %x/%x cmd x%x flg x%x\n",
+			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
+			cmdiocb->iocb_flag);
+
+	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
+		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
+		atomic_dec(&phba->fabric_iocb_count);
+	}
+	lpfc_els_free_iocb(phba, cmdiocb);
+}
+
 /**
  * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
  * @vport: pointer to a host virtual N_Port data structure.
@@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
	    !(vport->fc_flag & FC_OFFLINE_MODE)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "4110 Issue PLOGI x%x deferred "
-				 "on NPort x%x rpi x%x Data: %p\n",
+				 "on NPort x%x rpi x%x Data: x%px\n",
				 ndlp->nlp_defer_did, ndlp->nlp_DID,
				 ndlp->nlp_rpi, ndlp);
 
@@ -2401,6 +2435,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
+		if (phba->nsler) {
+			bf_set(prli_nsler, npr_nvme, 1);
+			bf_set(prli_conf, npr_nvme, 1);
+		}
 
		/* Only initiators request first burst. */
		if ((phba->cfg_nvme_enable_fb) &&
@@ -4203,7 +4241,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
	mempool_free(pmb, phba->mbox_mem_pool);
	if (ndlp) {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
-				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
+				 "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp->nlp_usg_map, ndlp);
@@ -5634,16 +5672,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
	if (vport->fc_flag & FC_FABRIC) {
		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
-			sizeof(desc->port_names.wwnn));
+		       sizeof(desc->port_names.wwnn));
 
		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
-			sizeof(desc->port_names.wwpn));
+		       sizeof(desc->port_names.wwpn));
	} else {  /* Point to Point */
		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
-			sizeof(desc->port_names.wwnn));
+		       sizeof(desc->port_names.wwnn));
 
-		memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
-			sizeof(desc->port_names.wwpn));
+		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
+		       sizeof(desc->port_names.wwpn));
	}
 
	desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -6327,7 +6365,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
			continue;
		}
 
-		if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+		/* Check to see if we need to NVME rescan this target
+		 * remoteport.
+		 */
+		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
			lpfc_nvme_rescan_port(vport, ndlp);
 
		lpfc_disc_state_machine(vport, ndlp, NULL,
@@ -6441,7 +6483,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
			*lp, vport->fc_flag, payload_len);
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 
-		if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
+		/* Check to see if we need to NVME rescan this target
+		 * remoteport.
+		 */
+		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
+		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
			lpfc_nvme_rescan_port(vport, ndlp);
		return 0;
	}
@@ -7960,18 +8006,40 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
 
+	/* First we need to issue aborts to outstanding cmds on txcmpl */
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
			continue;
 
		if (piocb->vport != vport)
			continue;
-		list_add_tail(&piocb->dlist, &abort_list);
+
+		/* On the ELS ring we can have ELS_REQUESTs or
+		 * GEN_REQUESTs waiting for a response.
+		 */
+		cmd = &piocb->iocb;
+		if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
+			list_add_tail(&piocb->dlist, &abort_list);
+
+			/* If the link is down when flushing ELS commands
+			 * the firmware will not complete them till after
+			 * the link comes back up. This may confuse
+			 * discovery for the new link up, so we need to
+			 * change the compl routine to just clean up the iocb
+			 * and avoid any retry logic.
+			 */
+			if (phba->link_state == LPFC_LINK_DOWN)
+				piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
+		}
+		if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
+			list_add_tail(&piocb->dlist, &abort_list);
	}
 
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);
-	/* Abort each iocb on the aborted list and remove the dlist links. */
+
+	/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&piocb->dlist);
@@ -7987,6 +8055,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock(&pring->ring_lock);
 
+	/* No need to abort the txq list,
+	 * just queue them up for lpfc_sli_cancel_iocbs
+	 */
	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
		cmd = &piocb->iocb;
 
@@ -8007,11 +8078,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
		list_del_init(&piocb->list);
		list_add_tail(&piocb->list, &abort_list);
	}
+
+	/* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
+	if (vport == phba->pport) {
+		list_for_each_entry_safe(piocb, tmp_iocb,
+					 &phba->fabric_iocb_list, list) {
+			cmd = &piocb->iocb;
+			list_del_init(&piocb->list);
+			list_add_tail(&piocb->list, &abort_list);
+		}
+	}
+
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);
 
-	/* Cancell all the IOCBs from the completions list */
+	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &abort_list,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
@@ -118,6 +118,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
struct lpfc_work_evt *evtp;
|
||||
int put_node;
|
||||
int put_rport;
|
||||
unsigned long iflags;
|
||||
|
||||
rdata = rport->dd_data;
|
||||
ndlp = rdata->pnode;
|
||||
@@ -132,7 +133,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
|
||||
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
|
||||
"3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
|
||||
"3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
|
||||
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
|
||||
|
||||
/* Don't defer this if we are in the process of deleting the vport
|
||||
@@ -170,22 +171,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
|
||||
}
|
||||
|
||||
shost = lpfc_shost_from_vport(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
|
||||
/* We need to hold the node by incrementing the reference
|
||||
* count until this queued work is done
|
||||
*/
|
||||
evtp->evt_arg1 = lpfc_nlp_get(ndlp);
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
if (evtp->evt_arg1) {
|
||||
evtp->evt = LPFC_EVT_DEV_LOSS;
|
||||
list_add_tail(&evtp->evt_listp, &phba->work_list);
|
||||
lpfc_worker_wake_up(phba);
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
|
||||
return;
|
||||
}
|
||||
@@ -212,14 +213,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
int put_node;
|
||||
int warn_on = 0;
|
||||
int fcf_inuse = 0;
|
||||
unsigned long iflags;
|
||||
|
||||
rport = ndlp->rport;
|
||||
vport = ndlp->vport;
|
||||
shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
|
||||
if (!rport)
|
||||
return fcf_inuse;
|
||||
@@ -235,7 +237,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
|
||||
ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
|
||||
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
|
||||
"3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
|
||||
"3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
|
||||
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
|
||||
|
||||
/*
|
||||
@@ -903,6 +905,8 @@ lpfc_linkdown(struct lpfc_hba *phba)
|
||||
phba->trunk_link.link1.state = 0;
|
||||
phba->trunk_link.link2.state = 0;
|
||||
phba->trunk_link.link3.state = 0;
|
||||
phba->sli4_hba.link_state.logical_speed =
|
||||
LPFC_LINK_SPEED_UNKNOWN;
|
||||
}
|
||||
spin_lock_irq(shost->host_lock);
|
||||
phba->pport->fc_flag &= ~FC_LBIT;
|
||||
@@ -3115,8 +3119,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
|
||||
int rc;
|
||||
struct fcf_record *fcf_record;
|
||||
uint32_t fc_flags = 0;
|
||||
unsigned long iflags;
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
|
||||
|
||||
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
|
||||
@@ -3213,12 +3218,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
|
||||
vport->fc_myDID = phba->fc_pref_DID;
|
||||
fc_flags |= FC_LBIT;
|
||||
}
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
|
||||
if (fc_flags) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
vport->fc_flag |= fc_flags;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
}
|
||||
|
||||
lpfc_linkup(phba);
|
||||
@@ -3292,22 +3297,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
|
||||
* The driver is expected to do FIP/FCF. Call the port
|
||||
* and get the FCF Table.
|
||||
*/
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
if (phba->hba_flag & FCF_TS_INPROG) {
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
return;
|
||||
}
|
||||
/* This is the initial FCF discovery scan */
|
||||
phba->fcf.fcf_flag |= FCF_INIT_DISC;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
|
||||
"2778 Start FCF table scan at linkup\n");
|
||||
rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
|
||||
LPFC_FCOE_FCF_GET_FIRST);
|
||||
if (rc) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
goto out;
|
||||
}
|
||||
/* Reset FCF roundrobin bmask for new discovery */
|
||||
@@ -3318,7 +3323,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
|
||||
out:
|
||||
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
|
||||
"0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
|
||||
"0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
|
||||
vport->port_state, sparam_mbox, cfglink_mbox);
|
||||
lpfc_issue_clear_la(phba, vport);
|
||||
return;
|
||||
@@ -3366,6 +3371,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
MAILBOX_t *mb = &pmb->u.mb;
|
||||
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
|
||||
uint8_t attn_type;
|
||||
unsigned long iflags;
|
||||
|
||||
/* Unblock ELS traffic */
|
||||
pring = lpfc_phba_elsring(phba);
|
||||
@@ -3387,12 +3393,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
|
||||
memcpy(&phba->alpa_map[0], mp->virt, 128);
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
if (bf_get(lpfc_mbx_read_top_pb, la))
|
||||
vport->fc_flag |= FC_BYPASSED_MODE;
|
||||
else
|
||||
vport->fc_flag &= ~FC_BYPASSED_MODE;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
|
||||
if (phba->fc_eventTag <= la->eventTag) {
|
||||
phba->fc_stat.LinkMultiEvent++;
|
||||
@@ -3403,12 +3409,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
|
||||
phba->fc_eventTag = la->eventTag;
|
||||
if (phba->sli_rev < LPFC_SLI_REV4) {
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
if (bf_get(lpfc_mbx_read_top_mm, la))
|
||||
phba->sli.sli_flag |= LPFC_MENLO_MAINT;
|
||||
else
|
||||
phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
}
|
||||
|
||||
phba->link_events++;
|
||||
@@ -3529,7 +3535,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
pmb->ctx_ndlp = NULL;
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
|
||||
"0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
|
||||
"0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
|
||||
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
|
||||
kref_read(&ndlp->kref),
|
||||
ndlp->nlp_usg_map, ndlp);
|
||||
@@ -4041,7 +4047,7 @@ out:
|
||||
ndlp->nlp_type |= NLP_FABRIC;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
|
||||
"0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
|
||||
"0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
|
||||
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
|
||||
kref_read(&ndlp->kref),
|
||||
ndlp->nlp_usg_map, ndlp);
|
||||
@@ -4160,7 +4166,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
fc_remote_port_rolechg(rport, rport_ids.roles);
|
||||
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
|
||||
"3183 rport register x%06x, rport %p role x%x\n",
|
||||
"3183 rport register x%06x, rport x%px role x%x\n",
|
||||
ndlp->nlp_DID, rport, rport_ids.roles);
|
||||
|
||||
if ((rport->scsi_target_id != -1) &&
|
||||
@@ -4184,7 +4190,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
|
||||
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
|
||||
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
|
||||
"3184 rport unregister x%06x, rport %p\n",
|
||||
"3184 rport unregister x%06x, rport x%px\n",
|
||||
ndlp->nlp_DID, rport);
|
||||
|
||||
fc_remote_port_delete(rport);
|
||||
@@ -4196,8 +4202,9 @@ static void
|
||||
lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
|
||||
{
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
unsigned long iflags;
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
spin_lock_irqsave(shost->host_lock, iflags);
|
||||
switch (state) {
|
||||
case NLP_STE_UNUSED_NODE:
|
||||
vport->fc_unused_cnt += count;
|
||||
@@ -4227,7 +4234,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
|
||||
vport->fc_npr_cnt += count;
|
||||
break;
|
||||
}
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
spin_unlock_irqrestore(shost->host_lock, iflags);
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -4480,9 +4487,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return NULL;

if (phba->sli_rev == LPFC_SLI_REV4) {
rpi = lpfc_sli4_alloc_rpi(vport->phba);
if (rpi == LPFC_RPI_ALLOC_ERROR)
if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
rpi = lpfc_sli4_alloc_rpi(vport->phba);
else
rpi = ndlp->nlp_rpi;

if (rpi == LPFC_RPI_ALLOC_ERROR) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0359 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d FAILED RPI "
" ALLOC\n",
__func__,
(void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return NULL;
}
}

spin_lock_irqsave(&phba->ndlp_lock, flags);
@@ -4490,9 +4509,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_FREE_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0277 lpfc_enable_node: ndlp:x%p "
"0277 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4500,9 +4519,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (NLP_CHK_NODE_ACT(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0278 lpfc_enable_node: ndlp:x%p "
"0278 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
goto free_rpi;
}
@@ -4532,7 +4551,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0008 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
"map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -4541,6 +4560,14 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

if (state != NLP_STE_UNUSED_NODE)
lpfc_nlp_set_state(vport, ndlp, state);
else
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0013 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x x%px STATE=UNUSED\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
"node enable: did:x%x",
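The lpfc_enable_node() hunk above stops churning RPIs on SLI-4: if the node still holds a valid RPI it is reused, and a fresh one is allocated only when nlp_rpi is LPFC_RPI_ALLOC_ERROR. A simplified sketch of the new decision, lifted from the diff (surrounding error logging trimmed):

	u16 rpi;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
			rpi = lpfc_sli4_alloc_rpi(vport->phba);
		else
			rpi = ndlp->nlp_rpi;	/* reuse the node's RPI */

		if (rpi == LPFC_RPI_ALLOC_ERROR)
			return NULL;		/* allocation failed */
	}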
@@ -4797,7 +4824,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
(ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1434 UNREG cmpl deferred logo x%x "
"on NPort x%x Data: x%x %p\n",
"on NPort x%x Data: x%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did, ndlp);

@@ -4805,6 +4832,10 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
} else {
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
}
ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
}
@@ -4843,7 +4874,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1436 unreg_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: %p\n",
"Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -4893,7 +4924,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1433 unreg_rpi UNREG x%x on "
"NPort x%x deferred flg x%x Data:%p\n",
"NPort x%x deferred flg x%x "
"Data:x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp);

@@ -5034,16 +5066,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
if (NLP_CHK_FREE_REQ(ndlp)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0280 lpfc_cleanup_node: ndlp:x%p "
"0280 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_dequeue_node(vport, ndlp);
} else {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0281 lpfc_cleanup_node: ndlp:x%p "
"0281 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
lpfc_disable_node(vport, ndlp);
}
@@ -5104,6 +5136,8 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
lpfc_cleanup_vports_rrqs(vport, ndlp);
if (phba->sli_rev == LPFC_SLI_REV4)
ndlp->nlp_flag |= NLP_RELEASE_RPI;
lpfc_unreg_rpi(vport, ndlp);

return 0;
@@ -5132,7 +5166,7 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* allocated by the firmware.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
"0005 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -5168,8 +5202,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* for registered rport so need to cleanup rport
*/
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
"0940 removed node x%p DID x%x "
" rport not null %p\n",
"0940 removed node x%px DID x%x "
" rport not null x%px\n",
ndlp, ndlp->nlp_DID, ndlp->rport);
rport = ndlp->rport;
rdata = rport->dd_data;
@@ -5243,15 +5277,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)

list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (lpfc_matchdid(vport, ndlp, did)) {
data1 = (((uint32_t) ndlp->nlp_state << 24) |
((uint32_t) ndlp->nlp_xri << 16) |
((uint32_t) ndlp->nlp_type << 8) |
((uint32_t) ndlp->nlp_rpi & 0xff));
data1 = (((uint32_t)ndlp->nlp_state << 24) |
((uint32_t)ndlp->nlp_xri << 16) |
((uint32_t)ndlp->nlp_type << 8) |
((uint32_t)ndlp->nlp_usg_map & 0xff));
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0929 FIND node DID "
"Data: x%p x%x x%x x%x %p\n",
"Data: x%px x%x x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
ndlp->nlp_flag, data1, ndlp->nlp_rpi,
ndlp->active_rrqs_xri_bitmap);
return ndlp;
}
@@ -5296,7 +5330,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport)
spin_unlock_irqrestore(shost->host_lock, iflags);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"2025 FIND node DID "
"Data: x%p x%x x%x x%x %p\n",
"Data: x%px x%x x%x x%x x%px\n",
ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, data1,
ndlp->active_rrqs_xri_bitmap);
@@ -5336,8 +5370,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
if (vport->phba->nvmet_support)
return NULL;
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
if (!ndlp)
if (!ndlp) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
"0014 Could not enable ndlp\n");
return NULL;
}
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5960,7 +5997,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
ndlp->nlp_type |= NLP_FABRIC;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
"0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6014,8 +6051,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (filter(ndlp, param)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3185 FIND node filter %p DID "
"ndlp %p did x%x flg x%x st x%x "
"3185 FIND node filter %ps DID "
"ndlp x%px did x%x flg x%x st x%x "
"xri x%x type x%x rpi x%x\n",
filter, ndlp, ndlp->nlp_DID,
ndlp->nlp_flag, ndlp->nlp_state,
@@ -6025,7 +6062,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
}
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3186 FIND node filter %p NOT FOUND.\n", filter);
"3186 FIND node filter %ps NOT FOUND.\n", filter);
return NULL;
}

@@ -6065,10 +6102,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
unsigned long flags;

spin_lock_irq(shost->host_lock);
spin_lock_irqsave(shost->host_lock, flags);
ndlp = __lpfc_findnode_rpi(vport, rpi);
spin_unlock_irq(shost->host_lock);
spin_unlock_irqrestore(shost->host_lock, flags);
return ndlp;
}

@@ -6149,7 +6187,7 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_rpi = rpi;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"0007 rpi:%x DID:%x flg:%x refcnt:%d "
"map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
"map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_flag,
kref_read(&ndlp->kref),
ndlp->nlp_usg_map, ndlp);
@@ -6187,8 +6225,9 @@ lpfc_nlp_release(struct kref *kref)
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"0279 lpfc_nlp_release: ndlp:x%p did %x "
"0279 %s: ndlp:x%px did %x "
"usgmap:x%x refcnt:%d rpi:%x\n",
__func__,
(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
kref_read(&ndlp->kref), ndlp->nlp_rpi);

@@ -6200,8 +6239,6 @@ lpfc_nlp_release(struct kref *kref)
spin_lock_irqsave(&phba->ndlp_lock, flags);
NLP_CLR_NODE_ACT(ndlp);
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4)
lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);

/* free ndlp memory for final ndlp release */
if (NLP_CHK_FREE_REQ(ndlp)) {
@@ -6237,9 +6274,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp)
if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
"0276 lpfc_nlp_get: ndlp:x%p "
"0276 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return NULL;
} else
@@ -6265,9 +6302,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
return 1;

lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
"node put: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
"node put: did:x%x flg:x%x refcnt:x%x",
ndlp->nlp_DID, ndlp->nlp_flag,
kref_read(&ndlp->kref));
phba = ndlp->phba;
spin_lock_irqsave(&phba->ndlp_lock, flags);
/* Check the ndlp memory free acknowledge flag to avoid the
@@ -6277,9 +6314,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_FREE_ACK(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
"0274 lpfc_nlp_put: ndlp:x%p "
"0274 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
@@ -6290,9 +6327,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp)
if (NLP_CHK_IACT_REQ(ndlp)) {
spin_unlock_irqrestore(&phba->ndlp_lock, flags);
lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
"0275 lpfc_nlp_put: ndlp:x%p "
"0275 %s: ndlp:x%px "
"usgmap:x%x refcnt:%d\n",
(void *)ndlp, ndlp->nlp_usg_map,
__func__, (void *)ndlp, ndlp->nlp_usg_map,
kref_read(&ndlp->kref));
return 1;
}
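Most of the churn in this file is the %p to x%px printk conversion. Since v4.15 a bare %p prints a hashed pointer value, which made these discovery logs useless for matching a node or rport across trace lines; %px emits the raw address and is intended for exactly this kind of debug output. A contrast (show_node() is illustrative only):

#include <linux/printk.h>

static void show_node(const void *ndlp)
{
	pr_info("ndlp %p\n", ndlp);	/* hashed since v4.15 */
	pr_info("ndlp x%px\n", ndlp);	/* literal 'x' plus the raw address */
}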
@@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */
struct lpfc_name portName;
struct lpfc_name nodeName;
uint32_t DID;
} ADISC;
} __packed ADISC;

typedef struct _FARP { /* Structure is in Big Endian format */
uint32_t Mflags:8;
@@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */
uint32_t Fdid;
struct lpfc_name FportName;
struct lpfc_name FnodeName;
} FAN;
} __packed FAN;

typedef struct _SCR { /* Structure is in Big Endian format */
uint8_t resvd1;
@@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */
union {
RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */
} un;
} RNID;
} __packed RNID;

typedef struct _RPS { /* Structure is in Big Endian format */
union {
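The ADISC, FAN, and RNID typedefs above describe big-endian FC payloads, so they gain __packed to keep the compiler from inserting padding that would shift fields off their wire offsets. A minimal sketch of the idiom (wire_frame and its fields are made up for illustration):

#include <linux/types.h>

struct wire_frame {
	u8	names[16];	/* two 8-byte world-wide names */
	u32	did;		/* 24-bit DID in the low bytes */
} __packed;			/* sizeof() == exact wire size, no padding */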
@@ -2050,6 +2050,23 @@ struct sli4_sge { /* SLI-4 */
uint32_t sge_len;
};

struct sli4_hybrid_sgl {
struct list_head list_node;
struct sli4_sge *dma_sgl;
dma_addr_t dma_phys_sgl;
};

struct fcp_cmd_rsp_buf {
struct list_head list_node;

/* for storing cmd/rsp dma alloc'ed virt_addr */
struct fcp_cmnd *fcp_cmnd;
struct fcp_rsp *fcp_rsp;

/* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */
dma_addr_t fcp_cmd_rsp_dma_handle;
};

struct sli4_sge_diseed { /* SLI-4 */
uint32_t ref_tag;
uint32_t ref_tag_tran;
@@ -3449,6 +3466,9 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
#define cfg_xpsgl_SHIFT 6
#define cfg_xpsgl_MASK 0x00000001
#define cfg_xpsgl_WORD word19
#define cfg_eqdr_SHIFT 8
#define cfg_eqdr_MASK 0x00000001
#define cfg_eqdr_WORD word19
@@ -3460,6 +3480,10 @@ struct lpfc_sli4_parameters {
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19

#define cfg_nsler_SHIFT 12
#define cfg_nsler_MASK 0x00000001
#define cfg_nsler_WORD word19

uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
#define cfg_max_tow_xri_MASK 0x0000ffff
@@ -4314,6 +4338,12 @@ struct wqe_common {
#define wqe_rcvoxid_SHIFT 16
#define wqe_rcvoxid_MASK 0x0000FFFF
#define wqe_rcvoxid_WORD word9
#define wqe_sof_SHIFT 24
#define wqe_sof_MASK 0x000000FF
#define wqe_sof_WORD word9
#define wqe_eof_SHIFT 16
#define wqe_eof_MASK 0x000000FF
#define wqe_eof_WORD word9
uint32_t word10;
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
@@ -4595,6 +4625,7 @@ struct lpfc_nvme_prli {
#define prli_type_code_WORD word1
uint32_t word_rsvd2;
uint32_t word_rsvd3;

uint32_t word4;
#define prli_fba_SHIFT 0
#define prli_fba_MASK 0x00000001
@@ -4611,6 +4642,9 @@ struct lpfc_nvme_prli {
#define prli_conf_SHIFT 7
#define prli_conf_MASK 0x00000001
#define prli_conf_WORD word4
#define prli_nsler_SHIFT 8
#define prli_nsler_MASK 0x00000001
#define prli_nsler_WORD word4
uint32_t word5;
#define prli_fb_sz_SHIFT 0
#define prli_fb_sz_MASK 0x0000ffff
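The new cfg_xpsgl, cfg_nsler, and prli_nsler entries above follow the lpfc convention of a SHIFT/MASK/WORD macro triple per hardware bitfield, consumed by the driver's bf_set()/bf_get() helpers. Roughly, bf_set() is a token-pasting read-modify-write on the named word; a sketch of the shape (the real macros live in the driver headers, this is not a verbatim copy):

#define sketch_bf_set(name, ptr, value)				\
	((ptr)->name##_WORD = (((ptr)->name##_WORD &		\
		~(name##_MASK << name##_SHIFT)) |		\
		(((value) & name##_MASK) << name##_SHIFT)))

/* e.g. sketch_bf_set(cfg_nsler, params, 1) sets bit 12 of word19 */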
[Diff for this file not shown: the file is too large to render its changes.]
@@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
* Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* Description: Creates and allocates PCI pools lpfc_mbuf_pool,
* lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
* Notes: Not interrupt-safe. Must be called with no locks held. If any
@@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
int i;

if (phba->sli_rev == LPFC_SLI_REV4) {
/* Calculate alignment */
if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
i = phba->cfg_sg_dma_buf_size;
else
i = SLI4_PAGE_SIZE;

phba->lpfc_sg_dma_buf_pool =
dma_pool_create("lpfc_sg_dma_buf_pool",
&phba->pcidev->dev,
phba->cfg_sg_dma_buf_size,
i, 0);
if (!phba->lpfc_sg_dma_buf_pool)
goto fail;

} else {
phba->lpfc_sg_dma_buf_pool =
dma_pool_create("lpfc_sg_dma_buf_pool",
&phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
align, 0);

if (!phba->lpfc_sg_dma_buf_pool)
goto fail;
}

phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
LPFC_BPL_SIZE,
align, 0);
if (!phba->lpfc_mbuf_pool)
goto fail_free_dma_buf_pool;
goto fail;

pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
sizeof(struct lpfc_dmabuf),
@@ -208,9 +184,6 @@ fail_free_drb_pool:
fail_free_lpfc_mbuf_pool:
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -248,25 +221,22 @@ lpfc_mem_free(struct lpfc_hba *phba)

/* Free HBQ pools */
lpfc_sli_hbqbuf_free_all(phba);
if (phba->lpfc_nvmet_drb_pool)
dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
phba->lpfc_nvmet_drb_pool = NULL;
if (phba->lpfc_drb_pool)
dma_pool_destroy(phba->lpfc_drb_pool);

dma_pool_destroy(phba->lpfc_drb_pool);
phba->lpfc_drb_pool = NULL;
if (phba->lpfc_hrb_pool)
dma_pool_destroy(phba->lpfc_hrb_pool);

dma_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
if (phba->txrdy_payload_pool)
dma_pool_destroy(phba->txrdy_payload_pool);

dma_pool_destroy(phba->txrdy_payload_pool);
phba->txrdy_payload_pool = NULL;

if (phba->lpfc_hbq_pool)
dma_pool_destroy(phba->lpfc_hbq_pool);
dma_pool_destroy(phba->lpfc_hbq_pool);
phba->lpfc_hbq_pool = NULL;

if (phba->rrq_pool)
mempool_destroy(phba->rrq_pool);
mempool_destroy(phba->rrq_pool);
phba->rrq_pool = NULL;

/* Free NLP memory pool */
@@ -290,10 +260,6 @@ lpfc_mem_free(struct lpfc_hba *phba)
dma_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;

/* Free DMA buffer memory pool */
dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;

/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
/* Ensure all objects have been returned to the pool */
@@ -366,6 +332,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
/* Free and destroy all the allocated memory pools */
lpfc_mem_free(phba);

/* Free DMA buffer memory pool */
dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
phba->lpfc_sg_dma_buf_pool = NULL;

dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
phba->lpfc_cmd_rsp_buf_pool = NULL;

/* Free the iocb lookup array */
kfree(psli->iocbq_lookup);
psli->iocbq_lookup = NULL;
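The lpfc_mem_free() cleanups above rely on dma_pool_destroy() and mempool_destroy() being NULL-safe (like kfree()), so the `if (pool)` guards were redundant and are dropped. The resulting teardown idiom:

#include <linux/dmapool.h>

static void pool_teardown(struct dma_pool **pool)
{
	dma_pool_destroy(*pool);	/* NULL is a harmless no-op */
	*pool = NULL;			/* guard against double destroy */
}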
@@ -614,7 +614,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
out:
/* If we are authenticated, move to the proper state */
if (ndlp->nlp_type & NLP_FCP_TARGET)
if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
else
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
@@ -799,9 +799,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
if (npr->Retry)
if (npr->Retry && ndlp->nlp_type &
(NLP_FCP_INITIATOR | NLP_FCP_TARGET))
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;

if (npr->Retry && phba->nsler &&
ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;

/* If this driver is in nvme target mode, set the ndlp's fc4
* type to NVME provided the PRLI response claims NVME FC4
* type. Target mode does not issue gft_id so doesn't get
@@ -885,7 +891,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport,
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"1435 release_rpi SKIP UNREG x%x on "
"NPort x%x deferred x%x flg x%x "
"Data: %p\n",
"Data: x%px\n",
ndlp->nlp_rpi, ndlp->nlp_DID,
ndlp->nlp_defer_did,
ndlp->nlp_flag, ndlp);
@@ -1661,6 +1667,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
struct lpfc_dmabuf *mp;
struct lpfc_nodelist *ns_ndlp;

cmdiocb = (struct lpfc_iocbq *) arg;

@@ -1693,6 +1700,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);

/* software abort if any GID_FT is outstanding */
if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp))
lpfc_els_abort(phba, ns_ndlp);
}

lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -1814,7 +1828,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,

ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
if (lpfc_issue_els_prli(vport, ndlp, 0)) {
lpfc_issue_els_logo(vport, ndlp, 0);
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
} else {
if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
phba->targetport->port_id = vport->fc_myDID;
@@ -2012,6 +2030,11 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_init, nvpr))
ndlp->nlp_type |= NLP_NVME_INITIATOR;

if (phba->nsler && bf_get_be32(prli_nsler, nvpr))
ndlp->nlp_nvme_info |= NLP_NVME_NSLER;
else
ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;

/* Target driver cannot solicit NVME FB. */
if (bf_get_be32(prli_tgt, nvpr)) {
/* Complete the nvme target roles. The transport
@@ -2891,18 +2914,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
uint32_t);
uint32_t got_ndlp = 0;
uint32_t data1;

if (lpfc_nlp_get(ndlp))
got_ndlp = 1;

cur_state = ndlp->nlp_state;

data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
((uint32_t)ndlp->nlp_type));
/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0211 DSM in event x%x on NPort x%x in "
"state %d rpi x%x Data: x%x x%x\n",
evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi,
ndlp->nlp_flag, ndlp->nlp_fc4_type);
ndlp->nlp_flag, data1);

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM in: evt:%d ste:%d did:x%x",
@@ -2913,10 +2939,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

/* DSM out state <rc> on NPort <nlp_DID> */
if (got_ndlp) {
data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
((uint32_t)ndlp->nlp_type));
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
"0212 DSM out state %d on NPort x%x "
"rpi x%x Data: x%x\n",
rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag);
"rpi x%x Data: x%x x%x\n",
rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag,
data1);

lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
"DSM out: ste:%d did:x%x flg:x%x",
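In the lpfc_disc_state_machine() hunks above, two node attributes are packed into one data1 word so the DSM log lines can carry both without growing the format string. The packing comes straight from the diff; a worked view of the layout:

	/* data1 layout: bits 31:16 = nlp_fc4_type, bits 15:0 = nlp_type */
	uint32_t data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) |
			  ((uint32_t)ndlp->nlp_type));
	/* e.g. fc4_type 0x4 with type 0x80 packs to 0x00040080 */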
@@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6073 Binding %s HdwQueue %d (cpu %d) to "
"hdw_queue %d qhandle %p\n", str,
"hdw_queue %d qhandle x%px\n", str,
qidx, qhandle->cpu_id, qhandle->index, qhandle);
*handle = (void *)qhandle;
return 0;
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
"6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
lport, qidx, handle);
kfree(handle);
}
@@ -293,7 +293,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
struct lpfc_nvme_lport *lport = localport->private;

lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
"6173 localport %p delete complete\n",
"6173 localport x%px delete complete\n",
lport);

/* release any threads waiting for the unreg to complete */
@@ -332,7 +332,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
* calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6146 remoteport delete of remoteport %p\n",
"6146 remoteport delete of remoteport x%px\n",
remoteport);
spin_lock_irq(&vport->phba->hbalock);

@@ -383,8 +383,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6047 nvme cmpl Enter "
"Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
"lsreg:%p bmp:%p ndlp:%p\n",
"Data %px DID %x Xri: %x status %x reason x%x "
"cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status,
(wcqe->parameter & 0xffff),
@@ -404,7 +404,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
else
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6046 nvme cmpl without done call back? "
"Data %p DID %x Xri: %x status %x\n",
"Data %px DID %x Xri: %x status %x\n",
pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
cmdwqe->sli4_xritag, status);
if (ndlp) {
@@ -436,6 +436,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
return 1;

wqe = &genwqe->wqe;
/* Initialize only 64 bytes */
memset(wqe, 0, sizeof(union lpfc_wqe));

genwqe->context3 = (uint8_t *)bmp;
@@ -516,7 +517,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
/* Issue GEN REQ WQE for NPORT <did> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"6050 Issue GEN REQ WQE to NPORT x%x "
"Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
"Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px "
"xmit:%d 1st:%d\n",
ndlp->nlp_DID, genwqe->iotag,
vport->port_state,
genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
@@ -594,7 +596,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6051 Remoteport %p, rport has invalid ndlp. "
"6051 Remoteport x%px, rport has invalid ndlp. "
"Failing LS Req\n", pnvme_rport);
return -ENODEV;
}
@@ -646,10 +648,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,

/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
"lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
ndlp->nlp_DID,
pnvme_lport, pnvme_rport,
"6149 Issue LS Req to DID 0x%06x lport x%px, "
"rport x%px lsreq x%px rqstlen:%d rsplen:%d "
"%pad %pad\n",
ndlp->nlp_DID, pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
@@ -665,8 +667,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
if (ret != WQE_SUCCESS) {
atomic_inc(&lport->xmt_ls_err);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6052 EXIT. issue ls wqe failed lport %p, "
"rport %p lsreq%p Status %x DID %x\n",
"6052 EXIT. issue ls wqe failed lport x%px, "
"rport x%px lsreq x%px Status %x DID %x\n",
pnvme_lport, pnvme_rport, pnvme_lsreq,
ret, ndlp->nlp_DID);
lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
@@ -723,7 +725,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,

/* Expand print to include key fields. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
"6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d "
"rsplen:%d %pad %pad\n",
pnvme_lport, pnvme_rport,
pnvme_lsreq, pnvme_lsreq->rqstlen,
@@ -984,8 +986,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
"6066 Missing cmpl ptrs: lpfc_ncmd %p, "
"nvmeCmd %p\n",
"6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
"nvmeCmd x%px\n",
lpfc_ncmd, lpfc_ncmd->nvmeCmd);

/* Release the lpfc_ncmd regardless of the missing elements. */
@@ -998,9 +1000,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

if (vport->localport) {
if (unlikely(status && vport->localport)) {
lport = (struct lpfc_nvme_lport *)vport->localport->private;
if (lport && status) {
if (lport) {
if (bf_get(lpfc_wcqe_c_xb, wcqe))
atomic_inc(&lport->cmpl_fcp_xb);
atomic_inc(&lport->cmpl_fcp_err);
@@ -1100,8 +1102,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_IOERR,
"6032 Delay Aborted cmd %p "
"nvme cmd %p, xri x%x, "
"6032 Delay Aborted cmd x%px "
"nvme cmd x%px, xri x%x, "
"xb %d\n",
lpfc_ncmd, nCmd,
lpfc_ncmd->cur_iocbq.sli4_xritag,
@@ -1140,7 +1142,7 @@ out_err:
phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
lpfc_nvme_ktime(phba, lpfc_ncmd);
}
if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
uint32_t cpu;
idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
cpu = raw_smp_processor_id();
@@ -1253,6 +1255,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
sizeof(uint32_t) * 8);
cstat->control_requests++;
}

if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
/*
* Finish initializing those WQE fields that are independent
* of the nvme_cmnd request_buffer
@@ -1304,14 +1309,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
struct sli4_hybrid_sgl *sgl_xtra = NULL;
struct scatterlist *data_sg;
struct sli4_sge *first_data_sgl;
struct ulp_bde64 *bde;
dma_addr_t physaddr;
dma_addr_t physaddr = 0;
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_len = 0;
uint32_t dma_offset = 0;
int nseg, i;
int nseg, i, j;
bool lsp_just_set = false;

/* Fix up the command and response DMA stuff. */
lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
@@ -1348,6 +1355,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
*/
nseg = nCmd->sg_cnt;
data_sg = nCmd->first_sgl;

/* for tracking the segment boundaries */
j = 2;
for (i = 0; i < nseg; i++) {
if (data_sg == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
@@ -1356,23 +1366,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
lpfc_ncmd->seg_cnt = 0;
return 1;
}
physaddr = data_sg->dma_address;
dma_len = data_sg->length;
sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
sgl->word2 = le32_to_cpu(sgl->word2);
if ((num_bde + 1) == nseg)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(dma_len);

dma_offset += dma_len;
data_sg = sg_next(data_sg);
sgl++;
sgl->word2 = 0;
if ((num_bde + 1) == nseg) {
bf_set(lpfc_sli4_sge_last, sgl, 1);
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
} else {
bf_set(lpfc_sli4_sge_last, sgl, 0);

/* expand the segment */
if (!lsp_just_set &&
!((j + 1) % phba->border_sge_num) &&
((nseg - 1) != i)) {
/* set LSP type */
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_LSP);

sgl_xtra = lpfc_get_sgl_per_hdwq(
phba, lpfc_ncmd);

if (unlikely(!sgl_xtra)) {
lpfc_ncmd->seg_cnt = 0;
return 1;
}
sgl->addr_lo = cpu_to_le32(putPaddrLow(
sgl_xtra->dma_phys_sgl));
sgl->addr_hi = cpu_to_le32(putPaddrHigh(
sgl_xtra->dma_phys_sgl));

} else {
bf_set(lpfc_sli4_sge_type, sgl,
LPFC_SGE_TYPE_DATA);
}
}

if (!(bf_get(lpfc_sli4_sge_type, sgl) &
LPFC_SGE_TYPE_LSP)) {
if ((nseg - 1) == i)
bf_set(lpfc_sli4_sge_last, sgl, 1);

physaddr = data_sg->dma_address;
dma_len = data_sg->length;
sgl->addr_lo = cpu_to_le32(
putPaddrLow(physaddr));
sgl->addr_hi = cpu_to_le32(
putPaddrHigh(physaddr));

bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(dma_len);

dma_offset += dma_len;
data_sg = sg_next(data_sg);

sgl++;

lsp_just_set = false;
} else {
sgl->word2 = cpu_to_le32(sgl->word2);

sgl->sge_len = cpu_to_le32(
phba->cfg_sg_dma_buf_size);

sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
i = i - 1;

lsp_just_set = true;
}

j++;
}
if (phba->cfg_enable_pbde) {
/* Use PBDE support for first SGL only, offset == 0 */
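The big lpfc_nvme_prep_io_dma() rewrite above adds on-demand SGL chaining: when the walk reaches the hardware page boundary (j tested against phba->border_sge_num) the current slot becomes an LPFC_SGE_TYPE_LSP link to an extra page from lpfc_get_sgl_per_hdwq(), and the segment index is held back one step so the data SGE is retried in the new page. A condensed control-flow sketch; need_link(), write_link_sge(), and write_data_sge() are hypothetical helpers standing in for the bf_set() sequences in the diff:

	for (i = 0; i < nseg; i++) {
		if (need_link(j)) {		/* !((j + 1) % border_sge_num) */
			write_link_sge(sgl, sgl_xtra);	/* LSP entry, no data */
			sgl = sgl_xtra->dma_sgl;	/* continue in new page */
			i--;				/* redo this segment */
		} else {
			write_data_sge(sgl, data_sg);	/* ordinary data SGE */
			data_sg = sg_next(data_sg);
			sgl++;
		}
		j++;
	}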
@@ -1474,7 +1537,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_fail;
}

if (vport->load_flag & FC_UNLOADING) {
if (unlikely(vport->load_flag & FC_UNLOADING)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6124 Fail IO, Driver unload\n");
atomic_inc(&lport->xmt_fcp_err);
@@ -1505,8 +1568,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
ndlp = rport->ndlp;
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
"6053 Fail IO, ndlp not ready: rport %p "
"ndlp %p, DID x%06x\n",
"6053 Busy IO, ndlp not ready: rport x%px "
"ndlp x%px, DID x%06x\n",
rport, ndlp, pnvme_rport->port_id);
atomic_inc(&lport->xmt_fcp_err);
ret = -EBUSY;
@@ -1758,7 +1821,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Announce entry to new IO submit field. */
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6002 Abort Request to rport DID x%06x "
"for nvme_fc_req %p\n",
"for nvme_fc_req x%px\n",
pnvme_rport->port_id,
pnvme_fcreq);

@@ -1767,7 +1830,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6139 Driver in reset cleanup - flushing "
@@ -1805,8 +1868,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6143 NVME req mismatch: "
"lpfc_nbuf %p nvmeCmd %p, "
"pnvme_fcreq %p. Skipping Abort xri x%x\n",
"lpfc_nbuf x%px nvmeCmd x%px, "
"pnvme_fcreq x%px. Skipping Abort xri x%x\n",
lpfc_nbuf, lpfc_nbuf->nvmeCmd,
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1815,7 +1878,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* Don't abort IOs no longer on the pending queue. */
if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6142 NVME IO req %p not queued - skipping "
"6142 NVME IO req x%px not queued - skipping "
"abort req xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1830,8 +1893,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6144 Outstanding NVME I/O Abort Request "
"still pending on nvme_fcreq %p, "
"lpfc_ncmd %p xri x%x\n",
"still pending on nvme_fcreq x%px, "
"lpfc_ncmd %px xri x%x\n",
pnvme_fcreq, lpfc_nbuf,
nvmereq_wqe->sli4_xritag);
goto out_unlock;
@@ -1841,7 +1904,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (!abts_buf) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6136 No available abort wqes. Skipping "
"Abts req for nvme_fcreq %p xri x%x\n",
"Abts req for nvme_fcreq x%px xri x%x\n",
pnvme_fcreq, nvmereq_wqe->sli4_xritag);
goto out_unlock;
}
@@ -1855,7 +1918,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
memset(abts_wqe, 0, sizeof(union lpfc_wqe));
memset(abts_wqe, 0, sizeof(*abts_wqe));
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

/* word 7 */
@@ -1892,7 +1955,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
"6137 Failed abts issue_wqe with status x%x "
"for nvme_fcreq %p.\n",
"for nvme_fcreq x%px.\n",
ret_val, pnvme_fcreq);
lpfc_sli_release_iocbq(phba, abts_buf);
return;
@@ -1982,7 +2045,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
sgl->word2 = cpu_to_le32(sgl->word2);
/* Fill in word 3 / sgl_len during cmd submission */

/* Initialize WQE */
/* Initialize 64 bytes only */
memset(wqe, 0, sizeof(union lpfc_wqe));

if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
@@ -2028,11 +2091,11 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_ncmd->cur_iocbq.iotag);

spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
&qp->lpfc_abts_nvme_buf_list);
&qp->lpfc_abts_io_buf_list);
qp->abts_nvme_io_bufs++;
spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
} else
lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
@@ -2095,8 +2158,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
"NVME port num %d, localP %p, private %p, "
"sg_seg %d\n",
"NVME port num %d, localP x%px, private "
"x%px, sg_seg %d\n",
localport->port_num, localport,
localport->private,
lpfc_nvme_template.max_sgl_segments);
@@ -2157,14 +2220,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
if (unlikely(!ret)) {
pending = 0;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
if (!pring)
continue;
if (pring->txcmplq_cnt)
pending += pring->txcmplq_cnt;
}
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6176 Lport %p Localport %p wait "
"6176 Lport x%px Localport x%px wait "
"timed out. Pending %d. Renewing.\n",
lport, vport->localport, pending);
continue;
@@ -2172,7 +2235,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
break;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
"6177 Lport %p Localport %p Complete Success\n",
"6177 Lport x%px Localport x%px Complete Success\n",
lport, vport->localport);
}
#endif
@@ -2203,7 +2266,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
"6011 Destroying NVME localport x%px\n",
localport);

/* lport's rport list is clear. Unregister
@@ -2253,12 +2316,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
if (!lport) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
"6171 Update NVME fail. localP %p, No lport\n",
"6171 Update NVME fail. localP x%px, No lport\n",
localport);
return;
}
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6012 Update NVME lport %p did x%x\n",
"6012 Update NVME lport x%px did x%x\n",
localport, vport->fc_myDID);

localport->port_id = vport->fc_myDID;
@@ -2268,7 +2331,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6030 bound lport %p to DID x%06x\n",
"6030 bound lport x%px to DID x%06x\n",
lport, localport->port_id);
#endif
}
@@ -2317,9 +2380,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
spin_unlock_irq(&vport->phba->hbalock);
if (!oldrport)
if (oldrport) {
prev_ndlp = oldrport->ndlp;
spin_unlock_irq(&vport->phba->hbalock);
} else {
spin_unlock_irq(&vport->phba->hbalock);
lpfc_nlp_get(ndlp);
}

ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
@@ -2338,25 +2405,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
/* New remoteport record does not guarantee valid
* host private memory area.
*/
prev_ndlp = oldrport->ndlp;
if (oldrport == remote_port->private) {
/* Same remoteport - ndlp should match.
* Just reuse.
*/
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
"remoteport %p wwpn 0x%llx, "
"Data: x%x x%x %p %p x%x x%06x\n",
"6014 Rebind lport to current "
"remoteport x%px wwpn 0x%llx, "
"Data: x%x x%x x%px x%px x%x "
" x%06x\n",
remote_port,
remote_port->port_name,
remote_port->port_id,
remote_port->port_role,
prev_ndlp,
oldrport->ndlp,
ndlp,
ndlp->nlp_type,
ndlp->nlp_DID);
return 0;

/* It's a complete rebind only if the driver
* is registering with the same ndlp. Otherwise
* the driver likely executed a node swap
* prior to this registration and the ndlp to
* remoteport binding needs to be redone.
*/
if (prev_ndlp == ndlp)
return 0;

}

/* Sever the ndlp<->rport association
@@ -2390,10 +2466,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_unlock_irq(&vport->phba->hbalock);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to "
"lport %p Remoteport %p rport %p WWNN 0x%llx, "
"6022 Bind lport x%px to remoteport x%px "
"rport x%px WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
"x%06x Role x%x, ndlp %p prev_ndlp %p\n",
"x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
lport, remote_port, rport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role,
@@ -2423,20 +2499,23 @@ void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
struct lpfc_nvme_rport *nrport;
struct nvme_fc_remote_port *remoteport = NULL;

rport = ndlp->nrport;
spin_lock_irq(&vport->phba->hbalock);
nrport = lpfc_ndlp_get_nrport(ndlp);
if (nrport)
remoteport = nrport->remoteport;
spin_unlock_irq(&vport->phba->hbalock);

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6170 Rescan NPort DID x%06x type x%x "
"state x%x rport %p\n",
ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
if (!rport)
goto input_err;
remoteport = rport->remoteport;
if (!remoteport)
goto input_err;
"state x%x nrport x%px remoteport x%px\n",
ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
nrport, remoteport);

if (!nrport || !remoteport)
goto rescan_exit;

/* Only rescan if we are an NVME target in the MAPPED state */
if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
@@ -2449,10 +2528,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_DID, remoteport->port_state);
}
return;
input_err:
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6169 State error: lport %p, rport%p FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
rescan_exit:
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6169 Skip NVME Rport Rescan, NVME remoteport "
"unregistered\n");
#endif
}

@@ -2499,7 +2578,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
goto input_err;

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport %p, portname x%llx, "
"6033 Unreg nvme remoteport x%px, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
remoteport, remoteport->port_name,
remoteport->port_id, remoteport->port_state,
@@ -2537,7 +2616,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
input_err:
#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6168 State error: lport %p, rport%p FCID x%06x\n",
"6168 State error: lport x%px, rport x%px FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}

@@ -2545,6 +2624,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
* @lpfc_ncmd: The nvme job structure for the request being aborted.
*
* This routine is invoked by the worker thread to process a SLI4 fast-path
* NVME aborted xri. Aborted NVME IO commands are completed to the transport
@@ -2552,59 +2632,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
**/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
struct sli4_wcqe_xri_aborted *axri,
struct lpfc_io_buf *lpfc_ncmd)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;

if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
qp = &phba->sli4_hba.hdwq[idx];
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&qp->abts_nvme_buf_list_lock);
list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
&qp->lpfc_abts_nvme_buf_list, list) {
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
list_del_init(&lpfc_ncmd->list);
qp->abts_nvme_io_bufs--;
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
spin_unlock(&qp->abts_nvme_buf_list_lock);

spin_unlock_irqrestore(&phba->hbalock, iflag);
ndlp = lpfc_ncmd->ndlp;
if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6311 nvme_cmd %p xri x%x tag x%x "
"abort complete and xri released\n",
lpfc_ncmd->nvmeCmd, xri,
lpfc_ncmd->cur_iocbq.iotag);

/* Aborted NVME commands are required to not complete
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
if (lpfc_ncmd->nvmeCmd) {
nvme_cmd = lpfc_ncmd->nvmeCmd;
nvme_cmd->done(nvme_cmd);
lpfc_ncmd->nvmeCmd = NULL;
}
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
}
spin_unlock(&qp->abts_nvme_buf_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp)
lpfc_sli4_abts_err_handler(phba, ndlp, axri);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6312 XRI Aborted xri x%x not found\n", xri);
"6311 nvme_cmd %p xri x%x tag x%x abort complete and "
"xri released\n",
lpfc_ncmd->nvmeCmd, xri,
lpfc_ncmd->cur_iocbq.iotag);

/* Aborted NVME commands are required to not complete
* before the abort exchange command fully completes.
* Once completed, it is available via the put list.
*/
if (lpfc_ncmd->nvmeCmd) {
nvme_cmd = lpfc_ncmd->nvmeCmd;
nvme_cmd->done(nvme_cmd);
lpfc_ncmd->nvmeCmd = NULL;
}
lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
@@ -2626,13 +2680,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
return;

/* Cycle through all NVME rings and make sure all outstanding
/* Cycle through all IO rings and make sure all outstanding
* WQEs have been removed from the txcmplqs.
*/
for (i = 0; i < phba->cfg_hdw_queue; i++) {
if (!phba->sli4_hba.hdwq[i].nvme_wq)
if (!phba->sli4_hba.hdwq[i].io_wq)
continue;
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
pring = phba->sli4_hba.hdwq[i].io_wq->pring;

if (!pring)
continue;
@@ -2653,3 +2707,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
}
}
}

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_io_buf *lpfc_ncmd;
struct nvmefc_fcp_req *nCmd;
struct lpfc_nvme_fcpreq_priv *freqpriv;

if (!pwqeIn->context1) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
/* For abort iocb just return, IO iocb will do a done call */
if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
CMD_ABORT_XRI_CX) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;

spin_lock(&lpfc_ncmd->buf_lock);
if (!lpfc_ncmd->nvmeCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}

nCmd = lpfc_ncmd->nvmeCmd;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6194 NVME Cancel xri %x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag);

nCmd->transferred_length = 0;
nCmd->rcv_rsplen = 0;
nCmd->status = NVME_SC_INTERNAL;
freqpriv = nCmd->private;
freqpriv->nvme_buf = NULL;
lpfc_ncmd->nvmeCmd = NULL;

spin_unlock(&lpfc_ncmd->buf_lock);
nCmd->done(nCmd);

/* Call release with XB=1 to queue the IO into the abort list. */
lpfc_release_nvme_buf(phba, lpfc_ncmd);
#endif
}
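One structural point in the lpfc_sli4_nvme_xri_aborted() rewrite above: the routine no longer searches the per-hardware-queue abort list for the XRI; the SLI layer resolves the aborted lpfc_io_buf and passes it in, so the list walk, the nested locking, and the 6312 "not found" path all go away. The two prototypes, as they appear in the diff:

/* before: look up the aborted buffer by XRI in hdwq[idx]'s abort list */
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
				struct sli4_wcqe_xri_aborted *axri, int idx);

/* after: the caller hands over the aborted buffer directly */
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
				struct sli4_wcqe_xri_aborted *axri,
				struct lpfc_io_buf *lpfc_ncmd);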
@@ -1026,7 +1026,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = ctxp->hdwq->nvme_wq;
wq = ctxp->hdwq->io_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1104,7 +1104,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
wq = ctxp->hdwq->nvme_wq;
wq = ctxp->hdwq->io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
}
@@ -1437,7 +1437,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
"MRQ %d: cnt %d nextcpu %p\n",
"MRQ %d: cnt %d nextcpu x%px\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
}
@@ -1500,7 +1500,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6026 Registered NVME "
"targetport: %p, private %p "
"targetport: x%px, private x%px "
"portnm %llx nodenm %llx segs %d qs %d\n",
phba->targetport, tgtp,
pinfo.port_name, pinfo.node_name,
@@ -1555,7 +1555,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;

lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6007 Update NVMET port %p did x%x\n",
"6007 Update NVMET port x%px did x%x\n",
phba->targetport, vport->fc_myDID);

phba->targetport->port_id = vport->fc_myDID;
@@ -1790,12 +1790,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
lpfc_nvmet_defer_release(phba, ctxp);
spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}
if (ctxp->state == LPFC_NVMET_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
else
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);

lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
return 0;
@@ -1922,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (phba->targetport) {
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
wq = phba->sli4_hba.hdwq[qidx].nvme_wq;
wq = phba->sli4_hba.hdwq[qidx].io_wq;
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
@@ -1930,7 +1926,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6179 Unreg targetport %p timeout "
"6179 Unreg targetport x%px timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
@@ -3113,7 +3109,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

@@ -3299,7 +3295,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
*/
spin_lock_irqsave(&phba->hbalock, flags);
/* driver queued commands are in process of being flushed */
if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
if (phba->hba_flag & HBA_IOQ_FLUSH) {
spin_unlock_irqrestore(&phba->hbalock, flags);
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
@@ -3334,7 +3330,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
/* WQEs are reused. Clear stale data and set key fields to
* zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
*/
memset(abts_wqe, 0, sizeof(union lpfc_wqe));
memset(abts_wqe, 0, sizeof(*abts_wqe));

/* word 3 */
bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
파일 크기가 너무 크기때문에 변경 상태를 표시하지 않습니다.
Load Diff
@@ -1391,9 +1391,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
|
||||
|
||||
while (!list_empty(iocblist)) {
|
||||
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
|
||||
if (!piocb->iocb_cmpl)
|
||||
lpfc_sli_release_iocbq(phba, piocb);
|
||||
else {
|
||||
if (!piocb->iocb_cmpl) {
|
||||
if (piocb->iocb_flag & LPFC_IO_NVME)
|
||||
lpfc_nvme_cancel_iocb(phba, piocb);
|
||||
else
|
||||
lpfc_sli_release_iocbq(phba, piocb);
|
||||
} else {
|
||||
piocb->iocb.ulpStatus = ulpstatus;
|
||||
piocb->iocb.un.ulpWord[4] = ulpWord4;
|
||||
(piocb->iocb_cmpl) (phba, piocb, piocb);
|
||||
@@ -2426,6 +2429,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
|
||||
return;
|
||||
}
|
||||
|
||||
static void
|
||||
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
{
|
||||
unsigned long iflags;
|
||||
|
||||
if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
|
||||
lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
|
||||
spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
|
||||
ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
|
||||
ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
|
||||
spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
|
||||
}
|
||||
ndlp->nlp_flag &= ~NLP_UNREG_INP;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
|
||||
@@ -2497,7 +2514,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
vport,
|
||||
KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
|
||||
"1438 UNREG cmpl deferred mbox x%x "
|
||||
"on NPort x%x Data: x%x x%x %p\n",
|
||||
"on NPort x%x Data: x%x x%x %px\n",
|
||||
ndlp->nlp_rpi, ndlp->nlp_DID,
|
||||
ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
|
||||
|
||||
@@ -2507,7 +2524,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
|
||||
lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
|
||||
} else {
|
||||
ndlp->nlp_flag &= ~NLP_UNREG_INP;
|
||||
__lpfc_sli_rpi_release(vport, ndlp);
|
||||
}
|
||||
pmb->ctx_ndlp = NULL;
|
||||
}
|
||||
@@ -2555,7 +2572,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
vport, KERN_INFO, LOG_MBOX | LOG_SLI,
|
||||
"0010 UNREG_LOGIN vpi:%x "
|
||||
"rpi:%x DID:%x defer x%x flg x%x "
|
||||
"map:%x %p\n",
|
||||
"map:%x %px\n",
|
||||
vport->vpi, ndlp->nlp_rpi,
|
||||
ndlp->nlp_DID, ndlp->nlp_defer_did,
|
||||
ndlp->nlp_flag,
|
||||
@@ -2573,7 +2590,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
vport, KERN_INFO, LOG_DISCOVERY,
|
||||
"4111 UNREG cmpl deferred "
|
||||
"clr x%x on "
|
||||
"NPort x%x Data: x%x %p\n",
|
||||
"NPort x%x Data: x%x x%px\n",
|
||||
ndlp->nlp_rpi, ndlp->nlp_DID,
|
||||
ndlp->nlp_defer_did, ndlp);
|
||||
ndlp->nlp_flag &= ~NLP_UNREG_INP;
|
||||
@@ -2582,7 +2599,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
|
||||
lpfc_issue_els_plogi(
|
||||
vport, ndlp->nlp_DID, 0);
|
||||
} else {
|
||||
ndlp->nlp_flag &= ~NLP_UNREG_INP;
|
||||
__lpfc_sli_rpi_release(vport, ndlp);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2695,7 +2712,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
|
||||
|
||||
/* Mailbox cmd <cmd> Cmpl <cmpl> */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
|
||||
"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
|
||||
"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
|
||||
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
|
||||
"x%x x%x x%x\n",
|
||||
pmb->vport ? pmb->vport->vpi : 0,
|
||||
@@ -3961,7 +3978,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
|
||||
/* Look on all the FCP Rings for the iotag */
|
||||
if (phba->sli_rev >= LPFC_SLI_REV4) {
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
|
||||
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
|
||||
lpfc_sli_abort_iocb_ring(phba, pring);
|
||||
}
|
||||
} else {
|
||||
@@ -3971,17 +3988,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
|
||||
* lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
|
||||
* @phba: Pointer to HBA context object.
|
||||
*
|
||||
* This function flushes all iocbs in the fcp ring and frees all the iocb
|
||||
* This function flushes all iocbs in the IO ring and frees all the iocb
|
||||
* objects in txq and txcmplq. This function will not issue abort iocbs
|
||||
* for all the iocb commands in txcmplq, they will just be returned with
|
||||
* IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
|
||||
* slot has been permanently disabled.
|
||||
**/
|
||||
void
|
||||
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
|
||||
lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
|
||||
{
|
||||
LIST_HEAD(txq);
|
||||
LIST_HEAD(txcmplq);
|
||||
@@ -3992,13 +4009,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
/* Indicate the I/O queues are flushed */
|
||||
phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
|
||||
phba->hba_flag |= HBA_IOQ_FLUSH;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* Look on all the FCP Rings for the iotag */
|
||||
if (phba->sli_rev >= LPFC_SLI_REV4) {
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
|
||||
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
|
||||
|
||||
spin_lock_irq(&pring->ring_lock);
|
||||
/* Retrieve everything on txq */
|
||||
@@ -4045,56 +4062,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
|
||||
* @phba: Pointer to HBA context object.
|
||||
*
|
||||
* This function flushes all wqes in the nvme rings and frees all resources
|
||||
* in the txcmplq. This function does not issue abort wqes for the IO
|
||||
* commands in txcmplq, they will just be returned with
|
||||
* IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
|
||||
* slot has been permanently disabled.
|
||||
**/
|
||||
void
|
||||
lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
|
||||
{
|
||||
LIST_HEAD(txcmplq);
|
||||
struct lpfc_sli_ring *pring;
|
||||
uint32_t i;
|
||||
struct lpfc_iocbq *piocb, *next_iocb;
|
||||
|
||||
if ((phba->sli_rev < LPFC_SLI_REV4) ||
|
||||
!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
|
||||
return;
|
||||
|
||||
/* Hint to other driver operations that a flush is in progress. */
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
|
||||
/* Cycle through all NVME rings and complete each IO with
|
||||
* a local driver reason code. This is a flush so no
|
||||
* abort exchange to FW.
|
||||
*/
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
|
||||
|
||||
spin_lock_irq(&pring->ring_lock);
|
||||
list_for_each_entry_safe(piocb, next_iocb,
|
||||
&pring->txcmplq, list)
|
||||
piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
|
||||
/* Retrieve everything on the txcmplq */
|
||||
list_splice_init(&pring->txcmplq, &txcmplq);
|
||||
pring->txcmplq_cnt = 0;
|
||||
spin_unlock_irq(&pring->ring_lock);
|
||||
|
||||
/* Flush the txcmpq &&&PAE */
|
||||
lpfc_sli_cancel_iocbs(phba, &txcmplq,
|
||||
IOSTAT_LOCAL_REJECT,
|
||||
IOERR_SLI_DOWN);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
|
||||
* @phba: Pointer to HBA context object.
|
||||
@@ -4495,7 +4462,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
|
||||
* checking during resets the device. The caller is not required to hold
|
||||
* any locks.
|
||||
*
|
||||
* This function returns 0 always.
|
||||
* This function returns 0 on success else returns negative error code.
|
||||
**/
|
||||
int
|
||||
lpfc_sli4_brdreset(struct lpfc_hba *phba)
|
||||
@@ -4652,8 +4619,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
|
||||
hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
|
||||
|
||||
rc = lpfc_sli4_brdreset(phba);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (rc) {
|
||||
phba->link_state = LPFC_HBA_ERROR;
|
||||
goto hba_down_queue;
|
||||
}
|
||||
|
||||
spin_lock_irq(&phba->hbalock);
|
||||
phba->pport->stopped = 0;
|
||||
@@ -4668,6 +4637,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
|
||||
if (hba_aer_enabled)
|
||||
pci_disable_pcie_error_reporting(phba->pcidev);
|
||||
|
||||
hba_down_queue:
|
||||
lpfc_hba_down_post(phba);
|
||||
lpfc_sli4_queue_destroy(phba);
|
||||
|
||||
@@ -5584,10 +5554,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
|
||||
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
|
||||
qp = &sli4_hba->hdwq[qidx];
|
||||
/* ARM the corresponding CQ */
|
||||
sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
|
||||
LPFC_QUEUE_REARM);
|
||||
sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
|
||||
LPFC_QUEUE_REARM);
|
||||
sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
|
||||
LPFC_QUEUE_REARM);
|
||||
}
|
||||
|
||||
/* Loop thru all IRQ vectors */
|
||||
@@ -7243,7 +7211,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
|
||||
else
|
||||
phba->hba_flag &= ~HBA_FIP_SUPPORT;
|
||||
|
||||
phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
|
||||
phba->hba_flag &= ~HBA_IOQ_FLUSH;
|
||||
|
||||
if (phba->sli_rev != LPFC_SLI_REV4) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
|
||||
@@ -7972,7 +7940,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
|
||||
|
||||
/* Mbox cmd <mbxCommand> timeout */
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
|
||||
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
|
||||
"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
|
||||
mb->mbxCommand,
|
||||
phba->pport->port_state,
|
||||
phba->sli.sli_flag,
|
||||
@@ -9333,11 +9301,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe128));
|
||||
/* Some of the fields are in the right position already */
|
||||
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
|
||||
if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
|
||||
/* The ct field has moved so reset */
|
||||
wqe->generic.wqe_com.word7 = 0;
|
||||
wqe->generic.wqe_com.word10 = 0;
|
||||
}
|
||||
/* The ct field has moved so reset */
|
||||
wqe->generic.wqe_com.word7 = 0;
|
||||
wqe->generic.wqe_com.word10 = 0;
|
||||
|
||||
abort_tag = (uint32_t) iocbq->iotag;
|
||||
xritag = iocbq->sli4_xritag;
|
||||
@@ -9796,7 +9762,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
* we re-construct this WQE here based on information in
|
||||
* iocbq from scratch.
|
||||
*/
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe));
|
||||
memset(wqe, 0, sizeof(*wqe));
|
||||
/* OX_ID is invariable to who sent ABTS to CT exchange */
|
||||
bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
|
||||
bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
|
||||
@@ -9843,6 +9809,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
|
||||
|
||||
break;
|
||||
case CMD_SEND_FRAME:
|
||||
bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
|
||||
bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
|
||||
bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
|
||||
bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
|
||||
bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
|
||||
bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
|
||||
bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
|
||||
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
|
||||
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
|
||||
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
|
||||
return 0;
|
||||
@@ -9904,7 +9879,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
|
||||
/* Get the WQ */
|
||||
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
|
||||
(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
|
||||
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
|
||||
wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
|
||||
} else {
|
||||
wq = phba->sli4_hba.els_wq;
|
||||
}
|
||||
@@ -10051,7 +10026,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
|
||||
lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
|
||||
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
|
||||
}
|
||||
return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
|
||||
return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
|
||||
} else {
|
||||
if (unlikely(!phba->sli4_hba.els_wq))
|
||||
return NULL;
|
||||
@@ -10504,7 +10479,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
|
||||
INIT_LIST_HEAD(&psli->mboxq_cmpl);
|
||||
/* Initialize list headers for txq and txcmplq as double linked lists */
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
|
||||
pring = phba->sli4_hba.hdwq[i].io_wq->pring;
|
||||
pring->flag = 0;
|
||||
pring->ringno = LPFC_FCP_RING;
|
||||
pring->txcmplq_cnt = 0;
|
||||
@@ -10523,16 +10498,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba)
|
||||
spin_lock_init(&pring->ring_lock);
|
||||
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
for (i = 0; i < phba->cfg_hdw_queue; i++) {
|
||||
pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
|
||||
pring->flag = 0;
|
||||
pring->ringno = LPFC_FCP_RING;
|
||||
pring->txcmplq_cnt = 0;
|
||||
INIT_LIST_HEAD(&pring->txq);
|
||||
INIT_LIST_HEAD(&pring->txcmplq);
|
||||
INIT_LIST_HEAD(&pring->iocb_continueq);
|
||||
spin_lock_init(&pring->ring_lock);
|
||||
}
|
||||
pring = phba->sli4_hba.nvmels_wq->pring;
|
||||
pring->flag = 0;
|
||||
pring->ringno = LPFC_ELS_RING;
|
||||
@@ -10796,9 +10761,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
|
||||
pring = qp->pring;
|
||||
if (!pring)
|
||||
continue;
|
||||
spin_lock_irq(&pring->ring_lock);
|
||||
spin_lock(&pring->ring_lock);
|
||||
list_splice_init(&pring->txq, &completions);
|
||||
spin_unlock_irq(&pring->ring_lock);
|
||||
spin_unlock(&pring->ring_lock);
|
||||
if (pring == phba->sli4_hba.els_wq->pring) {
|
||||
pring->flag |= LPFC_DEFERRED_RING_EVENT;
|
||||
/* Set the lpfc data pending flag */
|
||||
@@ -10979,7 +10944,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0402 Cannot find virtual addr for buffer tag on "
|
||||
"ring %d Data x%lx x%p x%p x%x\n",
|
||||
"ring %d Data x%lx x%px x%px x%x\n",
|
||||
pring->ringno, (unsigned long) tag,
|
||||
slp->next, slp->prev, pring->postbufq_cnt);
|
||||
|
||||
@@ -11023,7 +10988,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0410 Cannot find virtual addr for mapped buf on "
|
||||
"ring %d Data x%llx x%p x%p x%x\n",
|
||||
"ring %d Data x%llx x%px x%px x%x\n",
|
||||
pring->ringno, (unsigned long long)phys,
|
||||
slp->next, slp->prev, pring->postbufq_cnt);
|
||||
return NULL;
|
||||
@@ -11078,13 +11043,16 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
||||
abort_iocb = phba->sli.iocbq_lookup[abort_context];
|
||||
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
|
||||
"0327 Cannot abort els iocb %p "
|
||||
"0327 Cannot abort els iocb x%px "
|
||||
"with tag %x context %x, abort status %x, "
|
||||
"abort code %x\n",
|
||||
abort_iocb, abort_iotag, abort_context,
|
||||
irsp->ulpStatus, irsp->un.ulpWord[4]);
|
||||
|
||||
spin_unlock_irq(&phba->hbalock);
|
||||
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
|
||||
irsp->un.ulpWord[4] == IOERR_SLI_ABORTED)
|
||||
lpfc_sli_release_iocbq(phba, abort_iocb);
|
||||
}
|
||||
release_iocb:
|
||||
lpfc_sli_release_iocbq(phba, cmdiocb);
|
||||
@@ -11493,7 +11461,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
||||
int i;
|
||||
|
||||
/* all I/Os are in process of being flushed */
|
||||
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
|
||||
if (phba->hba_flag & HBA_IOQ_FLUSH)
|
||||
return errcnt;
|
||||
|
||||
for (i = 1; i <= phba->sli.last_iotag; i++) {
|
||||
@@ -11603,7 +11571,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
||||
spin_lock_irqsave(&phba->hbalock, iflags);
|
||||
|
||||
/* all I/Os are in process of being flushed */
|
||||
if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
|
||||
if (phba->hba_flag & HBA_IOQ_FLUSH) {
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
return 0;
|
||||
}
|
||||
@@ -11627,7 +11595,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
|
||||
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
pring_s4 =
|
||||
phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
|
||||
phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
|
||||
if (!pring_s4) {
|
||||
spin_unlock(&lpfc_cmd->buf_lock);
|
||||
continue;
|
||||
@@ -13336,8 +13304,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
|
||||
unsigned long iflags;
|
||||
|
||||
switch (cq->subtype) {
|
||||
case LPFC_FCP:
|
||||
lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
|
||||
case LPFC_IO:
|
||||
lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
|
||||
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
||||
/* Notify aborted XRI for NVME work queue */
|
||||
if (phba->nvmet_support)
|
||||
lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
|
||||
}
|
||||
workposted = false;
|
||||
break;
|
||||
case LPFC_NVME_LS: /* NVME LS uses ELS resources */
|
||||
@@ -13355,15 +13328,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
|
||||
spin_unlock_irqrestore(&phba->hbalock, iflags);
|
||||
workposted = true;
|
||||
break;
|
||||
case LPFC_NVME:
|
||||
/* Notify aborted XRI for NVME work queue */
|
||||
if (phba->nvmet_support)
|
||||
lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
|
||||
else
|
||||
lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);
|
||||
|
||||
workposted = false;
|
||||
break;
|
||||
default:
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"0603 Invalid CQ subtype %d: "
|
||||
@@ -13691,7 +13655,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
|
||||
&delay);
|
||||
break;
|
||||
case LPFC_WCQ:
|
||||
if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
|
||||
if (cq->subtype == LPFC_IO)
|
||||
workposted |= __lpfc_sli4_process_cq(phba, cq,
|
||||
lpfc_sli4_fp_handle_cqe,
|
||||
&delay);
|
||||
@@ -14008,10 +13972,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
|
||||
cq->CQ_wq++;
|
||||
/* Process the WQ complete event */
|
||||
phba->last_completion_time = jiffies;
|
||||
if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
|
||||
lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
|
||||
(struct lpfc_wcqe_complete *)&wcqe);
|
||||
if (cq->subtype == LPFC_NVME_LS)
|
||||
if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
|
||||
lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
|
||||
(struct lpfc_wcqe_complete *)&wcqe);
|
||||
break;
|
||||
@@ -16918,6 +16879,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
|
||||
struct fc_vft_header *fc_vft_hdr;
|
||||
uint32_t *header = (uint32_t *) fc_hdr;
|
||||
|
||||
#define FC_RCTL_MDS_DIAGS 0xF4
|
||||
|
||||
switch (fc_hdr->fh_r_ctl) {
|
||||
case FC_RCTL_DD_UNCAT: /* uncategorized information */
|
||||
case FC_RCTL_DD_SOL_DATA: /* solicited data */
|
||||
@@ -17445,7 +17408,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
|
||||
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
ctiocb->context1 = lpfc_nlp_get(ndlp);
|
||||
|
||||
ctiocb->iocb_cmpl = NULL;
|
||||
ctiocb->vport = phba->pport;
|
||||
ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
|
||||
ctiocb->sli4_lxritag = NO_XRI;
|
||||
@@ -17928,6 +17890,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
|
||||
fcfi = bf_get(lpfc_rcqe_fcf_id,
|
||||
&dmabuf->cq_event.cqe.rcqe_cmpl);
|
||||
|
||||
if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
|
||||
vport = phba->pport;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2023 MDS Loopback %d bytes\n",
|
||||
bf_get(lpfc_rcqe_length,
|
||||
&dmabuf->cq_event.cqe.rcqe_cmpl));
|
||||
/* Handle MDS Loopback frames */
|
||||
lpfc_sli4_handle_mds_loopback(vport, dmabuf);
|
||||
return;
|
||||
}
|
||||
|
||||
/* d_id this frame is directed to */
|
||||
did = sli4_did_from_fc_hdr(fc_hdr);
|
||||
|
||||
@@ -18211,6 +18184,10 @@ __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
|
||||
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
|
||||
phba->sli4_hba.rpi_count--;
|
||||
phba->sli4_hba.max_cfg_param.rpi_used--;
|
||||
} else {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"2016 rpi %x not inuse\n",
|
||||
rpi);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19461,7 +19438,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
|
||||
|
||||
if (phba->link_flag & LS_MDS_LOOPBACK) {
|
||||
/* MDS WQE are posted only to first WQ*/
|
||||
wq = phba->sli4_hba.hdwq[0].fcp_wq;
|
||||
wq = phba->sli4_hba.hdwq[0].io_wq;
|
||||
if (unlikely(!wq))
|
||||
return 0;
|
||||
pring = wq->pring;
|
||||
@@ -19712,10 +19689,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
|
||||
/* NVME_FCREQ and NVME_ABTS requests */
|
||||
if (pwqe->iocb_flag & LPFC_IO_NVME) {
|
||||
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
|
||||
wq = qp->nvme_wq;
|
||||
wq = qp->io_wq;
|
||||
pring = wq->pring;
|
||||
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
|
||||
|
||||
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
|
||||
qp, wq_access);
|
||||
@@ -19732,7 +19709,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
|
||||
/* NVMET requests */
|
||||
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
|
||||
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
|
||||
wq = qp->nvme_wq;
|
||||
wq = qp->io_wq;
|
||||
pring = wq->pring;
|
||||
|
||||
ctxp = pwqe->context2;
|
||||
@@ -19743,7 +19720,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
|
||||
}
|
||||
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
|
||||
pwqe->sli4_xritag);
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
|
||||
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
|
||||
|
||||
lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
|
||||
qp, wq_access);
|
||||
@@ -19790,9 +19767,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
|
||||
if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
|
||||
pvt_pool = &qp->p_multixri_pool->pvt_pool;
|
||||
pbl_pool = &qp->p_multixri_pool->pbl_pool;
|
||||
txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
|
||||
if (qp->nvme_wq)
|
||||
txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
|
||||
txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
|
||||
|
||||
multixri_pool->stat_pbl_count = pbl_pool->count;
|
||||
multixri_pool->stat_pvt_count = pvt_pool->count;
|
||||
@@ -19862,12 +19837,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
|
||||
watermark_max = xri_limit;
|
||||
watermark_min = xri_limit / 2;
|
||||
|
||||
txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
|
||||
txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
|
||||
abts_io_bufs = qp->abts_scsi_io_bufs;
|
||||
if (qp->nvme_wq) {
|
||||
txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
|
||||
abts_io_bufs += qp->abts_nvme_io_bufs;
|
||||
}
|
||||
abts_io_bufs += qp->abts_nvme_io_bufs;
|
||||
|
||||
new_watermark = txcmplq_cnt + abts_io_bufs;
|
||||
new_watermark = min(watermark_max, new_watermark);
|
||||
@@ -20142,12 +20114,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
|
||||
pbl_pool = &qp->p_multixri_pool->pbl_pool;
|
||||
pvt_pool = &qp->p_multixri_pool->pvt_pool;
|
||||
|
||||
txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
|
||||
txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
|
||||
abts_io_bufs = qp->abts_scsi_io_bufs;
|
||||
if (qp->nvme_wq) {
|
||||
txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
|
||||
abts_io_bufs += qp->abts_nvme_io_bufs;
|
||||
}
|
||||
abts_io_bufs += qp->abts_nvme_io_bufs;
|
||||
|
||||
xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
|
||||
xri_limit = qp->p_multixri_pool->xri_limit;
|
||||
@@ -20188,6 +20157,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
|
||||
spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
|
||||
iflag);
|
||||
}
|
||||
|
||||
if (phba->cfg_xpsgl && !phba->nvmet_support &&
|
||||
!list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
|
||||
lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
|
||||
|
||||
if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
|
||||
lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -20402,3 +20378,288 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
|
||||
|
||||
return lpfc_cmd;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
|
||||
* @phba: The HBA for which this call is being executed.
|
||||
* @lpfc_buf: IO buf structure to append the SGL chunk
|
||||
*
|
||||
* This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
|
||||
* and will allocate an SGL chunk if the pool is empty.
|
||||
*
|
||||
* Return codes:
|
||||
* NULL - Error
|
||||
* Pointer to sli4_hybrid_sgl - Success
|
||||
**/
|
||||
struct sli4_hybrid_sgl *
|
||||
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
|
||||
{
|
||||
struct sli4_hybrid_sgl *list_entry = NULL;
|
||||
struct sli4_hybrid_sgl *tmp = NULL;
|
||||
struct sli4_hybrid_sgl *allocated_sgl = NULL;
|
||||
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
|
||||
struct list_head *buf_list = &hdwq->sgl_list;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
if (likely(!list_empty(buf_list))) {
|
||||
/* break off 1 chunk from the sgl_list */
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
buf_list, list_node) {
|
||||
list_move_tail(&list_entry->list_node,
|
||||
&lpfc_buf->dma_sgl_xtra_list);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
/* allocate more */
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
|
||||
cpu_to_node(smp_processor_id()));
|
||||
if (!tmp) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"8353 error kmalloc memory for HDWQ "
|
||||
"%d %s\n",
|
||||
lpfc_buf->hdwq_no, __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
|
||||
GFP_ATOMIC, &tmp->dma_phys_sgl);
|
||||
if (!tmp->dma_sgl) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"8354 error pool_alloc memory for HDWQ "
|
||||
"%d %s\n",
|
||||
lpfc_buf->hdwq_no, __func__);
|
||||
kfree(tmp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
|
||||
}
|
||||
|
||||
allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
|
||||
struct sli4_hybrid_sgl,
|
||||
list_node);
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
return allocated_sgl;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
|
||||
* @phba: The HBA for which this call is being executed.
|
||||
* @lpfc_buf: IO buf structure with the SGL chunk
|
||||
*
|
||||
* This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
|
||||
*
|
||||
* Return codes:
|
||||
* 0 - Success
|
||||
* -EINVAL - Error
|
||||
**/
|
||||
int
|
||||
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
|
||||
{
|
||||
int rc = 0;
|
||||
struct sli4_hybrid_sgl *list_entry = NULL;
|
||||
struct sli4_hybrid_sgl *tmp = NULL;
|
||||
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
|
||||
struct list_head *buf_list = &hdwq->sgl_list;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
&lpfc_buf->dma_sgl_xtra_list,
|
||||
list_node) {
|
||||
list_move_tail(&list_entry->list_node,
|
||||
buf_list);
|
||||
}
|
||||
} else {
|
||||
rc = -EINVAL;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
|
||||
* @phba: phba object
|
||||
* @hdwq: hdwq to cleanup sgl buff resources on
|
||||
*
|
||||
* This routine frees all SGL chunks of hdwq SGL chunk pool.
|
||||
*
|
||||
* Return codes:
|
||||
* None
|
||||
**/
|
||||
void
|
||||
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_sli4_hdw_queue *hdwq)
|
||||
{
|
||||
struct list_head *buf_list = &hdwq->sgl_list;
|
||||
struct sli4_hybrid_sgl *list_entry = NULL;
|
||||
struct sli4_hybrid_sgl *tmp = NULL;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
/* Free sgl pool */
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
buf_list, list_node) {
|
||||
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
|
||||
list_entry->dma_sgl,
|
||||
list_entry->dma_phys_sgl);
|
||||
list_del(&list_entry->list_node);
|
||||
kfree(list_entry);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
|
||||
* @phba: The HBA for which this call is being executed.
|
||||
* @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
|
||||
*
|
||||
* This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
|
||||
* and will allocate an CMD/RSP buffer if the pool is empty.
|
||||
*
|
||||
* Return codes:
|
||||
* NULL - Error
|
||||
* Pointer to fcp_cmd_rsp_buf - Success
|
||||
**/
|
||||
struct fcp_cmd_rsp_buf *
|
||||
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_io_buf *lpfc_buf)
|
||||
{
|
||||
struct fcp_cmd_rsp_buf *list_entry = NULL;
|
||||
struct fcp_cmd_rsp_buf *tmp = NULL;
|
||||
struct fcp_cmd_rsp_buf *allocated_buf = NULL;
|
||||
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
|
||||
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
if (likely(!list_empty(buf_list))) {
|
||||
/* break off 1 chunk from the list */
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
buf_list,
|
||||
list_node) {
|
||||
list_move_tail(&list_entry->list_node,
|
||||
&lpfc_buf->dma_cmd_rsp_list);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
/* allocate more */
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
|
||||
cpu_to_node(smp_processor_id()));
|
||||
if (!tmp) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"8355 error kmalloc memory for HDWQ "
|
||||
"%d %s\n",
|
||||
lpfc_buf->hdwq_no, __func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
|
||||
GFP_ATOMIC,
|
||||
&tmp->fcp_cmd_rsp_dma_handle);
|
||||
|
||||
if (!tmp->fcp_cmnd) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
|
||||
"8356 error pool_alloc memory for HDWQ "
|
||||
"%d %s\n",
|
||||
lpfc_buf->hdwq_no, __func__);
|
||||
kfree(tmp);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
|
||||
sizeof(struct fcp_cmnd));
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
|
||||
}
|
||||
|
||||
allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
|
||||
struct fcp_cmd_rsp_buf,
|
||||
list_node);
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
return allocated_buf;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
|
||||
* @phba: The HBA for which this call is being executed.
|
||||
* @lpfc_buf: IO buf structure with the CMD/RSP buf
|
||||
*
|
||||
* This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
|
||||
*
|
||||
* Return codes:
|
||||
* 0 - Success
|
||||
* -EINVAL - Error
|
||||
**/
|
||||
int
|
||||
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_io_buf *lpfc_buf)
|
||||
{
|
||||
int rc = 0;
|
||||
struct fcp_cmd_rsp_buf *list_entry = NULL;
|
||||
struct fcp_cmd_rsp_buf *tmp = NULL;
|
||||
struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
|
||||
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
&lpfc_buf->dma_cmd_rsp_list,
|
||||
list_node) {
|
||||
list_move_tail(&list_entry->list_node,
|
||||
buf_list);
|
||||
}
|
||||
} else {
|
||||
rc = -EINVAL;
|
||||
}
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
|
||||
* @phba: phba object
|
||||
* @hdwq: hdwq to cleanup cmd rsp buff resources on
|
||||
*
|
||||
* This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
|
||||
*
|
||||
* Return codes:
|
||||
* None
|
||||
**/
|
||||
void
|
||||
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_sli4_hdw_queue *hdwq)
|
||||
{
|
||||
struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
|
||||
struct fcp_cmd_rsp_buf *list_entry = NULL;
|
||||
struct fcp_cmd_rsp_buf *tmp = NULL;
|
||||
|
||||
spin_lock_irq(&hdwq->hdwq_lock);
|
||||
|
||||
/* Free cmd_rsp buf pool */
|
||||
list_for_each_entry_safe(list_entry, tmp,
|
||||
buf_list,
|
||||
list_node) {
|
||||
dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
|
||||
list_entry->fcp_cmnd,
|
||||
list_entry->fcp_cmd_rsp_dma_handle);
|
||||
list_del(&list_entry->list_node);
|
||||
kfree(list_entry);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&hdwq->hdwq_lock);
|
||||
}
|
||||
|
@@ -365,9 +365,18 @@ struct lpfc_io_buf {
|
||||
/* Common fields */
|
||||
struct list_head list;
|
||||
void *data;
|
||||
|
||||
dma_addr_t dma_handle;
|
||||
dma_addr_t dma_phys_sgl;
|
||||
struct sli4_sge *dma_sgl;
|
||||
|
||||
struct sli4_sge *dma_sgl; /* initial segment chunk */
|
||||
|
||||
/* linked list of extra sli4_hybrid_sge */
|
||||
struct list_head dma_sgl_xtra_list;
|
||||
|
||||
/* list head for fcp_cmd_rsp buf */
|
||||
struct list_head dma_cmd_rsp_list;
|
||||
|
||||
struct lpfc_iocbq cur_iocbq;
|
||||
struct lpfc_sli4_hdw_queue *hdwq;
|
||||
uint16_t hdwq_no;
|
||||
|
@@ -49,9 +49,6 @@
|
||||
#define LPFC_FCP_MQ_THRESHOLD_MAX 256
|
||||
#define LPFC_FCP_MQ_THRESHOLD_DEF 8
|
||||
|
||||
/* Common buffer size to accomidate SCSI and NVME IO buffers */
|
||||
#define LPFC_COMMON_IO_BUF_SZ 768
|
||||
|
||||
/*
|
||||
* Provide the default FCF Record attributes used by the driver
|
||||
* when nonFIP mode is configured and there is no other default
|
||||
@@ -114,9 +111,8 @@ enum lpfc_sli4_queue_type {
|
||||
enum lpfc_sli4_queue_subtype {
|
||||
LPFC_NONE,
|
||||
LPFC_MBOX,
|
||||
LPFC_FCP,
|
||||
LPFC_IO,
|
||||
LPFC_ELS,
|
||||
LPFC_NVME,
|
||||
LPFC_NVMET,
|
||||
LPFC_NVME_LS,
|
||||
LPFC_USOL
|
||||
@@ -646,22 +642,17 @@ struct lpfc_eq_intr_info {
|
||||
struct lpfc_sli4_hdw_queue {
|
||||
/* Pointers to the constructed SLI4 queues */
|
||||
struct lpfc_queue *hba_eq; /* Event queues for HBA */
|
||||
struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */
|
||||
struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */
|
||||
struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */
|
||||
struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */
|
||||
uint16_t fcp_cq_map;
|
||||
uint16_t nvme_cq_map;
|
||||
struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */
|
||||
struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */
|
||||
uint16_t io_cq_map;
|
||||
|
||||
/* Keep track of IO buffers for this hardware queue */
|
||||
spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */
|
||||
struct list_head lpfc_io_buf_list_get;
|
||||
spinlock_t io_buf_list_put_lock; /* Common buf free list lock */
|
||||
struct list_head lpfc_io_buf_list_put;
|
||||
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
struct list_head lpfc_abts_scsi_buf_list;
|
||||
spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
|
||||
struct list_head lpfc_abts_nvme_buf_list;
|
||||
spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */
|
||||
struct list_head lpfc_abts_io_buf_list;
|
||||
uint32_t total_io_bufs;
|
||||
uint32_t get_io_bufs;
|
||||
uint32_t put_io_bufs;
|
||||
@@ -685,6 +676,13 @@ struct lpfc_sli4_hdw_queue {
|
||||
uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
|
||||
uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
|
||||
#endif
|
||||
|
||||
/* Per HDWQ pool resources */
|
||||
struct list_head sgl_list;
|
||||
struct list_head cmd_rsp_buf_list;
|
||||
|
||||
/* Lock for syncing Per HDWQ pool resources */
|
||||
spinlock_t hdwq_lock;
|
||||
};
|
||||
|
||||
#ifdef LPFC_HDWQ_LOCK_STAT
|
||||
@@ -850,8 +848,8 @@ struct lpfc_sli4_hba {
|
||||
struct lpfc_queue **cq_lookup;
|
||||
struct list_head lpfc_els_sgl_list;
|
||||
struct list_head lpfc_abts_els_sgl_list;
|
||||
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
struct list_head lpfc_abts_scsi_buf_list;
|
||||
spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */
|
||||
struct list_head lpfc_abts_io_buf_list;
|
||||
struct list_head lpfc_nvmet_sgl_list;
|
||||
spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */
|
||||
struct list_head lpfc_abts_nvmet_ctx_list;
|
||||
@@ -1056,10 +1054,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
|
||||
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
|
||||
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
|
||||
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
|
||||
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
|
||||
struct sli4_wcqe_xri_aborted *, int);
|
||||
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
|
||||
struct sli4_wcqe_xri_aborted *axri, int idx);
|
||||
struct sli4_wcqe_xri_aborted *axri,
|
||||
struct lpfc_io_buf *lpfc_ncmd);
|
||||
void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
|
||||
struct sli4_wcqe_xri_aborted *axri, int idx);
|
||||
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
|
||||
struct sli4_wcqe_xri_aborted *axri);
|
||||
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
|
||||
@@ -1094,6 +1093,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *);
|
||||
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
|
||||
void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
|
||||
struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_io_buf *buf);
|
||||
struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_io_buf *buf);
|
||||
int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf);
|
||||
int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_io_buf *buf);
|
||||
void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_sli4_hdw_queue *hdwq);
|
||||
void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
|
||||
struct lpfc_sli4_hdw_queue *hdwq);
|
||||
static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx)
|
||||
{
|
||||
return q->q_pgs[idx / q->entry_cnt_per_pg] +
|
||||
|
@@ -20,7 +20,7 @@
|
||||
* included with this package. *
|
||||
*******************************************************************/
|
||||
|
||||
#define LPFC_DRIVER_VERSION "12.2.0.3"
|
||||
#define LPFC_DRIVER_VERSION "12.4.0.0"
|
||||
#define LPFC_DRIVER_NAME "lpfc"
|
||||
|
||||
/* Used for SLI 2/3 */
|
||||
|
@@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport)
|
||||
* scsi_host_put() to release the vport.
|
||||
*/
|
||||
lpfc_mbx_unreg_vpi(vport);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
}
|
||||
|
||||
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
|
||||
|
Reference in New Issue
Block a user