Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull misc SCSI driver updates from James Bottomley:
 "This patch set is a set of driver updates (megaraid_sas, fnic, lpfc,
  ufs, hpsa); we also have a couple of bug fixes (sd out of bounds and
  ibmvfc error handling), the first round of esas2r checker fixes, and
  finally the much anticipated big endian additions for megaraid_sas"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] fnic: fnic Driver Tuneables Exposed through CLI
  [SCSI] fnic: Kernel panic while running sh/nosh with max lun cfg
  [SCSI] fnic: Hitting BUG_ON(io_req->abts_done) in fnic_rport_exch_reset
  [SCSI] fnic: Remove QUEUE_FULL handling code
  [SCSI] fnic: On system with >1.1TB RAM, VIC fails multipath after boot up
  [SCSI] fnic: FC stat param seconds_since_last_reset not getting updated
  [SCSI] sd: Fix potential out-of-bounds access
  [SCSI] lpfc 8.3.42: Update lpfc version to driver version 8.3.42
  [SCSI] lpfc 8.3.42: Fixed issue of task management commands having a fixed timeout
  [SCSI] lpfc 8.3.42: Fixed inconsistent spin lock usage.
  [SCSI] lpfc 8.3.42: Fix driver's abort loop functionality to skip IOs already getting aborted
  [SCSI] lpfc 8.3.42: Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 devices
  [SCSI] lpfc 8.3.42: Fix WARN_ON when driver unloads
  [SCSI] lpfc 8.3.42: Avoided making pci bar ioremap call during dual-chute WQ/RQ pci bar selection
  [SCSI] lpfc 8.3.42: Fixed driver iocbq structure's iocb_flag field running out of space
  [SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic
  [SCSI] lpfc 8.3.42: Fixed logging format of setting driver sysfs attributes hard to interpret
  [SCSI] lpfc 8.3.42: Fixed back to back RSCNs discovery failure.
  [SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling
  [SCSI] lpfc 8.3.42: Fixed function mode field defined too small for not recognizing dual-chute mode
  ...
drivers/scsi/lpfc/lpfc.h
@@ -708,6 +708,7 @@ struct lpfc_hba {
 	uint32_t cfg_multi_ring_type;
 	uint32_t cfg_poll;
 	uint32_t cfg_poll_tmo;
+	uint32_t cfg_task_mgmt_tmo;
 	uint32_t cfg_use_msi;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
drivers/scsi/lpfc/lpfc_attr.c
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
 { \
 	if (val >= minval && val <= maxval) {\
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
-			"3053 lpfc_" #attr " changed from %d to %d\n", \
-			vport->cfg_##attr, val); \
+			"3053 lpfc_" #attr \
+			" changed from %d (x%x) to %d (x%x)\n", \
+			vport->cfg_##attr, vport->cfg_##attr, \
+			val, val); \
 		vport->cfg_##attr = val;\
 		return 0;\
 	}\
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
 # For [0], FCP commands are issued to Work Queues ina round robin fashion.
 # For [1], FCP commands are issued to a Work Queue associated with the
 # current CPU.
+# It would be set to 1 by the driver if it's able to set up cpu affinity
+# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
+# roundrobin scheduling of FCP I/Os through WQs will be used.
 */
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for "
+LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
 		"issuing commands [0] - Round Robin, [1] - Current CPU");
 
 /*
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 		"Milliseconds driver will wait between polling FCP ring");
 
 /*
+# lpfc_task_mgmt_tmo: Maximum time to wait for task management commands
+# to complete in seconds. Value range is [5,180], default value is 60.
+*/
+LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
+	"Maximum time to wait for task management commands to complete");
+/*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #       support this feature
 #       0  = MSI disabled
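The LPFC_ATTR_RW() lines above expand into a module parameter plus a sysfs attribute whose store routine range-checks the written value, as the lpfc_##attr##_set hunk earlier shows. Below is a minimal standalone sketch of that pattern; the demo_* names are illustrative stand-ins, not the actual lpfc macros:

#include <linux/kernel.h>
#include <linux/module.h>

/* Hypothetical stand-in for the per-adapter config field. */
static unsigned int demo_task_mgmt_tmo = 60;

/*
 * Sketch of the lpfc_##attr##_set pattern: accept the new value only
 * when it lies inside [minval, maxval], otherwise reject with -EINVAL.
 */
#define DEMO_ATTR_SET(attr, minval, maxval)				\
static int demo_##attr##_set(unsigned int val)				\
{									\
	if (val >= (minval) && val <= (maxval)) {			\
		pr_info("demo_" #attr " changed from %u (x%x) to %u (x%x)\n",\
			demo_##attr, demo_##attr, val, val);		\
		demo_##attr = val;					\
		return 0;						\
	}								\
	return -EINVAL;							\
}

/* Generates demo_task_mgmt_tmo_set() with the [5,180] range used above. */
DEMO_ATTR_SET(task_mgmt_tmo, 5, 180)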
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_issue_reset,
 	&dev_attr_lpfc_poll,
 	&dev_attr_lpfc_poll_tmo,
+	&dev_attr_lpfc_task_mgmt_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_topology_init(phba, lpfc_topology);
 	lpfc_link_speed_init(phba, lpfc_link_speed);
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
+	lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
 	lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
drivers/scsi/lpfc/lpfc_bsg.c
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	iocb = &dd_data->context_un.iocb;
 	ndlp = iocb->ndlp;
 	rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	int request_nseg;
 	int reply_nseg;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 	int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	}
 
 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	if (iocb_stat == IOCB_SUCCESS)
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed yet */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (iocb_stat == IOCB_BUSY)
+	} else if (iocb_stat == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
free_rmp:
 	lpfc_free_bsg_buffers(phba, rmp);
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	rsp = &rspiocbq->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (rc == IOCB_BUSY)
+	} else if (rc == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
linkdown_err:
 
 	cmdiocbq->context1 = ndlp;
 	lpfc_els_free_iocb(phba, cmdiocbq);
 
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_event *evt, *evt_next;
 	struct event_data *evt_dat = NULL;
 	unsigned long flags;
 	uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	ndlp = dd_data->context_un.iocb.ndlp;
 	cmp = cmdiocbq->context2;
 	bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	int rc = 0;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 
 	/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
issue_ct_rsp_exit:
 	lpfc_sli_release_iocbq(phba, ctiocb);
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag
 		 */
 
 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O abort window is still open */
+		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return -EAGAIN;
+		}
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag.
 		 */
 
 		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
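Taken together, the lpfc_bsg.c hunks close the dispatch/timeout race with an "abort window": the dispatch path opens the window under phba->hbalock once the iocb is in flight, the completion handler closes it, and the timeout handler aborts only while it is open. A condensed sketch of the protocol under simplified demo_* names (not the driver's exact code):

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_IO_LIBDFC		0x0001	/* iocb still owned by the bsg path */
#define DEMO_CMD_OUTSTANDING	0x2000	/* abort window is open */

struct demo_iocb {
	spinlock_t *lock;	/* stands in for phba->hbalock */
	u32 flags;
};

/* Dispatch side: open the abort window once the iocb is in flight. */
static void demo_open_abort_window(struct demo_iocb *io)
{
	unsigned long f;

	spin_lock_irqsave(io->lock, f);
	if (io->flags & DEMO_IO_LIBDFC)	/* not completed/released yet */
		io->flags |= DEMO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(io->lock, f);
}

/* Completion side: close the window before releasing job state. */
static void demo_close_abort_window(struct demo_iocb *io)
{
	unsigned long f;

	spin_lock_irqsave(io->lock, f);
	io->flags &= ~DEMO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(io->lock, f);
}

/* Timeout side: abort only while the window is still open. */
static int demo_timeout_abort(struct demo_iocb *io)
{
	unsigned long f;

	spin_lock_irqsave(io->lock, f);
	if (!(io->flags & DEMO_CMD_OUTSTANDING)) {
		spin_unlock_irqrestore(io->lock, f);
		return -EAGAIN;	/* completion already ran */
	}
	/* ... issue the abort while the window is held open ... */
	spin_unlock_irqrestore(io->lock, f);
	return 0;
}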
drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	if (!ndlp)
 		return;
 	lpfc_issue_els_logo(vport, ndlp, 0);
+	mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /*
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	int rc;
 	uint16_t rpi;
 
-	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
+	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
+		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+					 "3366 RPI x%x needs to be "
+					 "unregistered nlp_flag x%x "
+					 "did x%x\n",
+					 ndlp->nlp_rpi, ndlp->nlp_flag,
+					 ndlp->nlp_DID);
 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 		if (mbox) {
 			/* SLI4 ports require the physical rpi value. */
drivers/scsi/lpfc/lpfc_init.c
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 			phba->sli4_hba.scsi_xri_max);
 
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
 	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	return 0;
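The two lpfc_sli4_xri_sgl_update() hunks are the "inconsistent spin lock usage" fix: the outer spin_lock_irq() already disables interrupts, so taking and releasing the nested put-list lock with the _irq variants would re-enable interrupts at the inner unlock while the outer lock is still held. The inner lock therefore becomes a plain spin_lock(). A minimal sketch of the corrected nesting, with hypothetical locks rather than the lpfc fields:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_get_lock);
static DEFINE_SPINLOCK(demo_put_lock);

static void demo_splice_both_lists(void)
{
	spin_lock_irq(&demo_get_lock);	/* outer lock disables IRQs */
	spin_lock(&demo_put_lock);	/* inner lock: plain, IRQs already off */

	/* ... splice the put list into the get list ... */

	spin_unlock(&demo_put_lock);	/* must NOT re-enable IRQs here */
	spin_unlock_irq(&demo_get_lock);	/* IRQs return at the outer unlock */
}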
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	struct lpfc_mqe *mqe;
 	int longs;
 
+	/* Get all the module params for configuring this host */
+	lpfc_get_cfgparam(phba);
+
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
 	if (rc)
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	       sizeof(struct lpfc_mbox_ext_buf_ctx));
 	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
 
-	/*
-	 * We need to do a READ_CONFIG mailbox command here before
-	 * calling lpfc_get_cfgparam. For VFs this will report the
-	 * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
-	 * All of the resources allocated
-	 * for this Port are tied to these values.
-	 */
-	/* Get all the module params for configuring this host */
-	lpfc_get_cfgparam(phba);
 	phba->max_vpi = LPFC_MAX_VPI;
 
 	/* This will be set to correct value after the read_config mbox */
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 		phba->sli4_hba.fcp_wq = NULL;
 	}
 
-	if (phba->pci_bar0_memmap_p) {
-		iounmap(phba->pci_bar0_memmap_p);
-		phba->pci_bar0_memmap_p = NULL;
-	}
-	if (phba->pci_bar2_memmap_p) {
-		iounmap(phba->pci_bar2_memmap_p);
-		phba->pci_bar2_memmap_p = NULL;
-	}
-	if (phba->pci_bar4_memmap_p) {
-		iounmap(phba->pci_bar4_memmap_p);
-		phba->pci_bar4_memmap_p = NULL;
-	}
-
 	/* Release FCP CQ mapping array */
 	if (phba->sli4_hba.fcp_cq_map != NULL) {
 		kfree(phba->sli4_hba.fcp_cq_map);
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	 * particular PCI BARs regions is dependent on the type of
 	 * SLI4 device.
 	 */
-	if (pci_resource_start(pdev, 0)) {
-		phba->pci_bar0_map = pci_resource_start(pdev, 0);
-		bar0map_len = pci_resource_len(pdev, 0);
+	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
+		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
 
 		/*
 		 * Map SLI4 PCI Config Space Register base to a kernel virtual
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				   "registers.\n");
 			goto out;
 		}
+		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
 		/* Set up BAR0 PCI config space register memory map */
 		lpfc_sli4_bar0_register_memmap(phba, if_type);
 	} else {
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 2))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
 		/*
 		 * Map SLI4 if type 0 HBA Control Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar1_map = pci_resource_start(pdev, 2);
-		bar1map_len = pci_resource_len(pdev, 2);
+		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
 		phba->sli4_hba.ctrl_regs_memmap_p =
 			ioremap(phba->pci_bar1_map, bar1map_len);
 		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				"ioremap failed for SLI4 HBA control registers.\n");
 			goto out_iounmap_conf;
 		}
+		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
 		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
 	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, 4))) {
+	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
 		/*
 		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
 		 * virtual address and setup the registers.
 		 */
-		phba->pci_bar2_map = pci_resource_start(pdev, 4);
-		bar2map_len = pci_resource_len(pdev, 4);
+		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
 		phba->sli4_hba.drbl_regs_memmap_p =
 			ioremap(phba->pci_bar2_map, bar2map_len);
 		if (!phba->sli4_hba.drbl_regs_memmap_p) {
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 				"ioremap failed for SLI4 HBA doorbell registers.\n");
 			goto out_iounmap_ctrl;
 		}
+		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
 		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
 		if (error)
 			goto out_iounmap_all;
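The PCI_64BIT_BAR0/2/4 constants replace the raw indices 0, 2 and 4: a 64-bit BAR occupies two 32-bit BAR slots, so consecutive 64-bit BARs sit at the even indices. Caching the mappings in pci_bar{0,2,4}_memmap_p here is what lets lpfc_dual_chute_pci_bar_map() in lpfc_sli.c further down reuse them instead of calling ioremap() again. A generic sketch of mapping such a BAR; demo names, not the lpfc code:

#include <linux/io.h>
#include <linux/pci.h>

#define DEMO_64BIT_BAR0	0	/* 64-bit BARs occupy slot pairs 0/1, 2/3, 4/5 */

static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, DEMO_64BIT_BAR0);
	resource_size_t len = pci_resource_len(pdev, DEMO_64BIT_BAR0);

	if (!start)
		return NULL;	/* BAR not implemented on this function */

	/* Map once and cache the cookie so later users can share it. */
	return ioremap(start, len);
}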
@@ -8405,7 +8389,8 @@ static int
 lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 {
 	int i, idx, saved_chann, used_chann, cpu, phys_id;
-	int max_phys_id, num_io_channel, first_cpu;
+	int max_phys_id, min_phys_id;
+	int num_io_channel, first_cpu, chan;
 	struct lpfc_vector_map_info *cpup;
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 			phba->sli4_hba.num_present_cpu));
 
 	max_phys_id = 0;
+	min_phys_id = 0xff;
 	phys_id = 0;
 	num_io_channel = 0;
 	first_cpu = LPFC_VECTOR_MAP_EMPTY;
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 
 		if (cpup->phys_id > max_phys_id)
 			max_phys_id = cpup->phys_id;
+		if (cpup->phys_id < min_phys_id)
+			min_phys_id = cpup->phys_id;
 		cpup++;
 	}
 
+	phys_id = min_phys_id;
 	/* Now associate the HBA vectors with specific CPUs */
 	for (idx = 0; idx < vectors; idx++) {
 		cpup = phba->sli4_hba.cpu_map;
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 			for (i = 1; i < max_phys_id; i++) {
 				phys_id++;
 				if (phys_id > max_phys_id)
-					phys_id = 0;
+					phys_id = min_phys_id;
 				cpu = lpfc_find_next_cpu(phba, phys_id);
 				if (cpu == LPFC_VECTOR_MAP_EMPTY)
 					continue;
 				goto found;
 			}
 
+			/* Use round robin for scheduling */
+			phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
+			chan = 0;
+			cpup = phba->sli4_hba.cpu_map;
+			for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+				cpup->channel_id = chan;
+				cpup++;
+				chan++;
+				if (chan >= phba->cfg_fcp_io_channel)
+					chan = 0;
+			}
+
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"3329 Cannot set affinity:"
 					"Error mapping vector %d (%d)\n",
@@ -8503,7 +8504,7 @@ found:
 		/* Spread vector mapping across multple physical CPU nodes */
 		phys_id++;
 		if (phys_id > max_phys_id)
-			phys_id = 0;
+			phys_id = min_phys_id;
 	}
 
 	/*
@@ -8513,7 +8514,7 @@ found:
 	 * Base the remaining IO channel assigned, to IO channels already
 	 * assigned to other CPUs on the same phys_id.
 	 */
-	for (i = 0; i <= max_phys_id; i++) {
+	for (i = min_phys_id; i <= max_phys_id; i++) {
 		/*
 		 * If there are no io channels already mapped to
 		 * this phys_id, just round robin thru the io_channels.
@@ -8595,10 +8596,11 @@ out:
 	if (num_io_channel != phba->sli4_hba.num_present_cpu)
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3333 Set affinity mismatch:"
-				"%d chann != %d cpus: %d vactors\n",
+				"%d chann != %d cpus: %d vectors\n",
 				num_io_channel, phba->sli4_hba.num_present_cpu,
 				vectors);
 
+	/* Enable using cpu affinity for scheduling */
 	phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
 	return 1;
 }
@@ -8689,9 +8691,12 @@ enable_msix_vectors:
 
 cfg_fail_out:
 	/* free the irq already requested */
-	for (--index; index >= 0; index--)
+	for (--index; index >= 0; index--) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 msi_fail_out:
 	/* Unconfigure MSI-X capability structure */
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)
 	int index;
 
 	/* Free up MSI-X multi-message vectors */
-	for (index = 0; index < phba->cfg_fcp_io_channel; index++)
+	for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
+		irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
+					  vector, NULL);
 		free_irq(phba->sli4_hba.msix_entries[index].vector,
 			 &phba->sli4_hba.fcp_eq_hdl[index]);
+	}
 
 	/* Disable MSI-X */
 	pci_disable_msix(phba->pcidev);
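Both MSI-X teardown paths now pair free_irq() with irq_set_affinity_hint(..., NULL): free_irq() warns if an affinity hint is still installed, which is the "WARN_ON when driver unloads" fix from the pull list. A sketch of the symmetric setup/teardown, with demo names:

#include <linux/interrupt.h>

/* Setup: request the vector, then publish the preferred CPU as a hint. */
static int demo_setup_vector(unsigned int irq, irq_handler_t handler,
			     void *handle, const struct cpumask *mask)
{
	int rc = request_irq(irq, handler, 0, "demo-fcp", handle);

	if (!rc)
		rc = irq_set_affinity_hint(irq, mask);
	return rc;
}

/* Teardown: clear the hint first; free_irq() WARNs if one is still set. */
static void demo_teardown_vector(unsigned int irq, void *handle)
{
	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, handle);
}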
drivers/scsi/lpfc/lpfc_scsi.c
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
 
 	/* get all SCSI buffers need to repost to a local list */
 	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock_irq(&phba->scsi_buf_list_put_lock);
+	spin_lock(&phba->scsi_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
 	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
-	spin_unlock_irq(&phba->scsi_buf_list_put_lock);
+	spin_unlock(&phba->scsi_buf_list_put_lock);
 	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		}
 		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
-		/* Page alignment is CRITICAL, double check to be sure */
-		if (((unsigned long)(psb->data) &
-		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
+		/*
+		 * 4K Page alignment is CRITICAL to BlockGuard, double check
+		 * to be sure.
+		 */
+		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
 			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
 				      psb->data, psb->dma_handle);
 			kfree(psb);
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf * lpfc_cmd = NULL;
 	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
 	if (!lpfc_cmd) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	return lpfc_cmd;
 }
 /**
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf*
 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
-	unsigned long gflag = 0;
-	unsigned long pflag = 0;
+	unsigned long iflag = 0;
 	int found = 0;
 
-	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
+	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
 		if (lpfc_test_rrq_active(phba, ndlp,
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 			break;
 	}
 	if (!found) {
-		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
+		spin_lock(&phba->scsi_buf_list_put_lock);
 		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
 		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
+		spin_unlock(&phba->scsi_buf_list_put_lock);
 		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
 			if (lpfc_test_rrq_active(
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 				break;
 		}
 	}
-	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
+	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
 	if (!found)
 		return NULL;
 	return lpfc_cmd;
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 
 	/*
 	 * Check SLI validation that all the transfer was actually done
-	 * (fcpi_parm should be zero).
+	 * (fcpi_parm should be zero). Apply check only to reads.
 	 */
-	} else if (fcpi_parm) {
+	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
-			"9029 FCP Data Transfer Check Error: "
+			"9029 FCP Read Check Error Data: "
 			"x%x x%x x%x x%x x%x\n",
 			be32_to_cpu(fcpcmd->fcpDl),
 			be32_to_cpu(fcprsp->rspResId),
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 	char tag[2];
 	uint8_t *ptr;
 	bool sli4;
+	uint32_t fcpdl;
 
 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return;
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			if (vport->cfg_first_burst_size &&
 			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
-				piocbq->iocb.un.fcpi.fcpi_XRdy =
-					vport->cfg_first_burst_size;
+				fcpdl = scsi_bufflen(scsi_cmnd);
+				if (fcpdl < vport->cfg_first_burst_size)
+					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
+				else
+					piocbq->iocb.un.fcpi.fcpi_XRdy =
+						vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
 			phba->fc4OutputRequests++;
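The added branch clamps the advertised first-burst credit to the real payload length, i.e. fcpi_XRdy = min(transfer length, first burst size), so a short WRITE never advertises more ready data than it carries. An equivalent sketch with the kernel's min() helper (demo names, not the driver's code):

#include <linux/kernel.h>
#include <linux/types.h>

/* Never advertise more first-burst WRITE data than the command carries. */
static inline u32 demo_first_burst_xrdy(u32 fcpdl, u32 first_burst_size)
{
	return min(fcpdl, first_burst_size);
}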
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 		goto out_unlock;
 	}
 
+	/* Indicate the IO is being aborted by the driver. */
+	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 	/*
 	 * The scsi command can not be in txq and it is in flight because the
 	 * pCmd is still pointig at the SCSI command we have to abort. There
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
 	if (lpfc_cmd == NULL)
 		return FAILED;
-	lpfc_cmd->timeout = 60;
+	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
 	lpfc_cmd->rdata = rdata;
 
 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
drivers/scsi/lpfc/lpfc_sli.c
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
				   abort_cmd) != 0)
 			continue;
 
+		/*
+		 * If the iocbq is already being aborted, don't take a second
+		 * action, but do count it.
+		 */
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+			continue;
+
 		/* issue ABTS for this IOCB based on iotag */
 		abtsiocb = lpfc_sli_get_iocbq(phba);
 		if (abtsiocb == NULL) {
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 			continue;
 		}
 
+		/* indicate the IO is being aborted by the driver. */
+		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
 		cmd = &iocbq->iocb;
 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
 		abtsiocb->iocb.ulpLe = 1;
 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
-		abtsiocb->vport = phba->pport;
+		abtsiocb->vport = vport;
 
 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
@@ -12233,7 +12243,6 @@ static void __iomem *
 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 {
 	struct pci_dev *pdev;
-	unsigned long bar_map, bar_map_len;
 
 	if (!phba->pcidev)
 		return NULL;
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 
 	switch (pci_barset) {
 	case WQ_PCI_BAR_0_AND_1:
-		if (!phba->pci_bar0_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
-			phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar0_memmap_p;
 	case WQ_PCI_BAR_2_AND_3:
-		if (!phba->pci_bar2_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-			phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar2_memmap_p;
 	case WQ_PCI_BAR_4_AND_5:
-		if (!phba->pci_bar4_memmap_p) {
-			bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
-			bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-			phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
-		}
 		return phba->pci_bar4_memmap_p;
 	default:
 		break;
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
 void
 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 {
-	struct lpfc_fcf_pri *fcf_pri;
+	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2762 FCF (x%x) reached driver's book "
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
 	}
 	/* Clear the eligible FCF record index bmask */
 	spin_lock_irq(&phba->hbalock);
-	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
+	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
 			list_del_init(&fcf_pri->list);
 			break;
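Several hunks in this series (lpfc_bsg_hba_get_event() above, and lpfc_sli4_fcf_rr_index_clear() here) switch to list_for_each_entry_safe() because the loop body unlinks the current entry; the _safe variant captures the next node before the body runs. A minimal sketch of the rule on a hypothetical list:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct list_head list;
	int id;
};

/*
 * Deleting inside the walk requires the _safe variant: the iterator
 * captures "next" before the body can unlink or free the current entry.
 */
static void demo_remove_id(struct list_head *head, int id)
{
	struct demo_node *node, *next;

	list_for_each_entry_safe(node, next, head, list) {
		if (node->id == id) {
			list_del_init(&node->list);
			kfree(node);
			break;
		}
	}
}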
drivers/scsi/lpfc/lpfc_sli.h
@@ -58,7 +58,7 @@ struct lpfc_iocbq {
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
-	uint16_t iocb_flag;
+	uint32_t iocb_flag;
 #define LPFC_IO_LIBDFC		1	/* libdfc iocb */
 #define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
 #define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
@@ -73,11 +73,11 @@ struct lpfc_iocbq {
 #define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
 #define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
 #define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
+#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
-	uint8_t rsvd2;
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
 	struct lpfc_vport *vport;/* virtual port pointer */
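Widening iocb_flag from uint16_t to uint32_t is forced by the flag map above: the new LPFC_IO_CMD_OUTSTANDING bit takes 0x2000 and the FIP ELS_ID field already occupies the top bits 0xc000, so a 16-bit field had no free bit left (the "iocb_flag field running out of space" fix). Dropping the adjacent uint8_t rsvd2 offsets part of the growth. An illustrative compile-time check, not present in the driver:

#include <linux/build_bug.h>

#define DEMO_IO_CMD_OUTSTANDING	0x2000	/* new flag from this series */
#define DEMO_FIP_ELS_ID_MASK	0xc000	/* top two bits already reserved */

/* The next free bit after 0x2000 would land inside the ELS_ID field,
 * so a 16-bit flag word is full; a 32-bit field restores headroom. */
static_assert(((DEMO_IO_CMD_OUTSTANDING << 1) & DEMO_FIP_ELS_ID_MASK) != 0,
	      "bit 0x4000 collides with the ELS_ID mask");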
drivers/scsi/lpfc/lpfc_sli4.h
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba {
 	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
 	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
 
-	uint8_t fw_func_mode;	/* FW function protocol mode */
+	uint32_t fw_func_mode;	/* FW function protocol mode */
 	uint32_t ulp0_mode;	/* ULP0 protocol mode */
 	uint32_t ulp1_mode;	/* ULP1 protocol mode */
 
drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.41"
+#define LPFC_DRIVER_VERSION "8.3.42"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */