Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull misc SCSI driver updates from James Bottomley:
 "This patch set is a set of driver updates (megaraid_sas, fnic, lpfc,
  ufs, hpsa); we also have a couple of bug fixes (sd out-of-bounds and
  ibmvfc error handling), the first round of esas2r checker fixes, and
  finally the much anticipated big endian additions for megaraid_sas."

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (47 commits)
  [SCSI] fnic: fnic Driver Tuneables Exposed through CLI
  [SCSI] fnic: Kernel panic while running sh/nosh with max lun cfg
  [SCSI] fnic: Hitting BUG_ON(io_req->abts_done) in fnic_rport_exch_reset
  [SCSI] fnic: Remove QUEUE_FULL handling code
  [SCSI] fnic: On system with >1.1TB RAM, VIC fails multipath after boot up
  [SCSI] fnic: FC stat param seconds_since_last_reset not getting updated
  [SCSI] sd: Fix potential out-of-bounds access
  [SCSI] lpfc 8.3.42: Update lpfc version to driver version 8.3.42
  [SCSI] lpfc 8.3.42: Fixed issue of task management commands having a fixed timeout
  [SCSI] lpfc 8.3.42: Fixed inconsistent spin lock usage
  [SCSI] lpfc 8.3.42: Fix driver's abort loop functionality to skip IOs already getting aborted
  [SCSI] lpfc 8.3.42: Fixed failure to allocate SCSI buffer on PPC64 platform for SLI4 devices
  [SCSI] lpfc 8.3.42: Fix WARN_ON when driver unloads
  [SCSI] lpfc 8.3.42: Avoided making pci bar ioremap call during dual-chute WQ/RQ pci bar selection
  [SCSI] lpfc 8.3.42: Fixed driver iocbq structure's iocb_flag field running out of space
  [SCSI] lpfc 8.3.42: Fix crash on driver load due to cpu affinity logic
  [SCSI] lpfc 8.3.42: Fixed logging format of setting driver sysfs attributes hard to interpret
  [SCSI] lpfc 8.3.42: Fixed back to back RSCNs discovery failure
  [SCSI] lpfc 8.3.42: Fixed race condition between BSG I/O dispatch and timeout handling
  [SCSI] lpfc 8.3.42: Fixed function mode field defined too small for not recognizing dual-chute mode
  ...
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	iocb = &dd_data->context_un.iocb;
 	ndlp = iocb->ndlp;
 	rmp = iocb->rmp;
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	int request_nseg;
 	int reply_nseg;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 	int iocb_stat;
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
 	}
 
 	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	if (iocb_stat == IOCB_SUCCESS)
+
+	if (iocb_stat == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed yet */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (iocb_stat == IOCB_BUSY)
+	} else if (iocb_stat == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
 free_rmp:
 	lpfc_free_bsg_buffers(phba, rmp);
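The two hunks above are the sending half of the "abort window" handshake this series adds: the dispatch path marks the iocb with LPFC_IO_CMD_OUTSTANDING only after lpfc_sli_issue_iocb() succeeds and only while the request is still owned by the driver (LPFC_IO_LIBDFC), and the completion handler clears that mark under the same phba->hbalock. Below is a minimal user-space sketch of the same handshake, with hypothetical names and a pthread mutex standing in for hbalock; it is an illustration, not lpfc code.

	/* Minimal user-space analogue of the abort-window handshake above.
	 * Hypothetical names; pthread_mutex_t stands in for phba->hbalock,
	 * plain flag bits stand in for the iocb_flag bits. Not lpfc code. */
	#include <pthread.h>
	#include <stdio.h>

	#define CMD_LIBDFC       0x1	/* request still owned by the dispatcher  */
	#define CMD_OUTSTANDING  0x2	/* timeout handler may abort this request */

	struct fake_iocb {
		pthread_mutex_t lock;	/* stands in for phba->hbalock */
		unsigned int flags;
	};

	/* Dispatch path (cf. lpfc_bsg_send_mgmt_cmd): open the abort window
	 * only if the request was issued and has not completed yet. */
	static void dispatch_open_window(struct fake_iocb *io)
	{
		pthread_mutex_lock(&io->lock);
		if (io->flags & CMD_LIBDFC)		/* not completed/released yet */
			io->flags |= CMD_OUTSTANDING;	/* timeout handler may abort */
		pthread_mutex_unlock(&io->lock);
	}

	/* Completion path (cf. lpfc_bsg_send_mgmt_cmd_cmp): close the window so
	 * a late timeout handler no longer touches a request being freed. */
	static void completion_close_window(struct fake_iocb *io)
	{
		pthread_mutex_lock(&io->lock);
		io->flags &= ~CMD_OUTSTANDING;
		pthread_mutex_unlock(&io->lock);
	}

	int main(void)
	{
		struct fake_iocb io = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.flags = CMD_LIBDFC,
		};

		dispatch_open_window(&io);
		printf("window open:   flags=0x%x\n", io.flags);
		completion_close_window(&io);
		printf("window closed: flags=0x%x\n", io.flags);
		return 0;
	}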
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	rsp = &rspiocbq->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
 	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 	int rc = 0;
 
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
-	else if (rc == IOCB_BUSY)
+	} else if (rc == IOCB_BUSY) {
 		rc = -EAGAIN;
-	else
+	} else {
 		rc = -EIO;
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
 linkdown_err:
 	cmdiocbq->context1 = ndlp;
 	lpfc_els_free_iocb(phba, cmdiocbq);
 
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	struct lpfc_hba *phba = vport->phba;
 	struct get_ct_event *event_req;
 	struct get_ct_event_reply *event_reply;
-	struct lpfc_bsg_event *evt;
+	struct lpfc_bsg_event *evt, *evt_next;
 	struct event_data *evt_dat = NULL;
 	unsigned long flags;
 	uint32_t rc = 0;
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
 				break;
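These two hunks switch the waiter walk from list_for_each_entry() to list_for_each_entry_safe(), presumably because the loop body may unlink or free the current entry; the "_safe" variant caches the next node before the body runs. A stand-alone sketch of the same idea with a hand-rolled singly linked list (hypothetical names, no kernel list.h) follows.

	/* Why a "_safe" walk is needed: grab the next pointer before the loop
	 * body may unlink/free the current node. Hand-rolled list, hypothetical
	 * names; the kernel's list_for_each_entry_safe() follows the same idea. */
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		int reg_id;
		struct node *next;
	};

	static void drop_matching(struct node **head, int reg_id)
	{
		struct node **prev = head;
		struct node *cur = *head;
		struct node *next;

		while (cur) {
			next = cur->next;		/* cache next BEFORE freeing cur */
			if (cur->reg_id == reg_id) {
				*prev = next;		/* unlink */
				free(cur);		/* cur is now invalid */
			} else {
				prev = &cur->next;
			}
			cur = next;			/* safe: taken before the free */
		}
	}

	int main(void)
	{
		struct node *head = NULL;
		for (int i = 0; i < 5; i++) {
			struct node *n = malloc(sizeof(*n));
			n->reg_id = i % 2;		/* two reg_ids: 0 and 1 */
			n->next = head;
			head = n;
		}
		drop_matching(&head, 1);		/* free every node with reg_id 1 */
		for (struct node *n = head; n; n = n->next)
			printf("kept reg_id=%d\n", n->reg_id);
		while (head) {				/* release what is left */
			struct node *n = head;
			head = head->next;
			free(n);
		}
		return 0;
	}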
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
 	}
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
+	/* Close the timeout handler abort window */
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
 	ndlp = dd_data->context_un.iocb.ndlp;
 	cmp = cmdiocbq->context2;
 	bmp = cmdiocbq->context3;
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 	int rc = 0;
 	struct lpfc_nodelist *ndlp = NULL;
 	struct bsg_job_data *dd_data;
+	unsigned long flags;
 	uint32_t creg_val;
 
 	/* allocate our bsg tracking structure */
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
 
-	if (rc == IOCB_SUCCESS)
+	if (rc == IOCB_SUCCESS) {
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O had not been completed/released */
+		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
+			/* open up abort window to timeout handler */
+			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
+		}
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return 0; /* done for now */
+	}
 
 	/* iocb failed so cleanup */
 	job->dd_data = NULL;
 
 issue_ct_rsp_exit:
 	lpfc_sli_release_iocbq(phba, ctiocb);
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag
 		 */
 
 		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+		spin_lock_irqsave(&phba->hbalock, flags);
+		/* make sure the I/O abort window is still open */
+		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
+			spin_unlock_irqrestore(&phba->hbalock, flags);
+			return -EAGAIN;
+		}
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		 * remove it from the txq queue and call cancel iocbs.
 		 * Otherwise, call abort iotag.
 		 */
 
 		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
-		spin_lock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_lock_irqsave(&phba->hbalock, flags);
 		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
 			if (check_iocb == cmdiocb) {
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		}
 		if (list_empty(&completions))
 			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (!list_empty(&completions)) {
 			lpfc_sli_cancel_iocbs(phba, &completions,
 					      IOSTAT_LOCAL_REJECT,
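The lpfc_bsg_timeout() hunks are the other side of the handshake: the handler drops ct_ev_lock, takes hbalock with irqsave, and bails out with -EAGAIN when LPFC_IO_CMD_OUTSTANDING is no longer set, i.e. when the completion path has already closed the abort window. Below is a user-space sketch of that check only, with hypothetical names and a pthread mutex in place of hbalock; it is not lpfc code.

	/* Timeout-handler side of the abort-window handshake: only abort while
	 * the window is still open, otherwise report -EAGAIN and let the normal
	 * completion path finish. Hypothetical user-space sketch, not lpfc code. */
	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	#define CMD_OUTSTANDING  0x2	/* stands in for LPFC_IO_CMD_OUTSTANDING */

	struct fake_iocb {
		pthread_mutex_t lock;	/* stands in for phba->hbalock */
		unsigned int flags;
	};

	static int timeout_try_abort(struct fake_iocb *io)
	{
		pthread_mutex_lock(&io->lock);
		if (!(io->flags & CMD_OUTSTANDING)) {
			/* completion already closed the window; nothing to abort */
			pthread_mutex_unlock(&io->lock);
			return -EAGAIN;
		}
		/* window still open: a real driver would issue the abort here,
		 * e.g. the equivalent of lpfc_sli_issue_abort_iotag() */
		pthread_mutex_unlock(&io->lock);
		return 0;
	}

	int main(void)
	{
		struct fake_iocb io = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.flags = CMD_OUTSTANDING,
		};

		printf("abort while window open:   %d\n", timeout_try_abort(&io));
		io.flags &= ~CMD_OUTSTANDING;	/* completion closes the window */
		printf("abort after window closed: %d\n", timeout_try_abort(&io));
		return 0;
	}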