Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "This is a large set of updates, mostly for drivers (qla2xxx [including
  support for new 83xx based card], qla4xxx, mpt2sas, bfa, zfcp, hpsa,
  be2iscsi, isci, lpfc, ipr, ibmvfc, ibmvscsi, megaraid_sas).

  There's also a rework for tape, adding support for a virtually
  unlimited number of tape drives, plus a set of DIF fixes for sd and a
  fix for a livelock on hot remove of SCSI devices.

  This round includes a signed tag pull of isci-for-3.6

  Signed-off-by: James Bottomley <JBottomley@Parallels.com>"

Fix up trivial conflict in drivers/scsi/qla2xxx/qla_nx.c due to the use
of a new PCI helper function in a function that was removed by this pull.

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (198 commits)
  [SCSI] st: remove st_mutex
  [SCSI] sd: Ensure we correctly disable devices with unknown protection type
  [SCSI] hpsa: gen8plus Smart Array IDs
  [SCSI] qla4xxx: Update driver version to 5.03.00-k1
  [SCSI] qla4xxx: Disable generating pause frames for ISP83XX
  [SCSI] qla4xxx: Fix double clearing of risc_intr for ISP83XX
  [SCSI] qla4xxx: IDC implementation for Loopback
  [SCSI] qla4xxx: update copyrights in LICENSE.qla4xxx
  [SCSI] qla4xxx: Fix panic while rmmod
  [SCSI] qla4xxx: Fail probe_adapter if IRQ allocation fails
  [SCSI] qla4xxx: Prevent MSI/MSI-X falling back to INTx for ISP82XX
  [SCSI] qla4xxx: Update idc reg in case of PCI AER
  [SCSI] qla4xxx: Fix double IDC locking in qla4_8xxx_error_recovery
  [SCSI] qla4xxx: Clear interrupt while unloading driver for ISP83XX
  [SCSI] qla4xxx: Print correct IDC version
  [SCSI] qla4xxx: Added new mbox cmd to pass driver version to FW
  [SCSI] scsi_dh_alua: Enable STPG for unavailable ports
  [SCSI] scsi_remove_target: fix softlockup regression on hot remove
  [SCSI] ibmvscsi: Fix host config length field overflow
  [SCSI] ibmvscsi: Remove backend abstraction
  ...
Author: Linus Torvalds
Date:   2012-10-02 19:01:32 -07:00

179 changed files with 13479 additions and 4445 deletions

diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c

@@ -565,6 +565,23 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
 #endif
 
+/**
+ * ipr_lock_and_done - Acquire lock and complete command
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ *	none
+ **/
+static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
+{
+	unsigned long lock_flags;
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	ipr_cmd->done(ipr_cmd);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
 /**
  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
  * @ipr_cmd:	ipr command struct
@@ -611,33 +628,49 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
  * Return value:
  *	none
  **/
-static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
+			      void (*fast_done) (struct ipr_cmnd *))
 {
 	ipr_reinit_ipr_cmnd(ipr_cmd);
 	ipr_cmd->u.scratch = 0;
 	ipr_cmd->sibling = NULL;
+	ipr_cmd->fast_done = fast_done;
 	init_timer(&ipr_cmd->timer);
 }
 
 /**
- * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
  * @ioa_cfg:	ioa config struct
  *
  * Return value:
  *	pointer to ipr command struct
  **/
 static
-struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 {
 	struct ipr_cmnd *ipr_cmd;
 
 	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
 	list_del(&ipr_cmd->queue);
-	ipr_init_ipr_cmnd(ipr_cmd);
+
+	return ipr_cmd;
+}
+
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
+ * @ioa_cfg:	ioa config struct
+ *
+ * Return value:
+ *	pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 	return ipr_cmd;
 }
 
 /**
  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
  * @ioa_cfg:	ioa config struct
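
The two hunks above split command-block allocation from initialization:
__ipr_get_free_ipr_cmnd() only dequeues a block, while ipr_get_free_ipr_cmnd()
additionally installs ipr_lock_and_done as the fast_done callback, so existing
callers keep the old take-the-lock-then-complete behavior and only the reworked
I/O path opts into lock-free completion. A minimal userspace sketch of that
dual-callback pattern, with hypothetical names (not the driver's actual code):

    /* cmd.c -- build with: cc cmd.c -lpthread */
    #include <stdio.h>
    #include <pthread.h>

    struct cmd;
    typedef void (*done_fn)(struct cmd *);

    struct cmd {
    	done_fn done;		/* legacy completion, expects the lock held */
    	done_fn fast_done;	/* called with no lock held by the caller */
    };

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

    static void complete_cmd(struct cmd *c)
    {
    	(void)c;
    	printf("command completed\n");
    }

    /* Mirrors ipr_lock_and_done: take the lock, then run the old callback,
     * so legacy paths see the locking they have always relied on. */
    static void lock_and_done(struct cmd *c)
    {
    	pthread_mutex_lock(&host_lock);
    	c->done(c);
    	pthread_mutex_unlock(&host_lock);
    }

    int main(void)
    {
    	struct cmd c = { .done = complete_cmd, .fast_done = lock_and_done };

    	c.fast_done(&c);	/* ISR-style call site: no lock held here */
    	return 0;
    }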
@@ -5116,8 +5149,9 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	u16 cmd_index;
 	int num_hrrq = 0;
 	int irq_none = 0;
-	struct ipr_cmnd *ipr_cmd;
+	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
+	LIST_HEAD(doneq);
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -5138,8 +5172,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
 			ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
-			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-			return IRQ_HANDLED;
+			rc = IRQ_HANDLED;
+			goto unlock_out;
 		}
 
 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
@@ -5148,9 +5182,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
-		list_del(&ipr_cmd->queue);
-		del_timer(&ipr_cmd->timer);
-		ipr_cmd->done(ipr_cmd);
+		list_move_tail(&ipr_cmd->queue, &doneq);
 
 		rc = IRQ_HANDLED;
@@ -5180,8 +5212,8 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 			} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 				   int_reg & IPR_PCII_HRRQ_UPDATED) {
 				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
-				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-				return IRQ_HANDLED;
+				rc = IRQ_HANDLED;
+				goto unlock_out;
 			} else
 				break;
 		}
@@ -5189,7 +5221,14 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
+unlock_out:
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
+
 	return rc;
 }
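
With this hunk the interrupt handler only moves finished commands onto a local
doneq while holding host_lock, then drops the lock and runs the fast_done
completions unlocked — the heart of the lock-contention reduction, since the
hard-irq critical section no longer includes completion work. A sketch of the
same detach-then-drain pattern in plain C, with hypothetical names:

    /* drain.c -- build with: cc drain.c -lpthread */
    #include <stdio.h>
    #include <pthread.h>

    struct item {
    	struct item *next;
    	void (*complete)(struct item *);
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct item *finished;	/* shared list, guarded by lock */

    static void say_done(struct item *it)
    {
    	(void)it;
    	printf("completed with the lock dropped\n");
    }

    static void drain_finished(void)
    {
    	struct item *batch, *it;

    	/* Detach the whole list in one short critical section... */
    	pthread_mutex_lock(&lock);
    	batch = finished;
    	finished = NULL;
    	pthread_mutex_unlock(&lock);

    	/* ...then run the (possibly slow) completions unlocked. */
    	while ((it = batch) != NULL) {
    		batch = it->next;
    		it->complete(it);
    	}
    }

    int main(void)
    {
    	static struct item it = { .next = NULL, .complete = say_done };

    	pthread_mutex_lock(&lock);
    	it.next = finished;
    	finished = &it;
    	pthread_mutex_unlock(&lock);

    	drain_finished();
    	return 0;
    }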
@@ -5770,21 +5809,28 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+	unsigned long lock_flags;
 
 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
-		scsi_dma_unmap(ipr_cmd->scsi_cmd);
+		scsi_dma_unmap(scsi_cmd);
 
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		scsi_cmd->scsi_done(scsi_cmd);
-	} else
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	} else {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 		ipr_erp_start(ioa_cfg, ipr_cmd);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	}
 }
 
 /**
  * ipr_queuecommand - Queue a mid-layer request
+ * @shost:	scsi host struct
  * @scsi_cmd:	scsi command struct
- * @done:	done function
  *
  * This function queues a request generated by the mid-layer.
 *
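
ipr_scsi_done() is reshaped on the same principle: since it can now run as a
fast_done callback with no lock held, it does the per-command work such as
scsi_dma_unmap() first and takes host_lock only around the shared free-list
update and completion. A small sketch of that shape, again with hypothetical
names rather than the driver's:

    /* done.c -- build with: cc done.c -lpthread */
    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static int free_blocks;		/* shared state, guarded by host_lock */

    static void unmap_buffers(void)
    {
    	/* Stand-in for scsi_dma_unmap(): per-command work needing no lock. */
    	printf("buffers unmapped outside the lock\n");
    }

    static void fast_complete(void)
    {
    	unmap_buffers();		/* lock-free part first */

    	pthread_mutex_lock(&host_lock);	/* lock only for shared state */
    	free_blocks++;			/* return block to the free list */
    	pthread_mutex_unlock(&host_lock);
    }

    int main(void)
    {
    	fast_complete();
    	printf("free blocks: %d\n", free_blocks);
    	return 0;
    }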
@@ -5793,61 +5839,61 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
  **/
-static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
-				void (*done) (struct scsi_cmnd *))
+static int ipr_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *scsi_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
 	struct ipr_resource_entry *res;
 	struct ipr_ioarcb *ioarcb;
 	struct ipr_cmnd *ipr_cmd;
-	int rc = 0;
+	unsigned long lock_flags;
+	int rc;
 
-	scsi_cmd->scsi_done = done;
-	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
-	res = scsi_cmd->device->hostdata;
+	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
 	scsi_cmd->result = (DID_OK << 16);
+	res = scsi_cmd->device->hostdata;
 
 	/*
 	 * We are currently blocking all devices due to a host reset
 	 * We have told the host to stop giving us new requests, but
 	 * ERP ops don't count. FIXME
 	 */
-	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
+	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 
 	/*
 	 * FIXME - Create scsi_set_host_offline interface
 	 *  and the ioa_is_dead check can be removed
 	 */
 	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
-		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-		scsi_cmd->result = (DID_NO_CONNECT << 16);
-		scsi_cmd->scsi_done(scsi_cmd);
-		return 0;
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		goto err_nodev;
 	}
 
-	if (ipr_is_gata(res) && res->sata_port)
-		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+	if (ipr_is_gata(res) && res->sata_port) {
+		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		return rc;
+	}
 
-	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 
+	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
 	ioarcb = &ipr_cmd->ioarcb;
-	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ioarcb->res_handle = res->res_handle;
-	ipr_cmd->done = ipr_scsi_done;
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
+	ipr_cmd->done = ipr_scsi_eh_done;
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)
 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 
-		if (res->needs_sync_complete) {
-			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
-			res->needs_sync_complete = 0;
-		}
-
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 		if (ipr_is_gscsi(res))
 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
@@ -5859,24 +5905,47 @@ static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-	if (likely(rc == 0)) {
-		if (ioa_cfg->sis64)
-			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
-		else
-			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
-	}
+	if (ioa_cfg->sis64)
+		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+	else
+		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 
-	if (unlikely(rc != 0)) {
-		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		if (!rc)
+			scsi_dma_unmap(scsi_cmd);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
+	if (unlikely(ioa_cfg->ioa_is_dead)) {
+		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+		spin_unlock_irqrestore(shost->host_lock, lock_flags);
+		scsi_dma_unmap(scsi_cmd);
+		goto err_nodev;
+	}
+
+	ioarcb->res_handle = res->res_handle;
+	if (res->needs_sync_complete) {
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+		res->needs_sync_complete = 0;
+	}
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 	ipr_send_command(ipr_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
 	return 0;
+
+err_nodev:
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+	scsi_cmd->result = (DID_NO_CONNECT << 16);
+	scsi_cmd->scsi_done(scsi_cmd);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+	return 0;
 }
 
-static DEF_SCSI_QCMD(ipr_queuecommand)
-
 /**
  * ipr_ioctl - IOCTL handler
  * @sdev:	scsi device struct
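
Dropping DEF_SCSI_QCMD() here is what makes the rework pay off: that macro
generates a queuecommand wrapper that holds the host lock across the entire
_lck routine, whereas the new native ipr_queuecommand() takes and releases
host_lock itself only where needed. Roughly, the removed wrapper behaved like
the sketch below (paraphrased from the 3.6-era <scsi/scsi_host.h>; exact
details of the macro may differ):

    /* Approximate expansion of "static DEF_SCSI_QCMD(ipr_queuecommand)":
     * the whole queue path used to run under host_lock. */
    static int ipr_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
    {
    	unsigned long irq_flags;
    	int rc;

    	spin_lock_irqsave(shost->host_lock, irq_flags);
    	rc = ipr_queuecommand_lck(cmd, cmd->scsi_done);
    	spin_unlock_irqrestore(shost->host_lock, irq_flags);
    	return rc;
    }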
@@ -8775,8 +8844,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
 
-	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
-		      sata_port_info.flags, &ipr_sata_ops);
+	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
 
 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);