Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (ncr5380,
  lpfc, hisi_sas, megaraid_sas, ufs, ibmvscsis, mpt3sas).

  There's also an assortment of minor fixes, mostly in error legs or
  other not very user-visible stuff. The major change is the
  pci_alloc_irq_vectors replacement for the old pci_msix_.. calls; this
  effectively makes IRQ mapping generic for the drivers and allows
  blk_mq to use the information"
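
The conversion the message refers to follows a common pattern. Below is a
minimal sketch, not taken from this series: "mydrv", its interrupt handler,
and the vector count are made up. A single pci_alloc_irq_vectors() call
replaces the old pci_enable_msix()/pci_enable_msi() juggling, falling back
from MSI-X to MSI to legacy INTx, and pci_irq_vector() looks up the Linux IRQ
number for each allocated vector.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t mydrv_isr(int irq, void *data)
{
	/* acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int mydrv_setup_irqs(struct pci_dev *pdev)
{
	int i, ret, nvec;

	/* One call tries MSI-X first, then MSI, then legacy INTx. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 16, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), mydrv_isr, 0,
				  "mydrv", pdev);
		if (ret)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return ret;
}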

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (256 commits)
  scsi: qla4xxx: switch to pci_alloc_irq_vectors
  scsi: hisi_sas: support deferred probe for v2 hw
  scsi: megaraid_sas: switch to pci_alloc_irq_vectors
  scsi: scsi_devinfo: remove synchronous ALUA for NETAPP devices
  scsi: be2iscsi: set errno on error path
  scsi: be2iscsi: set errno on error path
  scsi: hpsa: fallback to use legacy REPORT PHYS command
  scsi: scsi_dh_alua: Fix RCU annotations
  scsi: hpsa: use %phN for short hex dumps
  scsi: hisi_sas: fix free'ing in probe and remove
  scsi: isci: switch to pci_alloc_irq_vectors
  scsi: ipr: Fix runaway IRQs when falling back from MSI to LSI
  scsi: dpt_i2o: double free on error path
  scsi: cxlflash: Migrate scsi command pointer to AFU command
  scsi: cxlflash: Migrate IOARRIN specific routines to function pointers
  scsi: cxlflash: Cleanup queuecommand()
  scsi: cxlflash: Cleanup send_tmf()
  scsi: cxlflash: Remove AFU command lock
  scsi: cxlflash: Wait for active AFU commands to timeout upon tear down
  scsi: cxlflash: Remove private command pool
  ...
Linus Torvalds, 2016-12-14 10:49:33 -08:00
142 changed files with 5358 additions and 4470 deletions

drivers/scsi/scsi_lib.c

@@ -1998,6 +1998,15 @@ static void scsi_exit_request(void *data, struct request *rq,
 	kfree(cmd->sense_buffer);
 }
 
+static int scsi_map_queues(struct blk_mq_tag_set *set)
+{
+	struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set);
+
+	if (shost->hostt->map_queues)
+		return shost->hostt->map_queues(shost);
+	return blk_mq_map_queues(set);
+}
+
 static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 {
 	struct device *host_dev;
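
The scsi_map_queues() dispatcher above pairs with the new map_queues method
in struct scsi_host_template. A minimal sketch of the driver side, under
assumptions (the "mydrv" names are hypothetical): blk_mq_pci_map_queues()
from <linux/blk-mq-pci.h> spreads the hardware queues along the IRQ affinity
masks, provided the vectors were allocated with PCI_IRQ_AFFINITY.

#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>

struct mydrv_host {			/* hypothetical driver-private data */
	struct pci_dev *pdev;
};

static int mydrv_map_queues(struct Scsi_Host *shost)
{
	struct mydrv_host *h = shost_priv(shost);

	return blk_mq_pci_map_queues(&shost->tag_set, h->pdev);
}

static struct scsi_host_template mydrv_template = {
	.name		= "mydrv",
	.map_queues	= mydrv_map_queues,
	/* ... remaining template fields ... */
};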
@@ -2090,6 +2099,7 @@ static struct blk_mq_ops scsi_mq_ops = {
 	.timeout	= scsi_timeout,
 	.init_request	= scsi_init_request,
 	.exit_request	= scsi_exit_request,
+	.map_queues	= scsi_map_queues,
 };
 
 struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
@@ -2731,6 +2741,39 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 }
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
+/**
+ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+ * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
+ */
+static int scsi_request_fn_active(struct scsi_device *sdev)
+{
+	struct request_queue *q = sdev->request_queue;
+	int request_fn_active;
+
+	WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+	spin_lock_irq(q->queue_lock);
+	request_fn_active = q->request_fn_active;
+	spin_unlock_irq(q->queue_lock);
+
+	return request_fn_active;
+}
+
+/**
+ * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
+ * @sdev: SCSI device pointer.
+ *
+ * Wait until the ongoing shost->hostt->queuecommand() calls that are
+ * invoked from scsi_request_fn() have finished.
+ */
+static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
+{
+	WARN_ON_ONCE(sdev->host->use_blk_mq);
+
+	while (scsi_request_fn_active(sdev))
+		msleep(20);
+}
+
 /**
  * scsi_device_quiesce - Block user issued commands.
  * @sdev:	scsi device to quiesce.
@@ -2815,8 +2858,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * @sdev: device to block
  *
  * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. Called from interrupt
- * or normal process context.
+ * scsi commands on the specified device. May sleep.
  *
  * Returns zero if successful or error if not
  *
@@ -2825,6 +2867,10 @@ EXPORT_SYMBOL(scsi_target_resume);
  * (which must be a legal transition). When the device is in this
  * state, all commands are deferred until the scsi lld reenables
  * the device with scsi_device_unblock or device_block_tmo fires.
+ *
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 int
 scsi_internal_device_block(struct scsi_device *sdev)
@@ -2852,6 +2898,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
+		scsi_wait_for_queuecommand(sdev);
 	}
 
 	return 0;
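
With the wait folded into scsi_internal_device_block(), a legacy (non-blk-mq)
caller may assume that no queuecommand() invocation from scsi_request_fn() is
still in flight once the function returns (the scsi_send_eh_cmnd() caveat in
the to-do note above aside). A minimal usage sketch, under assumptions
(mydrv_reset_hw() is a hypothetical stand-in for a device-specific reset;
scsi_internal_device_unblock() takes the device state to resume in):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void mydrv_reset_hw(struct Scsi_Host *shost)
{
	/* hypothetical device-specific controller reset, omitted */
}

static int mydrv_reset_device(struct scsi_device *sdev)
{
	int ret;

	ret = scsi_internal_device_block(sdev);	/* waits for queuecommand() */
	if (ret)
		return ret;

	mydrv_reset_hw(sdev->host);

	return scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}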