scsi: Introduce scsi_start_queue()
This patch does not change any functionality.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Israel Rukshin <israelr@mellanox.com>
Cc: Max Gurtovoy <maxg@mellanox.com>
Cc: Benjamin Block <bblock@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 66483a4a9f
parent 0db6ca8a5e
committed by Martin K. Petersen
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3031,6 +3031,20 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 	return err;
 }
 
+void scsi_start_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q = sdev->request_queue;
+	unsigned long flags;
+
+	if (q->mq_ops) {
+		blk_mq_start_stopped_hw_queues(q, false);
+	} else {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
 /**
  * scsi_internal_device_unblock_nowait - resume a device after a block request
  * @sdev: device to resume
@@ -3049,9 +3063,6 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
 int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 					enum scsi_device_state new_state)
 {
-	struct request_queue *q = sdev->request_queue;
-	unsigned long flags;
-
 	/*
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
@@ -3069,13 +3080,7 @@ int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
 	    sdev->sdev_state != SDEV_OFFLINE)
 		return -EINVAL;
 
-	if (q->mq_ops) {
-		blk_mq_start_stopped_hw_queues(q, false);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
+	scsi_start_queue(sdev);
 
 	return 0;
 }
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -88,6 +88,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
 extern void scsi_requeue_run_queue(struct work_struct *work);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern void scsi_start_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
 extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
 extern int scsi_init_queue(void);
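A minimal usage sketch, not part of the patch: after this refactoring, code inside drivers/scsi that previously open-coded the queue-restart logic can call the new helper directly. The function example_resume_device() below is a hypothetical caller introduced only for illustration; scsi_start_queue() itself is the helper added above.

	#include <scsi/scsi_device.h>
	#include "scsi_priv.h"	/* declares scsi_start_queue() */

	/* Hypothetical caller, shown only to illustrate the new helper. */
	static void example_resume_device(struct scsi_device *sdev)
	{
		/*
		 * scsi_start_queue() hides the blk-mq vs. legacy split:
		 * it restarts the stopped hardware queues on the mq path
		 * and calls blk_start_queue() under q->queue_lock otherwise.
		 */
		scsi_start_queue(sdev);
	}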