Merge tag 'for-5.8/block-2020-06-01' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "Core block changes that have been queued up for this release:

   - Remove dead blk-throttle and blk-wbt code (Guoqing)

   - Include pid in blktrace note traces (Jan)

   - Don't spew I/O errors on wouldblock termination (me)

   - Zone append addition (Johannes, Keith, Damien)

   - IO accounting improvements (Konstantin, Christoph)

   - blk-mq hardware map update improvements (Ming)

   - Scheduler dispatch improvement (Salman)

   - Inline block encryption support (Satya)

   - Request map fixes and improvements (Weiping)

   - blk-iocost tweaks (Tejun)

   - Fix for timeout failing with error injection (Keith)

   - Queue re-run fixes (Douglas)

   - CPU hotplug improvements (Christoph)

   - Queue entry/exit improvements (Christoph)

   - Move DMA drain handling to the few drivers that use it (Christoph)

   - Partition handling cleanups (Christoph)"

* tag 'for-5.8/block-2020-06-01' of git://git.kernel.dk/linux-block: (127 commits)
  block: mark bio_wouldblock_error() bio with BIO_QUIET
  blk-wbt: rename __wbt_update_limits to wbt_update_limits
  blk-wbt: remove wbt_update_limits
  blk-throttle: remove tg_drain_bios
  blk-throttle: remove blk_throtl_drain
  null_blk: force complete for timeout request
  blk-mq: drain I/O when all CPUs in a hctx are offline
  blk-mq: add blk_mq_all_tag_iter
  blk-mq: open code __blk_mq_alloc_request in blk_mq_alloc_request_hctx
  blk-mq: use BLK_MQ_NO_TAG in more places
  blk-mq: rename BLK_MQ_TAG_FAIL to BLK_MQ_NO_TAG
  blk-mq: move more request initialization to blk_mq_rq_ctx_init
  blk-mq: simplify the blk_mq_get_request calling convention
  blk-mq: remove the bio argument to ->prepare_request
  nvme: force complete cancelled requests
  blk-mq: blk-mq: provide forced completion method
  block: fix a warning when blkdev.h is included for !CONFIG_BLOCK builds
  block: blk-crypto-fallback: remove redundant initialization of variable err
  block: reduce part_stat_lock() scope
  block: use __this_cpu_add() instead of access by smp_processor_id()
  ...
Merged by Linus Torvalds on 2020-06-02 15:29:19 -07:00
122 changed files with 4514 additions and 1493 deletions

--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -978,28 +978,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                 scsi_io_completion_action(cmd, result);
 }
 
-static blk_status_t scsi_init_sgtable(struct request *req,
-                struct scsi_data_buffer *sdb)
+static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+                struct request *rq)
 {
-        int count;
-
-        /*
-         * If sg table allocation fails, requeue request later.
-         */
-        if (unlikely(sg_alloc_table_chained(&sdb->table,
-                        blk_rq_nr_phys_segments(req), sdb->table.sgl,
-                        SCSI_INLINE_SG_CNT)))
-                return BLK_STS_RESOURCE;
-
-        /*
-         * Next, walk the list, and fill in the addresses and sizes of
-         * each segment.
-         */
-        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
-        BUG_ON(count > sdb->table.nents);
-        sdb->table.nents = count;
-        sdb->length = blk_rq_payload_bytes(req);
-        return BLK_STS_OK;
+        return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
+               !op_is_write(req_op(rq)) &&
+               sdev->host->hostt->dma_need_drain(rq);
 }
 
 /*
@@ -1015,19 +999,62 @@ static blk_status_t scsi_init_sgtable(struct request *req,
  */
 blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
 {
+        struct scsi_device *sdev = cmd->device;
         struct request *rq = cmd->request;
+        unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
+        struct scatterlist *last_sg = NULL;
         blk_status_t ret;
+        bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
+        int count;
 
-        if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
+        if (WARN_ON_ONCE(!nr_segs))
                 return BLK_STS_IOERR;
 
-        ret = scsi_init_sgtable(rq, &cmd->sdb);
-        if (ret)
-                return ret;
+        /*
+         * Make sure there is space for the drain.  The driver must adjust
+         * max_hw_segments to be prepared for this.
+         */
+        if (need_drain)
+                nr_segs++;
+
+        /*
+         * If sg table allocation fails, requeue request later.
+         */
+        if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
+                        cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
+                return BLK_STS_RESOURCE;
+
+        /*
+         * Next, walk the list, and fill in the addresses and sizes of
+         * each segment.
+         */
+        count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
+
+        if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+                unsigned int pad_len =
+                        (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
+
+                last_sg->length += pad_len;
+                cmd->extra_len += pad_len;
+        }
+
+        if (need_drain) {
+                sg_unmark_end(last_sg);
+                last_sg = sg_next(last_sg);
+                sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
+                sg_mark_end(last_sg);
+
+                cmd->extra_len += sdev->dma_drain_len;
+                count++;
+        }
+
+        BUG_ON(count > cmd->sdb.table.nents);
+        cmd->sdb.table.nents = count;
+        cmd->sdb.length = blk_rq_payload_bytes(rq);
 
         if (blk_integrity_rq(rq)) {
                 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-                int ivecs, count;
+                int ivecs;
 
                 if (WARN_ON_ONCE(!prot_sdb)) {
                         /*
@@ -1610,12 +1637,7 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
         struct request_queue *q = hctx->queue;
         struct scsi_device *sdev = q->queuedata;
 
-        if (scsi_dev_queue_ready(q, sdev))
-                return true;
-
-        if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
-                blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
-        return false;
+        return scsi_dev_queue_ready(q, sdev);
 }
 
 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1684,6 +1706,7 @@ out_put_budget:
         case BLK_STS_OK:
                 break;
         case BLK_STS_RESOURCE:
+        case BLK_STS_ZONE_RESOURCE:
                 if (atomic_read(&sdev->device_busy) ||
                     scsi_device_blocked(sdev))
                         ret = BLK_STS_DEV_RESOURCE;
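
The "Move DMA drain handling to the few drivers that use it" item is what the scsi_lib.c hunks above implement: scsi_init_io() now reserves one extra scatterlist entry and appends the device's drain buffer itself whenever scsi_cmd_needs_dma_drain() returns true. As a rough, hedged sketch of the driver side (the "foo" driver, its 256-byte drain size and segment count are made up for illustration; only the sdev->dma_drain_buf/dma_drain_len fields and the ->dma_need_drain() host-template hook appear in the diff above), a low-level driver would opt in roughly like this:

	/*
	 * Hypothetical driver "foo": names and sizes are invented; only the
	 * dma_drain_* fields and the ->dma_need_drain() hook come from the
	 * series merged above.
	 */
	#include <linux/blkdev.h>
	#include <linux/slab.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	#define FOO_DMA_DRAIN_LEN	256

	/* Only reached for passthrough reads; scsi_cmd_needs_dma_drain()
	 * already filters everything else out before calling this. */
	static bool foo_dma_need_drain(struct request *rq)
	{
		/* hypothetical: this controller over-fetches on every such command */
		return true;
	}

	static int foo_slave_configure(struct scsi_device *sdev)
	{
		/* a real driver would free this again in ->slave_destroy() */
		sdev->dma_drain_buf = kzalloc(FOO_DMA_DRAIN_LEN, GFP_KERNEL);
		if (!sdev->dma_drain_buf)
			return -ENOMEM;
		sdev->dma_drain_len = FOO_DMA_DRAIN_LEN;
		return 0;
	}

	static struct scsi_host_template foo_sht = {
		.name			= "foo",
		.slave_configure	= foo_slave_configure,
		.dma_need_drain		= foo_dma_need_drain,
		/* advertise one segment fewer than the hardware supports so
		 * scsi_init_io() always has room to append the drain entry */
		.sg_tablesize		= 127,
	};

The drain mechanism itself exists for devices (historically ATAPI-style ones) that may transfer more data than the request asked for; the sketch only shows the shape of the hook-up, not any real driver merged in this series.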