block: improve handling of the magic discard payload

Instead of allocating a single unused biovec for discard requests, send
them down without any payload.  The driver can instead add a "special"
payload, using a biovec embedded into struct request (unioned over
other fields never used while the request is owned by the driver) and
overloading the number of segments for this case.
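Concretely, a condensed sketch of the new pieces (see the full patch
for the exact context and comments):

	/* in struct request: a bio_vec unioned over fields that are
	 * idle once the request is owned by the driver */
	union {
		struct rb_node rb_node;		/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
	};

	/* in a driver's prep path, e.g. attaching a discard payload
	 * page, in the style of the SCSI disk driver: */
	rq->special_vec.bv_page = page;
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;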

This has several advantages:

 - we don't have to allocate the bio_vec
 - the amount of special casing for discard requests in the block
   layer is significantly reduced
 - using this same scheme for other request types is trivial,
   which will be important for implementing the new WRITE_ZEROES
   op on devices where it actually requires a payload (e.g. SCSI)
 - we can get rid of playing games with the request length, as
   we'll never touch it and completions will work just fine
 - it will allow us to support ranged discard operations in the
   future by merging non-contiguous discard bios into a single
   request
 - last but not least it removes a lot of code
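For drivers, the overloaded segment count is read through a new helper,
which is what the NVMe diff below converts to; roughly:

	/* Number of physical segments the driver has to map; returns 1
	 * for data-less commands that carry a special payload. */
	static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
	{
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return 1;
		return rq->nr_phys_segments;
	}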

This patch is the common base for my WIP series for ranged discards and
for removing discard_zeroes_data in favor of always using
REQ_OP_WRITE_ZEROES, so it would be good to get it in quickly.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit f9d03f96b9 (parent be07e14f96)
Author:    Christoph Hellwig
Committer: Jens Axboe
Date:      2016-12-08 15:20:32 -07:00

13 changed files with 76 additions and 138 deletions

drivers/nvme/host/pci.c

@@ -302,14 +302,14 @@ static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
 static __le64 **iod_list(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	return (__le64 **)(iod->sg + req->nr_phys_segments);
+	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
 static int nvme_init_iod(struct request *rq, unsigned size,
 		struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
-	int nseg = rq->nr_phys_segments;
+	int nseg = blk_rq_nr_phys_segments(rq);
 
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -339,8 +339,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
 	__le64 **list = iod_list(req);
 	dma_addr_t prp_dma = iod->first_dma;
 
-	nvme_cleanup_cmd(req);
-
 	if (iod->npages == 0)
 		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
 	for (i = 0; i < iod->npages; i++) {
@@ -510,7 +508,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	int ret = BLK_MQ_RQ_QUEUE_ERROR;
 
-	sg_init_table(iod->sg, req->nr_phys_segments);
+	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 	iod->nents = blk_rq_map_sg(q, req, iod->sg);
 	if (!iod->nents)
 		goto out;
@@ -566,6 +564,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		}
 	}
 
+	nvme_cleanup_cmd(req);
 	nvme_free_iod(dev, req);
 }
 
@@ -596,20 +595,20 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 
-	map_len = nvme_map_len(req);
-	ret = nvme_init_iod(req, map_len, dev);
+	ret = nvme_setup_cmd(ns, req, &cmnd);
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
 		return ret;
 
-	ret = nvme_setup_cmd(ns, req, &cmnd);
+	map_len = nvme_map_len(req);
+	ret = nvme_init_iod(req, map_len, dev);
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
-		goto out;
+		goto out_free_cmd;
 
-	if (req->nr_phys_segments)
+	if (blk_rq_nr_phys_segments(req))
 		ret = nvme_map_data(dev, req, map_len, &cmnd);
 
 	if (ret != BLK_MQ_RQ_QUEUE_OK)
-		goto out;
+		goto out_cleanup_iod;
 
 	blk_mq_start_request(req);
@@ -620,14 +619,16 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 		else
 			ret = BLK_MQ_RQ_QUEUE_ERROR;
 		spin_unlock_irq(&nvmeq->q_lock);
-		goto out;
+		goto out_cleanup_iod;
 	}
 
 	__nvme_submit_cmd(nvmeq, &cmnd);
 	nvme_process_cq(nvmeq);
 	spin_unlock_irq(&nvmeq->q_lock);
 	return BLK_MQ_RQ_QUEUE_OK;
-out:
+out_cleanup_iod:
 	nvme_free_iod(dev, req);
+out_free_cmd:
+	nvme_cleanup_cmd(req);
 	return ret;
 }
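
The label split above follows from the new ordering: nvme_setup_cmd()
now allocates the DSM range buffer and hangs it off the request as the
special payload, so any failure after that point must free it through
nvme_cleanup_cmd(), while a failure in nvme_init_iod() itself must not
touch the iod.  That is also why nvme_cleanup_cmd() moves out of
nvme_free_iod() and into nvme_unmap_data(): each unwind step now frees
exactly what the corresponding setup step allocated.  In this patch the
cleanup helper reads roughly:

	static inline void nvme_cleanup_cmd(struct request *req)
	{
		/* free the driver-allocated special payload, if any */
		if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
			kfree(page_address(req->special_vec.bv_page) +
			      req->special_vec.bv_offset);
	}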