Merge branch 'for-4.11/next' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
@@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
         dm_mq_stop_queue(q);
 }
 
-static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
-                                                gfp_t gfp_mask)
-{
-        return mempool_alloc(md->io_pool, gfp_mask);
-}
-
-static void free_old_rq_tio(struct dm_rq_target_io *tio)
-{
-        mempool_free(tio, tio->md->io_pool);
-}
-
-static struct request *alloc_old_clone_request(struct mapped_device *md,
-                                               gfp_t gfp_mask)
-{
-        return mempool_alloc(md->rq_pool, gfp_mask);
-}
-
-static void free_old_clone_request(struct mapped_device *md, struct request *rq)
-{
-        mempool_free(rq, md->rq_pool);
-}
-
 /*
  * Partial completion handling for request-based dm
  */
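The four helpers deleted above were thin wrappers around the mapped device's private mempools (md->io_pool and md->rq_pool). With request allocation handed over to the block layer, nothing in dm-rq allocates tios or old-style clone requests from those pools any more. The sketch below is a rough userspace illustration of the alloc/free-from-a-private-pool pattern they implemented; the struct and function names are invented for the example, and this is not the kernel mempool code.

/* Illustrative userspace sketch of allocating/freeing a fixed element
 * from a private pool, loosely echoing alloc_old_rq_tio()/free_old_rq_tio(). */
#include <stdio.h>

struct tio {                        /* stand-in for struct dm_rq_target_io */
        int id;
        struct tio *next_free;      /* free-list linkage while unused */
};

struct pool {
        struct tio *free_list;      /* pre-allocated elements ready to hand out */
};

static struct tio *pool_alloc(struct pool *p)
{
        struct tio *t = p->free_list;

        if (t)
                p->free_list = t->next_free;
        return t;                   /* NULL when the pool is exhausted */
}

static void pool_free(struct pool *p, struct tio *t)
{
        t->next_free = p->free_list;
        p->free_list = t;
}

int main(void)
{
        struct pool p = { .free_list = NULL };
        struct tio elems[4];

        for (int i = 0; i < 4; i++) {
                elems[i].id = i;
                pool_free(&p, &elems[i]);   /* seed the pool */
        }

        struct tio *t = pool_alloc(&p);
        printf("got tio %d from the pool\n", t->id);
        pool_free(&p, t);
        return 0;
}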
@@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
 {
-        return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
+        return blk_mq_rq_to_pdu(rq);
 }
 
 static void rq_end_stats(struct mapped_device *md, struct request *orig)
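tio_from_request() no longer has to distinguish the two queue types: the dm_rq_target_io is now always the per-request payload the block layer allocates immediately behind the request itself, which is what blk_mq_rq_to_pdu() returns. A minimal userspace model of that layout, using invented stand-in types rather than the kernel structures:

/* Userspace model of "per-driver data follows the request in one
 * allocation"; rq_to_pdu() mirrors what blk_mq_rq_to_pdu() computes. */
#include <stdio.h>
#include <stdlib.h>

struct request {                    /* stand-in for struct request */
        int tag;
};

struct tio {                        /* stand-in for struct dm_rq_target_io */
        int error;
};

static void *rq_to_pdu(struct request *rq)
{
        return rq + 1;              /* pdu lives right after the request */
}

int main(void)
{
        /* one allocation holds the request plus its pdu */
        struct request *rq = malloc(sizeof(*rq) + sizeof(struct tio));

        rq->tag = 42;
        struct tio *tio = rq_to_pdu(rq);
        tio->error = 0;

        printf("request %d has its tio at %p (rq ends at %p)\n",
               rq->tag, (void *)tio, (void *)(rq + 1));
        free(rq);
        return 0;
}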
@@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
         dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone)
-{
-        struct dm_rq_target_io *tio = clone->end_io_data;
-        struct mapped_device *md = tio->md;
-
-        blk_rq_unprep_clone(clone);
-
-        /*
-         * It is possible for a clone_old_rq() allocated clone to
-         * get passed in -- it may not yet have a request_queue.
-         * This is known to occur if the error target replaces
-         * a multipath target that has a request_fn queue stacked
-         * on blk-mq queue(s).
-         */
-        if (clone->q && clone->q->mq_ops)
-                /* stacked on blk-mq queue(s) */
-                tio->ti->type->release_clone_rq(clone);
-        else if (!md->queue->mq_ops)
-                /* request_fn queue stacked on request_fn queue(s) */
-                free_old_clone_request(md, clone);
-
-        if (!md->queue->mq_ops)
-                free_old_rq_tio(tio);
-}
-
 /*
  * Complete the clone and the original request.
  * Must be called without clone's queue lock held,
@@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
         struct mapped_device *md = tio->md;
         struct request *rq = tio->orig;
 
-        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-                rq->errors = clone->errors;
-                rq->resid_len = clone->resid_len;
+        blk_rq_unprep_clone(clone);
+        tio->ti->type->release_clone_rq(clone);
 
-                if (rq->sense)
-                        /*
-                         * We are using the sense buffer of the original
-                         * request.
-                         * So setting the length of the sense data is enough.
-                         */
-                        rq->sense_len = clone->sense_len;
-        }
-
-        free_rq_clone(clone);
         rq_end_stats(md, rq);
         if (!rq->q->mq_ops)
                 blk_end_request_all(rq, error);
@@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
         rq_completed(md, rw, true);
 }
 
-static void dm_unprep_request(struct request *rq)
-{
-        struct dm_rq_target_io *tio = tio_from_request(rq);
-        struct request *clone = tio->clone;
-
-        if (!rq->q->mq_ops) {
-                rq->special = NULL;
-                rq->rq_flags &= ~RQF_DONTPREP;
-        }
-
-        if (clone)
-                free_rq_clone(clone);
-        else if (!tio->md->queue->mq_ops)
-                free_old_rq_tio(tio);
-}
-
 /*
  * Requeue the original request of a clone.
  */
@@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
         int rw = rq_data_dir(rq);
 
         rq_end_stats(md, rq);
-        dm_unprep_request(rq);
+        if (tio->clone) {
+                blk_rq_unprep_clone(tio->clone);
+                tio->ti->type->release_clone_rq(tio->clone);
+        }
 
         if (!rq->q->mq_ops)
                 dm_old_requeue_request(rq);
@@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
         if (!clone) {
                 rq_end_stats(tio->md, rq);
                 rw = rq_data_dir(rq);
-                if (!rq->q->mq_ops) {
+                if (!rq->q->mq_ops)
                         blk_end_request_all(rq, tio->error);
-                        rq_completed(tio->md, rw, false);
-                        free_old_rq_tio(tio);
-                } else {
+                else
                         blk_mq_end_request(rq, tio->error);
-                        rq_completed(tio->md, rw, false);
-                }
+                rq_completed(tio->md, rw, false);
                 return;
         }
 
@@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
 {
         struct dm_rq_target_io *tio = clone->end_io_data;
 
-        if (!clone->q->mq_ops) {
-                /*
-                 * For just cleaning up the information of the queue in which
-                 * the clone was dispatched.
-                 * The clone is *NOT* freed actually here because it is alloced
-                 * from dm own mempool (RQF_ALLOCED isn't set).
-                 */
-                __blk_put_request(clone->q, clone);
-        }
-
         /*
          * Actual request completion is done in a softirq context which doesn't
         * hold the clone's queue lock. Otherwise, deadlock could occur because:
@@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
         if (r)
                 return r;
 
-        clone->cmd = rq->cmd;
-        clone->cmd_len = rq->cmd_len;
-        clone->sense = rq->sense;
         clone->end_io = end_clone_request;
         clone->end_io_data = tio;
 
@@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
         return 0;
 }
 
-static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
-                                    struct dm_rq_target_io *tio, gfp_t gfp_mask)
-{
-        /*
-         * Create clone for use with .request_fn request_queue
-         */
-        struct request *clone;
-
-        clone = alloc_old_clone_request(md, gfp_mask);
-        if (!clone)
-                return NULL;
-
-        blk_rq_init(NULL, clone);
-        if (setup_clone(clone, rq, tio, gfp_mask)) {
-                /* -ENOMEM */
-                free_old_clone_request(md, clone);
-                return NULL;
-        }
-
-        return clone;
-}
-
 static void map_tio_request(struct kthread_work *work);
 
 static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
@@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
         kthread_init_work(&tio->work, map_tio_request);
 }
 
-static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
-                                               struct mapped_device *md,
-                                               gfp_t gfp_mask)
-{
-        struct dm_rq_target_io *tio;
-        int srcu_idx;
-        struct dm_table *table;
-
-        tio = alloc_old_rq_tio(md, gfp_mask);
-        if (!tio)
-                return NULL;
-
-        init_tio(tio, rq, md);
-
-        table = dm_get_live_table(md, &srcu_idx);
-        /*
-         * Must clone a request if this .request_fn DM device
-         * is stacked on .request_fn device(s).
-         */
-        if (!dm_table_all_blk_mq_devices(table)) {
-                if (!clone_old_rq(rq, md, tio, gfp_mask)) {
-                        dm_put_live_table(md, srcu_idx);
-                        free_old_rq_tio(tio);
-                        return NULL;
-                }
-        }
-        dm_put_live_table(md, srcu_idx);
-
-        return tio;
-}
-
-/*
- * Called with the queue lock held.
- */
-static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
-{
-        struct mapped_device *md = q->queuedata;
-        struct dm_rq_target_io *tio;
-
-        if (unlikely(rq->special)) {
-                DMWARN("Already has something in rq->special.");
-                return BLKPREP_KILL;
-        }
-
-        tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
-        if (!tio)
-                return BLKPREP_DEFER;
-
-        rq->special = tio;
-        rq->rq_flags |= RQF_DONTPREP;
-
-        return BLKPREP_OK;
-}
-
 /*
  * Returns:
  * DM_MAPIO_* : the request has been processed as indicated
@@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
         struct request *rq = tio->orig;
         struct request *clone = NULL;
 
-        if (tio->clone) {
-                clone = tio->clone;
-                r = ti->type->map_rq(ti, clone, &tio->info);
-                if (r == DM_MAPIO_DELAY_REQUEUE)
-                        return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
-        } else {
-                r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
-                if (r < 0) {
-                        /* The target wants to complete the I/O */
-                        dm_kill_unmapped_request(rq, r);
-                        return r;
-                }
-                if (r == DM_MAPIO_REMAPPED &&
-                    setup_clone(clone, rq, tio, GFP_ATOMIC)) {
-                        /* -ENOMEM */
-                        ti->type->release_clone_rq(clone);
-                        return DM_MAPIO_REQUEUE;
-                }
-        }
-
+        r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
         switch (r) {
         case DM_MAPIO_SUBMITTED:
                 /* The target has taken the I/O to submit by itself later */
                 break;
         case DM_MAPIO_REMAPPED:
+                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
+                        /* -ENOMEM */
+                        ti->type->release_clone_rq(clone);
+                        return DM_MAPIO_REQUEUE;
+                }
+
                 /* The target has remapped the I/O so dispatch it */
                 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
                                      blk_rq_pos(rq));
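With the .request_fn prep path gone, map_request() has a single flow: ask the target's clone_and_map_rq hook for a clone, then dispatch on the return code. The toy program below mimics that return-code dispatch; the enum constants echo the DM_MAPIO_* naming, but the types and logic are a standalone sketch, not the device-mapper implementation.

/* Standalone sketch of a clone_and_map-style dispatcher, shaped like
 * map_request()'s switch; not the kernel code. */
#include <stdio.h>

enum map_result {
        MAPIO_SUBMITTED,        /* target will submit the I/O itself later */
        MAPIO_REMAPPED,         /* a clone was produced, dispatch it */
        MAPIO_REQUEUE           /* transient failure, requeue the original */
};

struct request { int id; };

/* stand-in for ti->type->clone_and_map_rq(): fill in a clone, say how to proceed */
static int clone_and_map(struct request *rq, struct request *clone)
{
        clone->id = rq->id + 1000;
        return MAPIO_REMAPPED;
}

static int map_request(struct request *rq)
{
        struct request clone = { 0 };
        int r = clone_and_map(rq, &clone);

        switch (r) {
        case MAPIO_SUBMITTED:
                printf("rq %d: target took ownership\n", rq->id);
                break;
        case MAPIO_REMAPPED:
                printf("rq %d: dispatching clone %d\n", rq->id, clone.id);
                break;
        case MAPIO_REQUEUE:
                printf("rq %d: requeueing\n", rq->id);
                break;
        default:
                /* negative values mean "complete the original with an error" */
                printf("rq %d: failing with %d\n", rq->id, r);
                break;
        }
        return r;
}

int main(void)
{
        struct request rq = { .id = 7 };

        return map_request(&rq) == MAPIO_REMAPPED ? 0 : 1;
}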
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
         dm_get(md);
 }
 
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+        /*
+         * Must initialize md member of tio, otherwise it won't
+         * be available in dm_mq_queue_rq.
+         */
+        tio->md = md;
+
+        if (md->init_tio_pdu) {
+                /* target-specific per-io data is immediately after the tio */
+                tio->info.ptr = tio + 1;
+        }
+
+        return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+        return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
 static void map_tio_request(struct kthread_work *work)
 {
         struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
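__dm_rq_init_rq() is now the one place where a freshly allocated request's tio gets wired up, for both the legacy and the blk-mq queue: the block layer reserves cmd_size bytes behind each request, and dm's init hook records the owning mapped_device and, when target per-io data was requested, points tio->info.ptr just past the tio. Below is a hedged userspace sketch of that init-hook flow; the queue, request, and tio types and their field names are invented for illustration.

/* Userspace sketch of an "initialize each request's per-driver data" hook,
 * loosely modelled on init_rq_fn + __dm_rq_init_rq(); not kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct request { int tag; };

struct tio {                        /* stand-in for dm_rq_target_io */
        void *md;                   /* owning device */
        void *per_io;               /* stand-in for tio->info.ptr */
};

struct queue {
        size_t cmd_size;            /* per-request payload after the request */
        int init_tio_pdu;           /* does cmd_size include target per-io data? */
        void *rq_alloc_data;        /* handed back to the init hook (the "md") */
        int (*init_rq)(struct queue *q, struct request *rq);
};

static int dm_init_rq(struct queue *q, struct request *rq)
{
        struct tio *tio = (struct tio *)(rq + 1);   /* pdu follows the request */

        tio->md = q->rq_alloc_data;
        tio->per_io = q->init_tio_pdu ? (void *)(tio + 1) : NULL;
        return 0;
}

static struct request *alloc_request(struct queue *q, int tag)
{
        struct request *rq = calloc(1, sizeof(*rq) + q->cmd_size);

        rq->tag = tag;
        if (q->init_rq(q, rq)) {
                free(rq);
                return NULL;
        }
        return rq;
}

int main(void)
{
        struct queue q = {
                .cmd_size = sizeof(struct tio) + 64,   /* 64 bytes of per-io data */
                .init_tio_pdu = 1,
                .rq_alloc_data = (void *)0x1,          /* fake "md" */
                .init_rq = dm_init_rq,
        };
        struct request *rq = alloc_request(&q, 7);
        struct tio *tio = (struct tio *)(rq + 1);

        printf("rq %d: md=%p, per-io data at %p\n", rq->tag, tio->md, tio->per_io);
        free(rq);
        return 0;
}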
@@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
                 dm_start_request(md, rq);
 
                 tio = tio_from_request(rq);
+                init_tio(tio, rq, md);
                 /* Establish tio->ti before queuing work (map_tio_request) */
                 tio->ti = ti;
                 kthread_queue_work(&md->kworker, &tio->work);
@@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
 /*
  * Fully initialize a .request_fn request-based queue.
  */
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
+        struct dm_target *immutable_tgt;
+
         /* Fully initialize the queue */
-        if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+        md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+        md->queue->rq_alloc_data = md;
+        md->queue->request_fn = dm_old_request_fn;
+        md->queue->init_rq_fn = dm_rq_init_rq;
+
+        immutable_tgt = dm_table_get_immutable_target(t);
+        if (immutable_tgt && immutable_tgt->per_io_data_size) {
+                /* any target-specific per-io data is immediately after the tio */
+                md->queue->cmd_size += immutable_tgt->per_io_data_size;
+                md->init_tio_pdu = true;
+        }
+        if (blk_init_allocated_queue(md->queue) < 0)
                 return -EINVAL;
 
         /* disable dm_old_request_fn's merge heuristic by default */
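The legacy queue setup now mirrors the blk-mq side: the per-request payload is sized up front (sizeof(struct dm_rq_target_io), optionally grown by the immutable target's per_io_data_size), the init_rq_fn hook is installed, and only then is the queue handed to blk_init_allocated_queue(), which owns request allocation from that point on. A small userspace sketch of that sizing arithmetic, using made-up structure sizes:

/* Userspace sketch of the sizing decision made before the queue is
 * initialized: cmd_size starts at the size of the tio and grows by the
 * immutable target's per-io data, so one allocation covers
 * request + tio + per-io data. Sizes here are placeholders. */
#include <stdio.h>

struct request { char payload[64]; };          /* stand-in, made-up size */
struct rq_target_io { char payload[96]; };     /* stand-in, made-up size */

struct target {
        size_t per_io_data_size;   /* extra bytes the target wants per I/O */
};

int main(void)
{
        struct target mpath = { .per_io_data_size = 152 };  /* made-up value */
        size_t cmd_size = sizeof(struct rq_target_io);

        /* mirror of: if (immutable_tgt && immutable_tgt->per_io_data_size) */
        if (mpath.per_io_data_size)
                cmd_size += mpath.per_io_data_size;

        printf("each request costs %zu bytes (%zu request + %zu cmd_size)\n",
               sizeof(struct request) + cmd_size,
               sizeof(struct request), cmd_size);
        return 0;
}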
@@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
 
         dm_init_normal_md_queue(md);
         blk_queue_softirq_done(md->queue, dm_softirq_done);
-        blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
         /* Initialize the request-based DM worker thread */
         kthread_init_worker(&md->kworker);
@@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
                               unsigned int hctx_idx, unsigned int request_idx,
                               unsigned int numa_node)
 {
-        struct mapped_device *md = data;
-        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
-        /*
-         * Must initialize md member of tio, otherwise it won't
-         * be available in dm_mq_queue_rq.
-         */
-        tio->md = md;
-
-        if (md->init_tio_pdu) {
-                /* target-specific per-io data is immediately after the tio */
-                tio->info.ptr = tio + 1;
-        }
-
-        return 0;
+        return __dm_rq_init_rq(data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,