block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion. Note that device mapper overloaded bi_error with a private value, which we'll have to keep around at least for now and thus propagate to a proper blk_status_t value.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

committed by: Jens Axboe
parent: fc17b6534e
commit: 4e4cbee93d
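The conversion works by mapping negative errno values onto the new blk_status_t codes at the boundary, as the end_discard() hunk below does via errno_to_blk_status(). The sketch that follows is illustrative only: the in-tree errno_to_blk_status() helper is table-driven, and the function name example_errno_to_blk_status() is made up for this example; only the BLK_STS_* codes themselves come from this series.

/*
 * Illustrative sketch of an errno -> blk_status_t conversion (assumed
 * shape, not the kernel's actual implementation, which is table-driven).
 */
#include <linux/blk_types.h>
#include <linux/errno.h>

static blk_status_t example_errno_to_blk_status(int errnum)
{
        switch (errnum) {
        case 0:
                return BLK_STS_OK;              /* success stays 0 */
        case -ENOSPC:
                return BLK_STS_NOSPC;           /* out of space */
        case -ENOMEM:
                return BLK_STS_RESOURCE;        /* transient resource failure */
        default:
                return BLK_STS_IOERR;           /* catch-all I/O error */
        }
}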
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
         * Even if r is set, there could be sub discards in flight that we
         * need to wait for.
         */
-       if (r && !op->parent_bio->bi_error)
-               op->parent_bio->bi_error = r;
+       if (r && !op->parent_bio->bi_status)
+               op->parent_bio->bi_status = errno_to_blk_status(r);
        bio_endio(op->parent_bio);
 }

@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }

 static void cell_error_with_code(struct pool *pool,
-               struct dm_bio_prison_cell *cell, int error_code)
+               struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
        dm_cell_error(pool->prison, cell, error_code);
        dm_bio_prison_free_cell(pool->prison, cell);
 }

-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-       return pool->out_of_data_space ? -ENOSPC : -EIO;
+       return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }

 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       int error = get_pool_io_error_code(pool);
-
-       cell_error_with_code(pool, cell, error);
+       cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }

 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)

 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+       cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }

 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
        bio_list_init(master);
 }

-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
        struct bio *bio;

        while ((bio = bio_list_pop(bios))) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        }
 }

-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+               blk_status_t error)
 {
        struct bio_list bios;
        unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
        __merge_bio_list(&bios, &tc->retry_on_resume_list);
        spin_unlock_irqrestore(&tc->lock, flags);

-       error_bio_list(&bios, DM_ENDIO_REQUEUE);
+       error_bio_list(&bios, BLK_STS_DM_REQUEUE);
        requeue_deferred_cells(tc);
 }

-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
        struct thin_c *tc;

@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)

 static void error_retry_list(struct pool *pool)
 {
-       int error = get_pool_io_error_code(pool);
-
-       error_retry_list_with_code(pool, error);
+       error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }

 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
         */
        atomic_t prepare_actions;

-       int err;
+       blk_status_t status;
        struct thin_c *tc;
        dm_block_t virt_begin, virt_end;
        dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        struct dm_thin_new_mapping *m = context;

-       m->err = read_err || write_err ? -EIO : 0;
+       m->status = read_err || write_err ? BLK_STS_IOERR : 0;
        complete_mapping_preparation(m);
 }

@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)

        bio->bi_end_io = m->saved_bi_end_io;

-       m->err = bio->bi_error;
+       m->status = bio->bi_status;
        complete_mapping_preparation(m);
 }

@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        struct bio *bio = m->bio;
        int r;

-       if (m->err) {
+       if (m->status) {
                cell_error(pool, m->cell);
                goto out;
        }
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&tc->lock, flags);
 }

-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
        enum pool_mode m = get_pool_mode(pool);

@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
        case PM_WRITE:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-               return -EIO;
+               return BLK_STS_IOERR;

        case PM_OUT_OF_DATA_SPACE:
-               return pool->pf.error_if_no_space ? -ENOSPC : 0;
+               return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;

        case PM_READ_ONLY:
        case PM_FAIL:
-               return -EIO;
+               return BLK_STS_IOERR;
        default:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }

 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-       int error = should_error_unserviceable_bio(pool);
+       blk_status_t error = should_error_unserviceable_bio(pool);

        if (error) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        } else
                retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
        struct bio *bio;
        struct bio_list bios;
-       int error;
+       blk_status_t error;

        error = should_error_unserviceable_bio(pool);
        if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        unsigned count = 0;

        if (tc->requeue_mode) {
-               error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+               error_thin_bio_list(tc, &tc->deferred_bio_list,
+                               BLK_STS_DM_REQUEUE);
                return;
        }

@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
                notify_of_pool_mode_change_to_oods(pool);
-               error_retry_list_with_code(pool, -ENOSPC);
+               error_retry_list_with_code(pool, BLK_STS_NOSPC);
        }
 }

@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
        thin_hook_bio(tc, bio);

        if (tc->requeue_mode) {
-               bio->bi_error = DM_ENDIO_REQUEUE;
+               bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
                return DM_MAPIO_SUBMITTED;
        }
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
        return thin_bio_map(ti, bio);
 }

-static int thin_endio(struct dm_target *ti, struct bio *bio, int *err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+               blk_status_t *err)
 {
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
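As the final hunk shows, per-bio completion hooks now receive the status through a blk_status_t pointer rather than reading bio->bi_error. A minimal sketch of a hook following that signature is given below; the target name and behaviour are assumptions for illustration, not code from dm-thin.

/*
 * Hypothetical per-bio completion hook using the new calling convention
 * from the last hunk: status is inspected through the blk_status_t
 * pointer instead of via bio->bi_error.
 */
static int example_target_endio(struct dm_target *ti, struct bio *bio,
                                blk_status_t *status)
{
        if (*status == BLK_STS_DM_REQUEUE) {
                /* DM-private status: ask the core to requeue the bio. */
                return DM_ENDIO_REQUEUE;
        }

        return 0;       /* pass the status (0 or a BLK_STS_* error) through */
}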