block: add a bi_error field to struct bio
Currently we have two different ways to signal an I/O error on a BIO:

 (1) by clearing the BIO_UPTODATE flag
 (2) by returning a Linux errno value to the bi_end_io callback

The first one has the drawback of only communicating a single possible
error (-EIO), and the second one has the drawback of not being persistent
when bios are queued up, and of not being passed along from child to
parent bio in the ever more popular chaining scenario. Having both
mechanisms available has the additional drawback of utterly confusing
driver authors and introducing bugs where various I/O submitters only
deal with one of them, and the others have to add boilerplate code to
deal with both kinds of error returns.

So add a new bi_error field to store an errno value directly in struct
bio and remove the existing mechanisms to clean all this up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 4246a0b63b
parent 0034af0365
committed by Jens Axboe
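The shape of the change is easiest to see as a before/after sketch of a
driver completion handler. This is illustrative only, not code from the
patch; my_endio() and handle_error() are hypothetical names.

	/* Before this commit: errors arrive both as an errno argument and as
	 * the BIO_UPTODATE flag, and careful callers had to check both. */
	static void my_endio(struct bio *bio, int error)
	{
		if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
			handle_error(bio);	/* hypothetical helper */
		bio_put(bio);
	}

	/* After this commit: a single errno lives in the bio itself, so it
	 * survives queueing and can be inherited across chained bios. */
	static void my_endio(struct bio *bio)
	{
		if (bio->bi_error)
			handle_error(bio);	/* hypothetical helper */
		bio_put(bio);
	}

The bcache hunks below are the mechanical application of that conversion:
drop the errno argument, read bio->bi_error instead.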
drivers/md/bcache/btree.c

@@ -278,7 +278,7 @@ err:
 	goto out;
 }
 
-static void btree_node_read_endio(struct bio *bio, int error)
+static void btree_node_read_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	closure_put(cl);
@@ -305,7 +305,7 @@ static void bch_btree_node_read(struct btree *b)
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (bio->bi_error)
 		set_btree_node_io_error(b);
 
 	bch_bbio_free(bio, b->c);
@@ -371,15 +371,15 @@ static void btree_node_write_done(struct closure *cl)
 	__btree_node_write_done(cl);
 }
 
-static void btree_node_write_endio(struct bio *bio, int error)
+static void btree_node_write_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);
 
-	if (error)
+	if (bio->bi_error)
 		set_btree_node_io_error(b);
 
-	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
 	closure_put(cl);
 }
 
drivers/md/bcache/closure.h

@@ -38,7 +38,7 @@
  * they are running owned by the thread that is running them. Otherwise, suppose
  * you submit some bios and wish to have a function run when they all complete:
  *
- * foo_endio(struct bio *bio, int error)
+ * foo_endio(struct bio *bio)
  * {
  *	closure_put(cl);
  * }
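For context, the pattern this closure.h comment documents looks roughly
like the sketch below. It is a hypothetical illustration (submit_all() is
not a real bcache function); only the endio signature changes in this patch.

	/* Take one closure ref per submitted bio, drop it on completion;
	 * the closure's continuation runs once the last ref is gone. */
	static void foo_endio(struct bio *bio)
	{
		struct closure *cl = bio->bi_private;

		closure_put(cl);	/* drop the ref taken before submission */
	}

	static void submit_all(struct bio **bios, int n, struct closure *cl)
	{
		int i;

		for (i = 0; i < n; i++) {
			bios[i]->bi_private = cl;
			bios[i]->bi_end_io = foo_endio;
			closure_get(cl);	/* one ref per in-flight bio */
			generic_make_request(bios[i]);
		}
	}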
drivers/md/bcache/io.c

@@ -55,19 +55,19 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
 	s->bio->bi_end_io = s->bi_end_io;
 	s->bio->bi_private = s->bi_private;
-	bio_endio(s->bio, 0);
+	bio_endio(s->bio);
 
 	closure_debug_destroy(&s->cl);
 	mempool_free(s, s->p->bio_split_hook);
 }
 
-static void bch_bio_submit_split_endio(struct bio *bio, int error)
+static void bch_bio_submit_split_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
 
-	if (error)
-		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
+	if (bio->bi_error)
+		s->bio->bi_error = bio->bi_error;
 
 	bio_put(bio);
 	closure_put(cl);
drivers/md/bcache/journal.c

@@ -24,7 +24,7 @@
  * bit.
  */
 
-static void journal_read_endio(struct bio *bio, int error)
+static void journal_read_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	closure_put(cl);
@@ -401,7 +401,7 @@ retry:
 
 #define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
 
-static void journal_discard_endio(struct bio *bio, int error)
+static void journal_discard_endio(struct bio *bio)
 {
 	struct journal_device *ja =
 		container_of(bio, struct journal_device, discard_bio);
@@ -547,11 +547,11 @@ void bch_journal_next(struct journal *j)
 		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
 }
 
-static void journal_write_endio(struct bio *bio, int error)
+static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;
 
-	cache_set_err_on(error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_error, w->c, "journal io error");
 	closure_put(&w->c->journal.io);
 }
 
drivers/md/bcache/movinggc.c

@@ -60,20 +60,20 @@ static void write_moving_finish(struct closure *cl)
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
 
-static void read_moving_endio(struct bio *bio, int error)
+static void read_moving_endio(struct bio *bio)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
-	if (error)
-		io->op.error = error;
+	if (bio->bi_error)
+		io->op.error = bio->bi_error;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
 		io->op.error = -EINTR;
 	}
 
-	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
drivers/md/bcache/request.c

@@ -173,22 +173,22 @@ static void bch_data_insert_error(struct closure *cl)
 	bch_data_insert_keys(cl);
 }
 
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (error) {
+	if (bio->bi_error) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = error;
+			op->error = bio->bi_error;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -477,7 +477,7 @@ struct search {
 	struct data_insert_op	iop;
 };
 
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -490,15 +490,15 @@ static void bch_cache_read_endio(struct bio *bio, int error)
 	 * from the backing device.
 	 */
 
-	if (error)
-		s->iop.error = error;
+	if (bio->bi_error)
+		s->iop.error = bio->bi_error;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
 }
 
 /*
@@ -591,13 +591,13 @@ static void cache_lookup(struct closure *cl)
 
 /* Common code for the make_request functions */
 
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (error) {
+	if (bio->bi_error) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = error;
+		s->iop.error = bio->bi_error;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -613,7 +613,8 @@ static void bio_complete(struct search *s)
 			    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		bio_endio(s->orig_bio, s->iop.error);
+		s->orig_bio->bi_error = s->iop.error;
+		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
 }
@@ -992,7 +993,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			bch_generic_make_request(bio, &d->bio_split_hook);
 	}
drivers/md/bcache/super.c

@@ -221,7 +221,7 @@ err:
 	return err;
 }
 
-static void write_bdev_super_endio(struct bio *bio, int error)
+static void write_bdev_super_endio(struct bio *bio)
 {
 	struct cached_dev *dc = bio->bi_private;
 	/* XXX: error checking */
@@ -290,11 +290,11 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
-static void write_super_endio(struct bio *bio, int error)
+static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	bch_count_io_errors(ca, error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
 
@@ -339,12 +339,12 @@ void bcache_write_super(struct cache_set *c)
 
 /* UUID io */
 
-static void uuid_endio(struct bio *bio, int error)
+static void uuid_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-	cache_set_err_on(error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_error, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -512,11 +512,11 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
  * disk.
  */
 
-static void prio_endio(struct bio *bio, int error)
+static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	cache_set_err_on(error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }
drivers/md/bcache/writeback.c

@@ -166,12 +166,12 @@ static void write_dirty_finish(struct closure *cl)
 	closure_return_with_destructor(cl, dirty_io_destructor);
 }
 
-static void dirty_endio(struct bio *bio, int error)
+static void dirty_endio(struct bio *bio)
 {
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (error)
+	if (bio->bi_error)
 		SET_KEY_DIRTY(&w->key, false);
 
 	closure_put(&io->cl);
@@ -193,15 +193,15 @@ static void write_dirty(struct closure *cl)
 	continue_at(cl, write_dirty_finish, system_wq);
 }
 
-static void read_dirty_endio(struct bio *bio, int error)
+static void read_dirty_endio(struct bio *bio)
 {
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    error, "reading dirty data from cache");
+			    bio->bi_error, "reading dirty data from cache");
 
-	dirty_endio(bio, error);
+	dirty_endio(bio);
 }
 
 static void read_dirty_submit(struct closure *cl)
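A note on the chaining scenario the commit message mentions: since the
error now lives in the bio itself, a child's failure can be copied
straight into its parent, exactly as the bch_bio_submit_split_endio()
hunk above does. A minimal sketch of that idiom, with hypothetical
names and bi_private wiring:

	/* Child completion handler: propagate any error to the parent bio
	 * before completing it, mirroring the split-endio hunk above. */
	static void child_endio(struct bio *child)
	{
		struct bio *parent = child->bi_private;	/* hypothetical wiring */

		if (child->bi_error)
			parent->bi_error = child->bi_error;

		bio_put(child);
		bio_endio(parent);	/* parent's bi_error travels with it */
	}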