io.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <[email protected]>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	mempool_free(b, &c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, NULL, bio->bi_inline_vecs,
		 meta_bucket_pages(&c->cache->sb), 0);

	return bio;
}
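
/*
 * Note: mempool_alloc() with GFP_NOIO may block but will not fail, so
 * bch_bbio_alloc() always returns a usable bio. A typical lifecycle is
 * roughly the following (a sketch: "my_endio" and the closure "cl" are
 * hypothetical names, and error handling is elided):
 *
 *	struct bio *bio = bch_bbio_alloc(c);
 *
 *	bio->bi_end_io	= my_endio;
 *	bio->bi_private	= cl;
 *	bch_bio_map(bio, data);
 *	bch_submit_bbio(bio, c, &k.key, 0);
 *
 * On completion, my_endio() would be expected to end up in
 * bch_bbio_endio(), which drops the bio reference and puts the closure.
 */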

void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
}
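
/*
 * bch_submit_bbio() stashes a single-pointer copy of @k in the bbio
 * header, so code running after submission can recover which device
 * and bucket the IO targeted; __bch_submit_bbio() then routes the bio
 * to the sector that pointer @ptr of @k points at.
 */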
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
		     struct bkey *k, unsigned int ptr)
{
	struct bbio *b = container_of(bio, struct bbio, bio);

	bch_bkey_copy_single_ptr(&b->key, k, ptr);
	__bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
	unsigned int errors;

	WARN_ONCE(!dc, "NULL pointer of struct cached_dev");

	/*
	 * Read-ahead requests on a degraded and recovering md raid
	 * (e.g. raid6) device might be failed immediately by the md
	 * raid code, which is not a real hardware media failure. So
	 * we shouldn't count failed REQ_RAHEAD bios in dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
				    dc->bdev);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%pg: IO error on backing device, unrecoverable\n",
			dc->bdev);
	else
		bch_cached_dev_error(dc);
}
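
/*
 * Note that within this file dc->io_errors is only ever incremented;
 * the decay machinery below applies only to cache device errors.
 */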

void bch_count_io_errors(struct cache *ca,
			 blk_status_t error,
			 int is_read,
			 const char *m)
{
	/*
	 * The halflife of an error is:
	 * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
	 * where "refresh" is one decay period, i.e. ca->set->error_decay
	 * IOs to this device.
	 */
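	/*
	 * Worked out: each decay step below multiplies the error count
	 * by 127/128, so after n steps it has scaled by (127/128)^n.
	 * Solving (127/128)^n = 1/2 gives
	 * n = ln(1/2) / ln(127/128) ~= 88.4, hence the ~88 above.
	 */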
	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */
			count = atomic_cmpxchg(&ca->io_count, old, new);

			if (count == old) {
				count = new;

				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;

					/*
					 * Rescale by 127/128; retry if another
					 * CPU updated io_errors underneath us.
					 */
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		/*
		 * io_errors is kept in fixed point: each hard error adds
		 * 1 << IO_ERROR_SHIFT, so the 127/128 decay above can
		 * represent fractions of an error.
		 */
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
							&ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%pg: IO error on %s%s\n",
			       ca->bdev, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%pg: too many IO errors %s\n",
					    ca->bdev, m);
	}
}

void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
			      blk_status_t error, const char *m)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct cache *ca = c->cache;
	int is_read = (bio_data_dir(bio) == READ ? 1 : 0);

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			/* us / 1024 is a cheap approximation of milliseconds */
			int ms = us / 1024;

			c->congested_last_us = t;

			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}

	bch_count_io_errors(ca, error, is_read, m);
}
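
/*
 * The net effect of the congestion tracking above: c->congested is
 * pushed negative by some milliseconds' worth of over-threshold IOs
 * and creeps back toward zero one step per fast IO. Consumers such as
 * bch_get_congested() (in request.c) use a negative value to make IOs
 * more likely to bypass the cache device until it catches up.
 */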

void bch_bbio_endio(struct cache_set *c, struct bio *bio,
		    blk_status_t error, const char *m)
{
	struct closure *cl = bio->bi_private;

	bch_bbio_count_io_errors(c, bio, error, m);
	bio_put(bio);
	closure_put(cl);
}