raid1-10.c

// SPDX-License-Identifier: GPL-2.0
/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)

/*
 * Number of guaranteed raid bios in case of extreme VM load:
 */
#define NR_RAID_BIOS 256

/* When we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer.
 */
#define IO_BLOCKED ((struct bio *)1)

/* When we successfully write to a known bad block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD.
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
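
/*
 * Hedged usage sketch (not part of this file): the two sentinel values
 * above are stored in the per-device bio slots mentioned in the comments,
 * so any code that walks those slots must filter them out with
 * BIO_SPECIAL() before treating an entry as a real struct bio. The helper
 * below only illustrates that pattern; its name and the 'bios'/'nr_mirrors'
 * parameters are assumptions, not code from this driver.
 */
static void __maybe_unused example_put_real_bios(struct bio **bios,
						 int nr_mirrors)
{
	int i;

	for (i = 0; i < nr_mirrors; i++) {
		/* skip empty slots and the IO_BLOCKED/IO_MADE_GOOD markers */
		if (bios[i] && !BIO_SPECIAL(bios[i]))
			bio_put(bios[i]);
	}
}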
/* for managing resync I/O pages */
struct resync_pages {
	void		*raid_bio;
	struct page	*pages[RESYNC_PAGES];
};

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
};

static void rbio_pool_free(void *rbio, void *data)
{
	kfree(rbio);
}

static inline int resync_alloc_pages(struct resync_pages *rp,
				     gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++) {
		rp->pages[i] = alloc_page(gfp_flags);
		if (!rp->pages[i])
			goto out_free;
	}

	return 0;

out_free:
	while (--i >= 0)
		put_page(rp->pages[i]);
	return -ENOMEM;
}

static inline void resync_free_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		put_page(rp->pages[i]);
}

static inline void resync_get_all_pages(struct resync_pages *rp)
{
	int i;

	for (i = 0; i < RESYNC_PAGES; i++)
		get_page(rp->pages[i]);
}

static inline struct page *resync_fetch_page(struct resync_pages *rp,
					     unsigned idx)
{
	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
		return NULL;

	return rp->pages[idx];
}
/*
 * 'struct resync_pages' stores the actual pages used for doing the resync
 * IO, and it is per-bio, so make .bi_private point to it.
 */
static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
	return bio->bi_private;
}
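
/*
 * Hedged lifecycle sketch (assumptions, not code from this driver): a
 * resync path would typically allocate one 'struct resync_pages' per bio,
 * fill it with resync_alloc_pages(), park it in ->bi_private so the
 * completion side can find it via get_resync_pages(), and release the
 * pages with resync_free_pages() once the bio is done. The function name
 * and the kmalloc/GFP_KERNEL choices below are illustrative only.
 */
static int __maybe_unused example_attach_resync_pages(struct bio *bio)
{
	struct resync_pages *rp;

	rp = kmalloc(sizeof(*rp), GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	if (resync_alloc_pages(rp, GFP_KERNEL)) {
		kfree(rp);
		return -ENOMEM;
	}

	/* the endio handler can now do: rp = get_resync_pages(bio); */
	bio->bi_private = rp;
	return 0;
}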
/* generally called after bio_reset() for resetting bvec */
static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
				       int size)
{
	int idx = 0;

	/* initialize bvec table again */
	do {
		struct page *page = resync_fetch_page(rp, idx);
		int len = min_t(int, size, PAGE_SIZE);

		/*
		 * won't fail because the vec table is big
		 * enough to hold all these pages
		 */
		bio_add_page(bio, page, len, 0);
		size -= len;
	} while (idx++ < RESYNC_PAGES && size > 0);
}
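
/*
 * Hedged usage sketch (assumptions, not code from this driver): when a
 * resync bio has to be reissued, bio_reset() wipes its bvec table, so the
 * pages must be re-added from the bio's resync_pages before resubmitting.
 * The function name, the 'rdev' parameter and the REQ_OP_READ choice below
 * are illustrative only.
 */
static void __maybe_unused example_reissue_resync_read(struct bio *bio,
						       struct md_rdev *rdev,
						       int size)
{
	struct resync_pages *rp = get_resync_pages(bio);

	/* bio_reset() clears ->bi_private and the bvec table */
	bio_reset(bio, rdev->bdev, REQ_OP_READ);
	bio->bi_private = rp;

	/* rebuild the bvec table from the pages we still own */
	md_bio_reset_resync_pages(bio, rp, size);
	submit_bio_noacct(bio);
}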
static inline void raid1_submit_write(struct bio *bio)
{
	struct md_rdev *rdev = (void *)bio->bi_bdev;

	bio->bi_next = NULL;
	bio_set_dev(bio, rdev->bdev);
	if (test_bit(Faulty, &rdev->flags))
		bio_io_error(bio);
	else if (unlikely(bio_op(bio) == REQ_OP_DISCARD &&
			  !bdev_max_discard_sectors(bio->bi_bdev)))
		/* Just ignore it */
		bio_endio(bio);
	else
		submit_bio_noacct(bio);
}
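
/*
 * Hedged caller-side sketch (assumptions, not code from this driver):
 * raid1_submit_write() expects the caller to have stashed the target
 * 'struct md_rdev *' in ->bi_bdev while the bio sat on a plug or pending
 * list; the cast below shows that convention. The function name and the
 * 'end_write' endio parameter are illustrative only.
 */
static void __maybe_unused example_queue_mirror_write(struct bio *mbio,
						      struct md_rdev *rdev,
						      bio_end_io_t *end_write)
{
	/* stash the rdev; raid1_submit_write() decodes it again */
	mbio->bi_bdev = (struct block_device *)rdev;
	mbio->bi_end_io = end_write;

	raid1_submit_write(mbio);
}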
static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
					 blk_plug_cb_fn unplug)
{
	struct raid1_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;

	/*
	 * If the bitmap is not enabled, it's safe to submit the io directly,
	 * and this gives optimal performance.
	 */
	if (!md_bitmap_enabled(mddev->bitmap)) {
		raid1_submit_write(bio);
		return true;
	}

	cb = blk_check_plugged(unplug, mddev, sizeof(*plug));
	if (!cb)
		return false;

	plug = container_of(cb, struct raid1_plug_cb, cb);
	bio_list_add(&plug->pending, bio);

	return true;
}
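
/*
 * Hedged unplug sketch (assumptions, not code from this driver): the
 * blk_plug_cb registered by raid1_add_bio_to_plug() is invoked when the
 * plug is flushed; a minimal callback would drain plug->pending and push
 * each bio through raid1_submit_write(). The real raid1/raid10 callbacks
 * also handle the from_schedule case and per-array bookkeeping, which is
 * omitted here.
 */
static void __maybe_unused example_raid1_unplug(struct blk_plug_cb *cb,
						bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&plug->pending)))
		raid1_submit_write(bio);

	kfree(plug);
}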