// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Christoph Hellwig.
 */
#include "xfs.h"
  6. static inline unsigned int bio_max_vecs(unsigned int count)
  7. {
  8. return bio_max_segs(howmany(count, PAGE_SIZE));
  9. }
  10. int
  11. xfs_rw_bdev(
  12. struct block_device *bdev,
  13. sector_t sector,
  14. unsigned int count,
  15. char *data,
  16. enum req_op op)
  17. {
  18. unsigned int is_vmalloc = is_vmalloc_addr(data);
  19. unsigned int left = count;
  20. int error;
  21. struct bio *bio;
  22. if (is_vmalloc && op == REQ_OP_WRITE)
  23. flush_kernel_vmap_range(data, count);
  24. bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
  25. GFP_KERNEL);
  26. bio->bi_iter.bi_sector = sector;
  27. do {
  28. struct page *page = kmem_to_page(data);
  29. unsigned int off = offset_in_page(data);
  30. unsigned int len = min_t(unsigned, left, PAGE_SIZE - off);
  31. while (bio_add_page(bio, page, len, off) != len) {
  32. struct bio *prev = bio;
  33. bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
  34. prev->bi_opf, GFP_KERNEL);
  35. bio->bi_iter.bi_sector = bio_end_sector(prev);
  36. bio_chain(prev, bio);
  37. submit_bio(prev);
  38. }
  39. data += len;
  40. left -= len;
  41. } while (left > 0);
  42. error = submit_bio_wait(bio);
  43. bio_put(bio);
  44. if (is_vmalloc && op == REQ_OP_READ)
  45. invalidate_kernel_vmap_range(data, count);
  46. return error;
  47. }