/* extent-io-tree.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H

struct extent_changeset;
struct io_failure_record;

/* Bits for the extent state */
#define EXTENT_DIRTY (1U << 0)
#define EXTENT_UPTODATE (1U << 1)
#define EXTENT_LOCKED (1U << 2)
#define EXTENT_NEW (1U << 3)
#define EXTENT_DELALLOC (1U << 4)
#define EXTENT_DEFRAG (1U << 5)
#define EXTENT_BOUNDARY (1U << 6)
#define EXTENT_NODATASUM (1U << 7)
#define EXTENT_CLEAR_META_RESV (1U << 8)
#define EXTENT_NEED_WAIT (1U << 9)
/* NOTE(review): bit 10 is intentionally skipped here — presumably a retired
 * flag; confirm against git history before reusing it. */
#define EXTENT_NORESERVE (1U << 11)
#define EXTENT_QGROUP_RESERVED (1U << 12)
#define EXTENT_CLEAR_DATA_RESV (1U << 13)
/*
 * Must be cleared only during ordered extent completion or on error paths if we
 * did not manage to submit bios and create the ordered extents for the range.
 * Should not be cleared during page release and page invalidation (if there is
 * an ordered extent in flight), that is left for the ordered extent completion.
 */
#define EXTENT_DELALLOC_NEW (1U << 14)
/*
 * When an ordered extent successfully completes for a region marked as a new
 * delalloc range, use this flag when clearing a new delalloc range to indicate
 * that the VFS' inode number of bytes should be incremented and the inode's new
 * delalloc bytes decremented, in an atomic way to prevent races with stat(2).
 */
#define EXTENT_ADD_INODE_BYTES (1U << 15)
/*
 * Set during truncate when we're clearing an entire range and we just want the
 * extent states to go away.
 */
#define EXTENT_CLEAR_ALL_BITS (1U << 16)

/* Bits that release both metadata and data space reservations when cleared. */
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
EXTENT_CLEAR_DATA_RESV)
/* Control bits: flags passed to the clear paths that are not stored state. */
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | \
EXTENT_ADD_INODE_BYTES | \
EXTENT_CLEAR_ALL_BITS)

/*
 * Redefined bits above which are used only in the device allocation tree,
 * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
 * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
 * manipulation functions
 */
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED EXTENT_DEFRAG
#define CHUNK_STATE_MASK (CHUNK_ALLOCATED | \
CHUNK_TRIMMED)
/*
 * Identifiers for who owns an extent_io_tree; stored in
 * extent_io_tree::owner ("should be one of IO_TREE_*" per the struct
 * comment below). Presumably used for debugging/tracing — confirm at
 * the tree-manipulation call sites.
 */
enum {
	IO_TREE_FS_PINNED_EXTENTS,
	IO_TREE_FS_EXCLUDED_EXTENTS,
	IO_TREE_BTREE_INODE_IO,
	IO_TREE_INODE_IO,
	IO_TREE_RELOC_BLOCKS,
	IO_TREE_TRANS_DIRTY_PAGES,
	IO_TREE_ROOT_DIRTY_LOG_PAGES,
	IO_TREE_INODE_FILE_EXTENT,
	IO_TREE_LOG_CSUM_RANGE,
	IO_TREE_SELFTEST,
	IO_TREE_DEVICE_ALLOC_STATE,
};
/*
 * A tree of per-range state records (struct extent_state), each covering an
 * inclusive [start, end] byte range and carrying a bitmask of EXTENT_* bits.
 */
struct extent_io_tree {
	/* Red-black tree of extent_state nodes, keyed by range. */
	struct rb_root state;
	struct btrfs_fs_info *fs_info;
	/* Opaque owner-specific pointer — presumably the owning inode for
	 * inode trees; confirm against extent_io_tree_init() callers. */
	void *private_data;
	/* Who owns this io tree, should be one of IO_TREE_* */
	u8 owner;
	/* Protects the rb tree and the states within it. */
	spinlock_t lock;
};
/*
 * One node in an extent_io_tree: an inclusive byte range plus the EXTENT_*
 * bits currently set on it. Refcounted; freed via free_extent_state().
 */
struct extent_state {
	u64 start;
	u64 end; /* inclusive */
	struct rb_node rb_node;
	/* ADD NEW ELEMENTS AFTER THIS */
	/* Waiters blocked on bits in this range (see wait_extent_bit()). */
	wait_queue_head_t wq;
	refcount_t refs;
	/* Bitmask of EXTENT_* bits set on this range. */
	u32 state;
#ifdef CONFIG_BTRFS_DEBUG
	/* Tracks live states so leaks can be reported in debug builds. */
	struct list_head leak_list;
#endif
};
/* Initialize @tree with its owner id and owner-specific @private_data. */
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data);
/* Drop all state records from @tree. */
void extent_io_tree_release(struct extent_io_tree *tree);
/* Lock [start, end] (EXTENT_LOCKED), sleeping until it is available. */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached);
/* Non-blocking variant of lock_extent(). */
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int __init extent_state_init_cachep(void);
void __cold extent_state_free_cachep(void);
/* Count bytes with @bits set in [*start, search_end], up to @max_bytes. */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end,
		     u64 max_bytes, u32 bits, int contig);
/* Drop a reference on @state, freeing it when the count hits zero. */
void free_extent_state(struct extent_state *state);
/* Test whether @bits are set in [start, end]; @filled presumably requires
 * the whole range to be covered — confirm at the definition. */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached_state);
/* Clear @bits and record the affected ranges in @changeset. */
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset);
/* Core clear helper; the static inline wrappers below fix mask/changeset. */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached, gfp_t mask,
		       struct extent_changeset *changeset);
  107. static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start,
  108. u64 end, u32 bits,
  109. struct extent_state **cached)
  110. {
  111. return __clear_extent_bit(tree, start, end, bits, cached,
  112. GFP_NOFS, NULL);
  113. }
  114. static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
  115. struct extent_state **cached)
  116. {
  117. return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
  118. GFP_NOFS, NULL);
  119. }
  120. static inline int unlock_extent_atomic(struct extent_io_tree *tree, u64 start,
  121. u64 end, struct extent_state **cached)
  122. {
  123. return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached,
  124. GFP_ATOMIC, NULL);
  125. }
  126. static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
  127. u64 end, u32 bits)
  128. {
  129. return clear_extent_bit(tree, start, end, bits, NULL);
  130. }
/* Set @bits and record the affected ranges in @changeset. */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset);
/* Core set helper; the static inline wrappers below fix bits/gfp mask. */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask);
  135. static inline int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start,
  136. u64 end, u32 bits)
  137. {
  138. return set_extent_bit(tree, start, end, bits, NULL, GFP_NOWAIT);
  139. }
  140. static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
  141. u64 end, u32 bits)
  142. {
  143. return set_extent_bit(tree, start, end, bits, NULL, GFP_NOFS);
  144. }
  145. static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
  146. u64 end, struct extent_state **cached_state)
  147. {
  148. return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
  149. cached_state, GFP_NOFS, NULL);
  150. }
  151. static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
  152. u64 end, gfp_t mask)
  153. {
  154. return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, mask);
  155. }
  156. static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
  157. u64 end, struct extent_state **cached)
  158. {
  159. return clear_extent_bit(tree, start, end,
  160. EXTENT_DIRTY | EXTENT_DELALLOC |
  161. EXTENT_DO_ACCOUNTING, cached);
  162. }
/* Atomically set @bits and clear @clear_bits over [start, end]. */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state);
  166. static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
  167. u64 end, u32 extra_bits,
  168. struct extent_state **cached_state)
  169. {
  170. return set_extent_bit(tree, start, end,
  171. EXTENT_DELALLOC | extra_bits,
  172. cached_state, GFP_NOFS);
  173. }
  174. static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
  175. u64 end, struct extent_state **cached_state)
  176. {
  177. return set_extent_bit(tree, start, end,
  178. EXTENT_DELALLOC | EXTENT_DEFRAG,
  179. cached_state, GFP_NOFS);
  180. }
  181. static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
  182. u64 end)
  183. {
  184. return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, GFP_NOFS);
  185. }
  186. static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
  187. u64 end, struct extent_state **cached_state, gfp_t mask)
  188. {
  189. return set_extent_bit(tree, start, end, EXTENT_UPTODATE,
  190. cached_state, mask);
  191. }
/* Find the first range at or after @start with any of @bits set. */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state);
/* Find the first range at or after @start where @bits are not set. */
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits);
/* Find a contiguous run of ranges with @bits set starting at @start. */
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits);
/* Locate a delalloc range near *start, bounded by @max_bytes; returns
 * whether one was found — confirm exact in/out contract at the definition. */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state);
/* Sleep until @bits are clear over [start, end] (see extent_state::wq). */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits);

#endif /* BTRFS_EXTENT_IO_TREE_H */