/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer error printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
	spinlock_t b_uptodate_lock;	/* Used by the first bh in a page, to
					 * serialise IO completion of other
					 * buffers in the page */
};
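
/*
 * Example (editorial sketch, not part of the kernel header): a minimal
 * read-and-inspect cycle using the helpers declared below.  __bread()
 * returns the bh with an elevated b_count, so it must be balanced with
 * brelse().  The device, block number and size here are made up for
 * illustration:
 *
 *	struct buffer_head *bh = __bread(bdev, 42, 4096);
 *
 *	if (bh) {
 *		// bh->b_data points at b_size bytes of block contents
 *		pr_info("first byte: %#x\n", (unsigned char)bh->b_data[0]);
 *		brelse(bh);	// drop the reference taken by __bread()
 *	}
 */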
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 *
 * To avoid resetting buffer flags that are already set, because that
 * causes a costly cache line transition, check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
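
/*
 * For reference (editorial note): BUFFER_FNS(Dirty, dirty) expands to
 * three inline helpers, roughly:
 *
 *	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		if (!test_bit(BH_Dirty, &(bh)->b_state))
 *			set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_dirty(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */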
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
	/*
	 * If somebody else already set this uptodate, they will
	 * have done the memory barrier, and a reader will thus
	 * see *some* valid buffer state.
	 *
	 * Any other serialization (with IO errors or whatever that
	 * might clear the bit) has to come from other state (eg BH_Lock).
	 */
	if (test_bit(BH_Uptodate, &bh->b_state))
		return;

	/*
	 * make it consistent with folio_mark_uptodate
	 * pairs with smp_load_acquire in buffer_uptodate
	 */
	smp_mb__before_atomic();
	set_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
{
	clear_bit(BH_Uptodate, &bh->b_state);
}

static __always_inline int buffer_uptodate(const struct buffer_head *bh)
{
	/*
	 * make it consistent with folio_test_uptodate
	 * pairs with smp_mb__before_atomic in set_buffer_uptodate
	 */
	return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
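
/*
 * Example (editorial sketch): the barrier pairing above means a
 * completion path can publish data with set_buffer_uptodate(), and a
 * reader that then observes buffer_uptodate() also sees the filled-in
 * contents.  Roughly:
 *
 *	// completion side (e.g. an end_io handler)
 *	memcpy(bh->b_data, src, bh->b_size);	// fill the buffer
 *	set_buffer_uptodate(bh);		// barrier, then set the bit
 *	unlock_buffer(bh);
 *
 *	// reader side
 *	if (buffer_uptodate(bh))		// acquire-ordered test
 *		use(bh->b_data);		// hypothetical consumer; sees
 *						// the data written above
 */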
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
#define folio_buffers(folio)		folio_get_private(folio)
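
/*
 * Example (editorial sketch): b_this_page forms a circular list, so a
 * page's buffers are conventionally walked with a do/while loop:
 *
 *	struct buffer_head *head, *bh;
 *	int nr_dirty = 0;
 *
 *	if (page_has_buffers(page)) {
 *		bh = head = page_buffers(page);
 *		do {
 *			if (buffer_dirty(bh))
 *				nr_dirty++;	// one block's worth of state
 *			bh = bh->b_this_page;
 *		} while (bh != head);
 *	}
 */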
void buffer_check_dirty_writeback(struct folio *folio,
				  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		 struct page *page, unsigned long offset);
bool try_to_free_buffers(struct folio *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
				       bool retry);
void create_empty_buffers(struct page *, unsigned long,
			  unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
				     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			  sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
		     blk_opf_t op_flags, bool force_lock);

extern int buffer_heads_over_limit;
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block, struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_folio(struct folio *, get_block_t *);
bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline vm_fault_t block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
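
/*
 * Example (editorial sketch): a filesystem's ->page_mkwrite() handler
 * would typically funnel the errno from block_page_mkwrite() through
 * this helper.  myfs_get_block is a hypothetical get_block_t:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */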
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
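
/*
 * Example (editorial sketch): a simple block-backed filesystem can wire
 * most of its address_space_operations straight to these generics.  The
 * myfs_* names are hypothetical; only the per-filesystem glue differs:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *		.read_folio		= myfs_read_folio,
 *		.write_begin		= myfs_write_begin,
 *		.write_end		= generic_write_end,
 *		.bmap			= myfs_bmap,
 *	};
 */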
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
		struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif

void buffer_init(void);

/*
 * inline definitions
 */
static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}
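
/*
 * Example (editorial sketch): get_bh()/put_bh() pin a buffer_head across
 * an async operation so it cannot be freed while the I/O is in flight:
 *
 *	lock_buffer(bh);
 *	get_bh(bh);			// reference held for the I/O
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(REQ_OP_READ, bh);	// completion unlocks the buffer
 *	wait_on_buffer(bh);		// and drops the ref with put_bh()
 */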
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
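
/*
 * Example (editorial sketch): the classic metadata update cycle using
 * the sb_* helpers.  sb_bread() reads and returns an uptodate bh (or
 * NULL); sb_getblk() only instantiates the bh without I/O:
 *
 *	struct buffer_head *bh = sb_bread(sb, blocknr);
 *	int err;
 *
 *	if (!bh)
 *		return -EIO;
 *	lock_buffer(bh);
 *	memcpy(bh->b_data + offset, data, len);	// modify the block
 *	mark_buffer_dirty(bh);			// schedule writeback
 *	unlock_buffer(bh);
 *	err = sync_dirty_buffer(bh);		// optional: force it out now
 *	brelse(bh);
 *	return err;
 */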
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
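
/*
 * Example (editorial sketch): map_bh() is the usual way a get_block_t
 * implementation reports a mapping back to the generic code.  The
 * myfs_extent_lookup() helper is hypothetical:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys;
 *		int err = myfs_extent_lookup(inode, iblock, &phys, create);
 *
 *		if (err)
 *			return err;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */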
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
		if (!buffer_uptodate(bh))
			__bh_read(bh, op_flags, false);
		else
			unlock_buffer(bh);
	}
}

static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (!bh_uptodate_or_lock(bh))
		__bh_read(bh, op_flags, false);
}
/*
 * Returns 1 if the buffer was already uptodate, 0 after a successful
 * synchronous read, and -EIO on error.
 */
static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
{
	if (bh_uptodate_or_lock(bh))
		return 1;
	return __bh_read(bh, op_flags, true);
}

static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
{
	__bh_read_batch(nr, bhs, 0, true);
}

static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
				      blk_opf_t op_flags)
{
	__bh_read_batch(nr, bhs, op_flags, false);
}
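
/*
 * Example (editorial sketch): bh_read() brings an already-mapped bh
 * uptodate synchronously, while bh_readahead() just kicks off an async
 * read as a hint.  next_bh is a hypothetical prefetch target:
 *
 *	struct buffer_head *bh = sb_getblk(sb, blocknr);	// no I/O yet
 *
 *	if (!bh)
 *		return -ENOMEM;
 *	bh_readahead(next_bh, REQ_RAHEAD);	// async hint, never waits
 *	if (bh_read(bh, 0) < 0) {		// waits for the read
 *		brelse(bh);
 *		return -EIO;
 *	}
 *	// bh->b_data is now valid
 */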
/**
 * __bread() - reads a specified block and returns the bh
 * @bdev: the block_device to read from
 * @block: number of block
 * @size: size (in bytes) to read
 *
 * Reads a specified block, and returns buffer head that contains it.
 * The page cache is allocated from movable area so that it can be migrated.
 * It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */