page_ref.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

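/*
 * page_ref_count() reads the raw _refcount of exactly this struct page and
 * does not resolve a tail page to its compound head; use page_count() below
 * when the count of the containing folio is what you want.
 */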
static inline int page_ref_count(const struct page *page)
{
        return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put(). Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
        return page_ref_count(&folio->page);
}

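/*
 * page_count() reports the reference count of the folio that contains
 * @page, so for a tail page it returns the head page's count rather than
 * the tail page's own _refcount (which is normally zero).
 */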
static inline int page_count(const struct page *page)
{
        return folio_ref_count(page_folio(page));
}

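/*
 * set_page_count() and folio_set_count() overwrite the refcount
 * unconditionally (and fire the page_ref_set tracepoint when enabled);
 * they are only safe on pages the caller exclusively owns, e.g. when
 * initialising a freshly allocated or about-to-be-freed page.
 */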
static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_refcount, v);
        if (page_ref_tracepoint_active(page_ref_set))
                __page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
        set_page_count(&folio->page, v);
}

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
        set_page_count(page, 1);
}

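/*
 * The helpers below mirror the corresponding atomic_*() operations on
 * page->_refcount and, when CONFIG_DEBUG_PAGE_REF is enabled, emit the
 * matching page_ref_mod* tracepoints.  The folio_ref_*() wrappers apply
 * the same operation to the folio's head page.
 */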
static inline void page_ref_add(struct page *page, int nr)
{
        atomic_add(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
        page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
        atomic_sub(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
        page_ref_sub(&folio->page, nr);
}

static inline int page_ref_sub_return(struct page *page, int nr)
{
        int ret = atomic_sub_return(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -nr, ret);
        return ret;
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
        return page_ref_sub_return(&folio->page, nr);
}

static inline void page_ref_inc(struct page *page)
{
        atomic_inc(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
        page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
        atomic_dec(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
        page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
        int ret = atomic_sub_and_test(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -nr, ret);
        return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
        return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
        int ret = atomic_inc_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, 1, ret);
        return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
        return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
        int ret = atomic_dec_and_test(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -1, ret);
        return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
        return page_ref_dec_and_test(&folio->page);
}

static inline int page_ref_dec_return(struct page *page)
{
        int ret = atomic_dec_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -1, ret);
        return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
        return page_ref_dec_return(&folio->page);
}

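/*
 * page_ref_add_unless() adds @nr to the refcount unless it is currently
 * equal to @u, returning true if the addition was performed.  With @u == 0
 * this is the building block for the "speculative get" helpers below: a
 * page whose refcount has already dropped to zero may be freed at any
 * moment, so taking a new reference from zero is never allowed.
 */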
static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
        bool ret = atomic_add_unless(&page->_refcount, nr, u);

        if (page_ref_tracepoint_active(page_ref_mod_unless))
                __page_ref_mod_unless(page, nr, ret);
        return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
        return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function. It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
        return folio_ref_add_unless(folio, 1, 0);
}

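/*
 * A minimal usage sketch for folio_try_get() (illustrative only;
 * lookup_folio_somehow() is a stand-in for whatever RCU-protected
 * structure the folio pointer was found in):
 *
 *      rcu_read_lock();
 *      folio = lookup_folio_somehow();
 *      if (folio && !folio_try_get(folio))
 *              folio = NULL;
 *      rcu_read_unlock();
 *
 * A NULL result here means the folio was freed or frozen under us and the
 * lookup should be retried; callers also normally re-validate that the
 * folio still matches what they looked up once the reference is held.
 */
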
static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
        /*
         * The caller guarantees the folio will not be freed from interrupt
         * context, so (on !SMP) we only need preemption to be disabled
         * and TINY_RCU does that for us.
         */
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
        folio_ref_add(folio, count);
#else
        if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
                /* Either the folio has been freed, or will be freed. */
                return false;
        }
#endif
        return true;
}

/**
 * folio_try_get_rcu - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * This is a version of folio_try_get() optimised for non-SMP kernels.
 * If you are still holding the rcu_read_lock() after looking up the
 * page and know that the page cannot have its refcount decreased to
 * zero in interrupt context, you can use this instead of folio_try_get().
 *
 * Example users include get_user_pages_fast() (as pages are not unmapped
 * from interrupt context) and the page cache lookups (as pages are not
 * truncated from interrupt context). We also know that pages are not
 * frozen in interrupt context for the purposes of splitting or migration.
 *
 * You can also use this function if you're holding a lock that prevents
 * pages being frozen & removed; eg the i_pages lock for the page cache
 * or the mmap_sem or page table lock for page tables. In this case,
 * it will always succeed, and you could have used a plain folio_get(),
 * but it's sometimes more convenient to have a common function called
 * from both locked and RCU-protected contexts.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get_rcu(struct folio *folio)
{
        return folio_ref_try_add_rcu(folio, 1);
}

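/*
 * page_ref_freeze() atomically replaces an expected refcount of @count
 * with zero.  While frozen, speculative lookups via folio_try_get() /
 * page_ref_add_unless(..., 0) fail, which is what the splitting and
 * migration paths rely on.  It returns 1 on success and 0 if the count
 * did not match.  page_ref_unfreeze() publishes the new count with
 * release semantics; the page must still be frozen when it is called.
 */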
static inline int page_ref_freeze(struct page *page, int count)
{
        int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

        if (page_ref_tracepoint_active(page_ref_freeze))
                __page_ref_freeze(page, count, ret);
        return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
        return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set_release(&page->_refcount, count);
        if (page_ref_tracepoint_active(page_ref_unfreeze))
                __page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
        page_ref_unfreeze(&folio->page, count);
}

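/*
 * Typical freeze/unfreeze pairing (a sketch; computing expected_refs and
 * the surrounding locking are the caller's responsibility):
 *
 *      if (folio_ref_freeze(folio, expected_refs)) {
 *              ... update the folio; no new references can be taken ...
 *              folio_ref_unfreeze(folio, expected_refs);
 *      }
 */
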
#endif