swapops.h 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_SWAPOPS_H
  3. #define _LINUX_SWAPOPS_H
  4. #include <linux/radix-tree.h>
  5. #include <linux/bug.h>
  6. #include <linux/mm_types.h>
  7. #ifdef CONFIG_MMU
  8. #ifdef CONFIG_SWAP
  9. #include <linux/swapfile.h>
  10. #endif /* CONFIG_SWAP */
/*
 * swapcache pages are stored in the swapper_space radix tree. We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits. Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
/* Number of low-order bits carrying the offset; the type sits above them */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
/* Mask extracting the offset field of an arch-independent swp_entry_t */
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
 * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
/* The arch states the maximum physical address width explicitly */
#define SWP_PFN_BITS	(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else /* MAX_PHYSMEM_BITS */
/*
 * No arch limit available: fall back to what phys_addr_t can represent,
 * capped by the width of the swp offset field itself.
 */
#define SWP_PFN_BITS	min_t(int, \
			      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
			      SWP_TYPE_SHIFT)
#endif /* MAX_PHYSMEM_BITS */
/* Low SWP_PFN_BITS bits of a pfn swap entry's offset hold the PFN */
#define SWP_PFN_MASK	(BIT(SWP_PFN_BITS) - 1)
/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type |     swp_offset     |
 *   |----------+--------+-+-+-------|
 *   |          |  resv  |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there're enough
 * free bits in arch specific swp offset.  By default we'll ignore A/D bits
 * when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there're more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
/* A/D flag bits live directly above the PFN field inside swp_offset */
#define SWP_MIG_YOUNG_BIT	(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT	(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS	(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG	BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY	BIT(SWP_MIG_DIRTY_BIT)
/* Forward declaration; defined below once the entry predicates exist */
static inline bool is_pfn_swap_entry(swp_entry_t entry);

/*
 * Clear all flags but only keep swp_entry_t related information.
 *
 * The software bits (exclusive, soft-dirty, uffd-wp) are stored in the
 * arch pte alongside the swap entry; they must be stripped before the
 * pte can be converted to an arch-independent swp_entry_t.  Each bit is
 * tested first since the clear helpers are arch-provided.
 */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}
  73. /*
  74. * Store a type+offset into a swp_entry_t in an arch-independent format
  75. */
  76. static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
  77. {
  78. swp_entry_t ret;
  79. ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
  80. return ret;
  81. }
  82. /*
  83. * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
  84. * arch-independent format
  85. */
  86. static inline unsigned swp_type(swp_entry_t entry)
  87. {
  88. return (entry.val >> SWP_TYPE_SHIFT);
  89. }
  90. /*
  91. * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
  92. * arch-independent format
  93. */
  94. static inline pgoff_t swp_offset(swp_entry_t entry)
  95. {
  96. return entry.val & SWP_OFFSET_MASK;
  97. }
/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refers to is_pfn_swap_entry() for definition
 * of pfn swap entry.
 *
 * The VM_BUG_ON catches callers that pass a non-pfn entry, whose offset
 * field would not contain a PFN in its low SWP_PFN_BITS bits.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}
  108. /* check whether a pte points to a swap entry */
  109. static inline int is_swap_pte(pte_t pte)
  110. {
  111. return !pte_none(pte) && !pte_present(pte);
  112. }
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 *
 * The software pte bits are stripped first (see pte_swp_clear_flags());
 * the arch macros then yield the raw type/offset which are repacked into
 * the canonical layout.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
  124. /*
  125. * Convert the arch-independent representation of a swp_entry_t into the
  126. * arch-dependent pte representation.
  127. */
  128. static inline pte_t swp_entry_to_pte(swp_entry_t entry)
  129. {
  130. swp_entry_t arch_entry;
  131. arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
  132. return __swp_entry_to_pte(arch_entry);
  133. }
  134. static inline swp_entry_t radix_to_swp_entry(void *arg)
  135. {
  136. swp_entry_t entry;
  137. entry.val = xa_to_value(arg);
  138. return entry;
  139. }
  140. static inline void *swp_to_radix_entry(swp_entry_t entry)
  141. {
  142. return xa_mk_value(entry.val);
  143. }
/*
 * Swapin-error entries mark ptes whose swapped content could not be read
 * back; the page's PFN is kept in the offset field.
 */
static inline swp_entry_t make_swapin_error_entry(struct page *page)
{
	return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
}

static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_SWAPIN_ERROR;
}
#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
/*
 * Device private entries represent unaddressable device memory (see the
 * comment above is_pfn_swap_entry()); the READ/WRITE swap type records
 * whether the original mapping was writable.
 */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

/*
 * Device exclusive entries grant a device temporary exclusive access to
 * a page; again READ/WRITE tracks the original mapping's writability.
 */
static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
		swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
/*
 * !CONFIG_DEVICE_PRIVATE stubs: constructors return the null entry and
 * all predicates are constant false, so callers compile away.
 */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */
#ifdef CONFIG_MIGRATION
/* True for any of the three migration flavours (read/read-exclusive/write) */
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

/* Constructors; @offset carries the page's PFN (see is_pfn_swap_entry()) */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}
/*
 * Returns whether the host has large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations.  The result is
 * pretty much arch specific.
 *
 * swap_migration_ad_supported is provided by <linux/swapfile.h>, which is
 * only included when CONFIG_SWAP is set (see the top of this file).
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else /* CONFIG_SWAP */
	return false;
#endif /* CONFIG_SWAP */
}
/* Tag a migration entry with the pgtable young (accessed) bit, if storable */
static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging page after migration */
	return false;
}

/* Tag a migration entry with the pgtable dirty bit, if storable */
static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of clean page after migration */
	return false;
}

/* Implemented in mm/migrate.c: block until a migration entry is resolved */
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#endif	/* CONFIG_HUGETLB_PAGE */
#else /* CONFIG_MIGRATION */
/*
 * !CONFIG_MIGRATION stubs: constructors return the null entry, predicates
 * are constant false/0, waits are no-ops, and the young/dirty helpers
 * pass entries through unchanged.
 */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
#endif	/* CONFIG_HUGETLB_PAGE */

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif	/* CONFIG_MIGRATION */
/* Bitmask of marker flags stored in a SWP_PTE_MARKER entry's offset */
typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP	BIT(0)
#define PTE_MARKER_MASK		(PTE_MARKER_UFFD_WP)

#ifdef CONFIG_PTE_MARKER

/* Build a marker entry; the marker flags go into the offset field */
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

/* Retrieve the marker flags; unknown bits are masked off */
static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

/* Whether @pte is a swap pte carrying a marker entry */
static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}
#else /* CONFIG_PTE_MARKER */

/*
 * !CONFIG_PTE_MARKER stubs: no marker entry should ever be constructed,
 * hence the WARN_ON_ONCE; the predicates are constant false.
 */
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	/* This should never be called if !CONFIG_PTE_MARKER */
	WARN_ON_ONCE(1);
	return swp_entry(0, 0);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return false;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return 0;
}

static inline bool is_pte_marker(pte_t pte)
{
	return false;
}

#endif /* CONFIG_PTE_MARKER */
  391. static inline pte_t make_pte_marker(pte_marker marker)
  392. {
  393. return swp_entry_to_pte(make_pte_marker_entry(marker));
  394. }
  395. /*
  396. * This is a special version to check pte_none() just to cover the case when
  397. * the pte is a pte marker. It existed because in many cases the pte marker
  398. * should be seen as a none pte; it's just that we have stored some information
  399. * onto the none pte so it becomes not-none any more.
  400. *
  401. * It should be used when the pte is file-backed, ram-based and backing
  402. * userspace pages, like shmem. It is not needed upon pgtables that do not
  403. * support pte markers at all. For example, it's not needed on anonymous
  404. * memory, kernel-only memory (including when the system is during-boot),
  405. * non-ram based generic file-system. It's fine to be used even there, but the
  406. * extra pte marker check will be pure overhead.
  407. *
  408. * For systems configured with !CONFIG_PTE_MARKER this will be automatically
  409. * optimized to pte_none().
  410. */
  411. static inline int pte_none_mostly(pte_t pte)
  412. {
  413. return pte_none(pte) || is_pte_marker(pte);
  414. }
/*
 * Resolve the struct page backing a pfn swap entry.  Must only be called
 * on entries for which is_pfn_swap_entry() holds, since the PFN is read
 * straight from the offset field.
 */
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}
  425. /*
  426. * A pfn swap entry is a special type of swap entry that always has a pfn stored
  427. * in the swap offset. They are used to represent unaddressable device memory
  428. * and to restrict access to a page undergoing migration.
  429. */
  430. static inline bool is_pfn_swap_entry(swp_entry_t entry)
  431. {
  432. /* Make sure the swp offset can always store the needed fields */
  433. BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
  434. return is_migration_entry(entry) || is_device_private_entry(entry) ||
  435. is_device_exclusive_entry(entry);
  436. }
struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
/* Implemented in mm/huge_memory.c: PMD-level migration entry handling */
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

/*
 * PMD analogue of pte_to_swp_entry(): strip the software bits, then
 * repack the arch type/offset into the arch-independent layout.
 */
static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/* PMD analogue of swp_entry_to_pte() */
static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
/*
 * Stubs when the arch cannot migrate THPs: callers must never reach the
 * first two at runtime, which BUILD_BUG() enforces at compile time (they
 * may only appear in dead code paths).
 */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif	/* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE

/* Global count of hardware-poisoned pages, defined in mm/memory-failure.c */
extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
/* Build a hwpoison entry; the page's PFN is kept in the offset field */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_sub(long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
}

#else /* CONFIG_MEMORY_FAILURE */
/*
 * !CONFIG_MEMORY_FAILURE stubs: no hwpoison entries exist, and the
 * poisoned-page counter updates become no-ops.
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_sub(long i)
{
}
#endif	/* CONFIG_MEMORY_FAILURE */
  527. static inline int non_swap_entry(swp_entry_t entry)
  528. {
  529. return swp_type(entry) >= MAX_SWAPFILES;
  530. }
  531. #endif /* CONFIG_MMU */
  532. #endif /* _LINUX_SWAPOPS_H */