/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAP_H
#define _LINUX_SWAP_H

#include <linux/spinlock.h>
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <uapi/linux/mempolicy.h>
#include <asm/page.h>
#include <linux/android_vendor.h>	/* for ANDROID_VENDOR_DATA() used below */

struct notifier_block;

struct bio;

struct pagevec;

#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0
#define SWAP_FLAG_DISCARD	0x10000 /* enable discard for swap */
#define SWAP_FLAG_DISCARD_ONCE	0x20000 /* discard swap area at swapon-time */
#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */

#define SWAP_FLAGS_VALID	(SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
				 SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
				 SWAP_FLAG_DISCARD_PAGES)
#define SWAP_BATCH 64

int kswapd(void *p);

static inline int current_is_kswapd(void)
{
	return current->flags & PF_KSWAPD;
}
/*
 * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
 * be swapped to.  The swap type and the offset into that swap type are
 * encoded into pte's and into pgoff_t's in the swapcache.  Using five bits
 * for the type means that the maximum number of swapcache pages is 27 bits
 * on 32-bit-pgoff_t architectures.  And that assumes that the architecture
 * packs the type/offset into the pte as 5/27 as well.
 */
#define MAX_SWAPFILES_SHIFT	5
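
/*
 * Illustrative sketch (not part of the original header): with a 5-bit type
 * and a 32-bit pgoff_t, an entry splits into 5 type bits and 27 offset bits.
 * The helpers in <linux/swapops.h> do the packing, conceptually:
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);	// type 2, page offset 0x1234
 *	// swp_type(e) == 2, swp_offset(e) == 0x1234
 */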

/*
 * Use some of the swap files numbers for other purposes. This
 * is a convenient way to hook into the VM to trigger special
 * actions on faults.
 */

#define SWP_SWAPIN_ERROR_NUM 1
#define SWP_SWAPIN_ERROR     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			     SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
			     SWP_PTE_MARKER_NUM)
/*
 * PTE markers are used to persist information onto PTEs that are mapped with
 * file-backed memories. As its name "PTE" hints, it should only be applied to
 * the leaves of pgtables.
 */
#ifdef CONFIG_PTE_MARKER
#define SWP_PTE_MARKER_NUM 1
#define SWP_PTE_MARKER     (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
			    SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
#else
#define SWP_PTE_MARKER_NUM 0
#endif

/*
 * Unaddressable device memory support. See include/linux/hmm.h and
 * Documentation/mm/hmm.rst. The short description is that we need struct
 * pages for device memory that is unaddressable (inaccessible) by the CPU,
 * so that we can migrate part of a process's memory to device memory.
 *
 * When a page is migrated from CPU to device, we set the CPU page table entry
 * to a special SWP_DEVICE_{READ|WRITE} entry.
 *
 * When a page is mapped by the device for exclusive access we set the CPU page
 * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
 */
#ifdef CONFIG_DEVICE_PRIVATE
#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif

/*
 * Page migration support.
 *
 * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
 * indicates that the referenced (part of) an anonymous page is exclusive to
 * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
 * (part of) an anonymous page that is mapped writable is exclusive to a
 * single process.
 */
#ifdef CONFIG_MIGRATION
#define SWP_MIGRATION_NUM 3
#define SWP_MIGRATION_READ	(MAX_SWAPFILES + SWP_HWPOISON_NUM)
#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
#define SWP_MIGRATION_WRITE	(MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif

/*
 * Handling of hardware poisoned pages with memory corruption.
 */
#ifdef CONFIG_MEMORY_FAILURE
#define SWP_HWPOISON_NUM 1
#define SWP_HWPOISON		MAX_SWAPFILES
#else
#define SWP_HWPOISON_NUM 0
#endif

#define MAX_SWAPFILES \
	((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
	SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
	SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
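
/*
 * Worked example (illustrative, not part of the original header): with
 * CONFIG_DEVICE_PRIVATE, CONFIG_MIGRATION, CONFIG_MEMORY_FAILURE and
 * CONFIG_PTE_MARKER all enabled, the 2^5 = 32 available types shrink to
 *
 *	MAX_SWAPFILES = 32 - 4 - 3 - 1 - 1 - 1 = 22
 *
 * real swap devices, with the remaining ten types reserved for the special
 * entries defined above.
 */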

/*
 * Magic header for a swap area. The first part of the union is
 * what the swap magic looks like for the old (limited to 128MB)
 * swap area format, the second part of the union adds - in the
 * old reserved area - some extra information. Note that the first
 * kilobyte is reserved for boot loader or disk label stuff...
 *
 * Having the magic at the end of the PAGE_SIZE makes detecting swap
 * areas somewhat tricky on machines that support multiple page sizes.
 * For 2.5 we'll probably want to move the magic to just beyond the
 * bootbits...
 */
union swap_header {
	struct {
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	struct {
		char		bootbits[1024];	/* Space for disklabel etc. */
		__u32		version;
		__u32		last_page;
		__u32		nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		__u32		padding[117];
		__u32		badpages[1];
	} info;
};
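
/*
 * Illustrative layout sketch (assumes a 4096-byte PAGE_SIZE; not part of the
 * original header): the two union members overlay the same page, so the
 * on-disk offsets work out to
 *
 *	info.bootbits	 @ 0	(1024 bytes left for bootloader/disklabel)
 *	info.version	 @ 1024
 *	info.last_page	 @ 1028
 *	info.nr_badpages @ 1032
 *	info.badpages	 @ 1536
 *	magic.magic	 @ 4086	("SWAPSPACE2" as written by mkswap)
 */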

/*
 * current->reclaim_state points to one of these when a task is running
 * memory reclaim
 */
struct reclaim_state {
	unsigned long reclaimed_slab;
#ifdef CONFIG_LRU_GEN
	/* per-thread mm walk data */
	struct lru_gen_mm_walk *mm_walk;
#endif
};

#ifdef __KERNEL__

struct address_space;
struct sysinfo;
struct writeback_control;
struct zone;

/*
 * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
 * disk blocks.  An rbtree of swap extents maps the entire swapfile (where the
 * term `swapfile' refers to either a blockdevice or an IS_REG file).  Apart
 * from setup, they're handled identically.
 *
 * We always assume that blocks are of size PAGE_SIZE.
 */
struct swap_extent {
	struct rb_node rb_node;
	pgoff_t start_page;
	pgoff_t nr_pages;
	sector_t start_block;
};

/*
 * Max bad pages in the new format..
 */
#define MAX_SWAP_BADPAGES \
	((offsetof(union swap_header, magic.magic) - \
	  offsetof(union swap_header, info.badpages)) / sizeof(int))
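
/*
 * Worked example (illustrative, not part of the original header; assumes a
 * 4096-byte PAGE_SIZE and 4-byte int):
 *
 *	MAX_SWAP_BADPAGES = ((4096 - 10) - 1536) / 4 = 637
 *
 * i.e. a v2 swap header can list at most 637 bad pages.
 */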

enum {
	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
	SWP_WRITEOK	= (1 << 1),	/* ok to write to this swap?	*/
	SWP_DISCARDABLE = (1 << 2),	/* blkdev supports discard */
	SWP_DISCARDING	= (1 << 3),	/* now discarding a free cluster */
	SWP_SOLIDSTATE	= (1 << 4),	/* blkdev seeks are cheap */
	SWP_CONTINUED	= (1 << 5),	/* swap_map has count continuation */
	SWP_BLKDEV	= (1 << 6),	/* it's a block device */
	SWP_ACTIVATED	= (1 << 7),	/* set after swap_activate success */
	SWP_FS_OPS	= (1 << 8),	/* swapfile operations go through fs */
	SWP_AREA_DISCARD = (1 << 9),	/* single-time swap area discards */
	SWP_PAGE_DISCARD = (1 << 10),	/* freed swap page-cluster discards */
	SWP_STABLE_WRITES = (1 << 11),	/* no overwrite PG_writeback pages */
	SWP_SYNCHRONOUS_IO = (1 << 12),	/* synchronous IO is efficient */
					/* add others here before... */
	SWP_SCANNING	= (1 << 14),	/* refcount in scan_swap_map */
};

#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
#define SWAP_HAS_CACHE	0x40	/* Flag page is cached, in first swap_map */
#define COUNT_CONTINUED	0x80	/* Flag swap_map continuation for full count */

/* Special value in first swap_map */
#define SWAP_MAP_MAX	0x3e	/* Max count */
#define SWAP_MAP_BAD	0x3f	/* Note page is bad */
#define SWAP_MAP_SHMEM	0xbf	/* Owned by shmem/tmpfs */

/* Special value in each swap_map continuation */
#define SWAP_CONT_MAX	0x7f	/* Max count */
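
/*
 * Illustrative sketch (not part of the original header): each swap_map byte
 * holds a use count in its low six bits plus the flags above, e.g.
 *
 *	0x01				one reference, not in swap cache
 *	0x41 (0x01 | SWAP_HAS_CACHE)	one reference plus a swap cache page
 *	0xbf (SWAP_MAP_SHMEM)		slot owned by shmem/tmpfs
 *
 * Counts above SWAP_MAP_MAX overflow into continuation pages, flagged by
 * COUNT_CONTINUED.
 */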

/*
 * We use this to track usage of a cluster. A cluster is a block of swap disk
 * space SWAPFILE_CLUSTER pages long, naturally aligned on disk. All free
 * clusters are organized into a list. We fetch an entry from the list to
 * get a free cluster.
 *
 * The data field stores the next cluster if the cluster is free, or the
 * cluster usage counter otherwise. The flags field determines if a cluster
 * is free. This is protected by swap_info_struct.lock.
 */
struct swap_cluster_info {
	spinlock_t lock;	/*
				 * Protect swap_cluster_info fields
				 * and swap_info_struct->swap_map
				 * elements corresponding to the swap
				 * cluster.
				 */
	unsigned int data:24;
	unsigned int flags:8;
};
#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */
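
/*
 * Illustrative reading of the fields (not part of the original header):
 *
 *	flags & CLUSTER_FLAG_FREE	data holds the index of the next free
 *					cluster (or the list ends here if
 *					CLUSTER_FLAG_NEXT_NULL is also set)
 *	flags == 0			data counts the in-use swap entries
 *					within this cluster
 */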

/*
 * We assign a cluster to each CPU, so each CPU can allocate swap entry from
 * its own cluster and swapout sequentially. The purpose is to optimize swapout
 * throughput.
 */
struct percpu_cluster {
	struct swap_cluster_info index; /* Current cluster index */
	unsigned int next; /* Likely next allocation offset */
};

struct swap_cluster_list {
	struct swap_cluster_info head;
	struct swap_cluster_info tail;
};

/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	struct percpu_ref users;	/* indicate and keep swap device valid. */
	unsigned long	flags;		/* SWP_USED etc: see above */
	signed short	prio;		/* swap priority of this type */
	struct plist_node list;		/* entry in swap_active_head */
	signed char	type;		/* strange name for an index */
	unsigned int	max;		/* extent of the swap_map */
	unsigned char *swap_map;	/* vmalloc'ed array of usage counts */
	struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
	struct swap_cluster_list free_clusters; /* free clusters list */
	unsigned int lowest_bit;	/* index of first free in swap_map */
	unsigned int highest_bit;	/* index of last free in swap_map */
	unsigned int pages;		/* total of usable pages of swap */
	unsigned int inuse_pages;	/* number of those currently in use */
	unsigned int cluster_next;	/* likely index for next allocation */
	unsigned int cluster_nr;	/* countdown to next cluster search */
	unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
	struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
	struct rb_root swap_extent_root;/* root of the swap extent rbtree */
	struct block_device *bdev;	/* swap device or bdev of swap file */
	struct file *swap_file;		/* seldom referenced */
	unsigned int old_block_size;	/* seldom referenced */
	struct completion comp;		/* seldom referenced */
#ifdef CONFIG_FRONTSWAP
	unsigned long *frontswap_map;	/* frontswap in-use, one bit per page */
	atomic_t frontswap_pages;	/* frontswap pages in-use counter */
#endif
	spinlock_t lock;		/*
					 * protect map scan related fields like
					 * swap_map, lowest_bit, highest_bit,
					 * inuse_pages, cluster_next,
					 * cluster_nr, lowest_alloc,
					 * highest_alloc, free/discard cluster
					 * list. Other fields are only changed
					 * at swapon/swapoff, so are protected
					 * by swap_lock. Changing flags requires
					 * holding this lock and swap_lock. If
					 * both locks must be held, take
					 * swap_lock first.
					 */
	spinlock_t cont_lock;		/*
					 * protect swap count continuation page
					 * list.
					 */
	struct work_struct discard_work; /* discard worker */
	struct swap_cluster_list discard_clusters; /* discard clusters list */
	ANDROID_VENDOR_DATA(1);
	struct plist_node avail_lists[]; /*
					  * entries in swap_avail_heads, one
					  * entry per node.
					  * Must be last, as the length of the
					  * array is nr_node_ids, which is not
					  * a fixed value, so it has to be
					  * allocated dynamically.
					  * And it has to be an array so that
					  * plist_for_each_* can work.
					  */
};
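
/*
 * Illustrative lock-ordering sketch (not part of the original header;
 * swap_lock itself lives in mm/swapfile.c): when both locks are needed,
 * take the global swap_lock before the per-device si->lock, e.g.
 *
 *	spin_lock(&swap_lock);
 *	spin_lock(&si->lock);
 *	si->flags |= SWP_WRITEOK;
 *	spin_unlock(&si->lock);
 *	spin_unlock(&swap_lock);
 */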

#ifdef CONFIG_64BIT
#define SWAP_RA_ORDER_CEILING	5
#else
/* Avoid stack overflow, because we need to save part of page table */
#define SWAP_RA_ORDER_CEILING	3
#define SWAP_RA_PTE_CACHE_SIZE	(1 << SWAP_RA_ORDER_CEILING)
#endif

struct vma_swap_readahead {
	unsigned short win;
	unsigned short offset;
	unsigned short nr_pte;
#ifdef CONFIG_64BIT
	pte_t *ptes;
#else
	pte_t ptes[SWAP_RA_PTE_CACHE_SIZE];
#endif
};

static inline swp_entry_t folio_swap_entry(struct folio *folio)
{
	swp_entry_t entry = { .val = page_private(&folio->page) };

	return entry;
}

static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
{
	folio->private = (void *)entry.val;
}
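
/*
 * Illustrative use (not part of the original header): a folio in the swap
 * cache keeps its swp_entry_t in folio->private, so callers can recover and
 * unpack it:
 *
 *	if (folio_test_swapcache(folio)) {
 *		swp_entry_t entry = folio_swap_entry(folio);
 *		unsigned int type = swp_type(entry);
 *		pgoff_t offset = swp_offset(entry);
 *		...
 *	}
 */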

/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
void workingset_refault(struct folio *folio, void *shadow);
void workingset_activation(struct folio *folio);

/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do {				\
	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
		xas_set_update(xas, workingset_update_node);		\
		xas_set_lru(xas, &shadow_nodes);			\
	}								\
} while (0)
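
/*
 * Illustrative use (not part of the original header): callers that edit a
 * file mapping's XArray wire up their xa_state first so shadow-node
 * tracking stays in sync:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *
 *	mapping_set_update(&xas, mapping);
 *	xas_lock_irq(&xas);
 *	...xas_store()/xas_next() as needed...
 *	xas_unlock_irq(&xas);
 */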

/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;

/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)

/* linux/mm/swap.c */
void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
void lru_note_cost_folio(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);

extern atomic_t lru_disable_count;

static inline bool lru_cache_disabled(void)
{
	return atomic_read(&lru_disable_count);
}

static inline void lru_cache_enable(void)
{
	atomic_dec(&lru_disable_count);
}

extern void lru_cache_disable(void);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);

extern void lru_cache_add_inactive_or_unevictable(struct page *page,
						struct vm_area_struct *vma);

/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
					gfp_t gfp_mask, nodemask_t *mask);

#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
						  unsigned long nr_pages,
						  gfp_t gfp_mask,
						  unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
						gfp_t gfp_mask, bool noswap,
						pg_data_t *pgdat,
						unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);

extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
extern int node_reclaim_mode;
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
#else
#define node_reclaim_mode 0
#endif

static inline bool node_reclaim_enabled(void)
{
	/* Is any node_reclaim_mode bit set? */
	return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
}

void check_move_unevictable_folios(struct folio_batch *fbatch);
void check_move_unevictable_pages(struct pagevec *pvec);

extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);

#ifdef CONFIG_SWAP

int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
		unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
		sector_t *);

static inline unsigned long total_swapcache_pages(void)
{
	return global_node_page_state(NR_SWAPCACHE);
}

extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
extern atomic_t nr_rotate_swap;
extern bool has_usable_swap(void);

/* Swap 50% full? Release swapcache more aggressively.. */
static inline bool vm_swap_full(void)
{
	return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
}
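
/*
 * Worked example (illustrative, not part of the original header):
 * nr_swap_pages counts the *free* swap pages, so with total_swap_pages ==
 * 1000 and 400 pages still free, 400 * 2 < 1000 and vm_swap_full() reports
 * true, i.e. more than half of swap is in use.
 */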

static inline long get_nr_swap_pages(void)
{
	return atomic_long_read(&nr_swap_pages);
}

extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
int swap_type_of(dev_t device, sector_t offset);
int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_page_sector(struct page *page);
extern sector_t alloc_swapdev_block(int swap);

static inline void put_swap_device(struct swap_info_struct *si)
{
	percpu_ref_put(&si->users);
}
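
/*
 * Illustrative pairing (not part of the original header): get_swap_device()
 * takes a reference on si->users that keeps the device from being swapped
 * off underneath the caller; drop it with put_swap_device() when done:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		...look up the entry in the swap cache, read counts, etc...
 *		put_swap_device(si);
 *	}
 */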

#else /* CONFIG_SWAP */

static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return NULL;
}

static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	return NULL;
}

static inline void put_swap_device(struct swap_info_struct *si)
{
}

#define get_nr_swap_pages()			0L
#define total_swap_pages			0L
#define total_swapcache_pages()			0UL
#define vm_swap_full()				0

#define si_swapinfo(val) \
	do { (val)->freeswap = (val)->totalswap = 0; } while (0)
/* only sparc can not include linux/pagemap.h in this file
 * so leave put_page and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
	put_page(page)
#define free_pages_and_swap_cache(pages, nr) \
	release_pages((pages), (nr));

/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
#define free_swap_and_cache(e) is_pfn_swap_entry(e)

static inline void free_swap_cache(struct page *page)
{
}

static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
	return 0;
}

static inline void swap_shmem_alloc(swp_entry_t swp)
{
}

static inline int swap_duplicate(swp_entry_t swp)
{
	return 0;
}

static inline void swap_free(swp_entry_t swp)
{
}

static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}

static inline int __swap_count(swp_entry_t entry)
{
	return 0;
}

static inline int __swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline int swp_swapcount(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;

	entry.val = 0;
	return entry;
}

static inline bool folio_free_swap(struct folio *folio)
{
	return false;
}

static inline int add_swap_extent(struct swap_info_struct *sis,
				  unsigned long start_page,
				  unsigned long nr_pages, sector_t start_block)
{
	return -EINVAL;
}
#endif /* CONFIG_SWAP */

#ifdef CONFIG_THP_SWAP
extern int split_swap_cluster(swp_entry_t entry);
#else
static inline int split_swap_cluster(swp_entry_t entry)
{
	return 0;
}
#endif

#ifdef CONFIG_MEMCG
static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
	/* Cgroup2 doesn't have per-cgroup swappiness */
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return vm_swappiness;

	/* root ? */
	if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
		return vm_swappiness;

	return memcg->swappiness;
}
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
	return vm_swappiness;
}
#endif

#ifdef CONFIG_ZSWAP
extern u64 zswap_pool_total_size;
extern atomic_t zswap_stored_pages;
#endif

#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return;
	__cgroup_throttle_swaprate(page, gfp_mask);
}
#else
static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	cgroup_throttle_swaprate(&folio->page, gfp);
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
		swp_entry_t entry)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_try_charge_swap(folio, entry);
}

extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_swap(entry, nr_pages);
}

extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}

static inline int mem_cgroup_try_charge_swap(struct folio *folio,
					     swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
					    unsigned int nr_pages)
{
}

static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	return get_nr_swap_pages();
}

static inline bool mem_cgroup_swap_full(struct folio *folio)
{
	return vm_swap_full();
}
#endif

#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */