/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>
extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT	1
#define DISABLE_NUMA_STAT	0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
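
/*
 * Example (illustrative): PGFAULT is one of the vm_event_item values.
 * A path that already runs with preemption disabled (or that tolerates
 * a rare lost or misattributed increment) can use the raw variant:
 *
 *	__count_vm_event(PGFAULT);
 *
 * while a preemptible context would use:
 *
 *	count_vm_event(PGFAULT);
 */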
extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	  count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_PER_VMA_LOCK_STATS
#define count_vm_vma_lock_event(x) count_vm_event(x)
#else
#define count_vm_vma_lock_event(x) do {} while (0)
#endif
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
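
/*
 * Example (illustrative): the macro relies on the per-zone event items
 * being declared in zone order, so that for instance
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * lands on the PGALLOC_* counter matching the page's zone, e.g.
 * PGALLOC_MOVABLE when zid == ZONE_MOVABLE.
 */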
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
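
/*
 * The readers below clamp negative sums to zero: on SMP the per-cpu
 * differentials may not have been folded back yet, so the global
 * atomic can transiently dip below zero and must not be returned as
 * a huge unsigned value.
 */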
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
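
/*
 * Example (illustrative): a caller that must not miss recently queued
 * per-cpu deltas, such as a watermark check, would prefer the (more
 * expensive) snapshot over the cheap approximate read:
 *
 *	free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */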
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
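
/*
 * Example (illustrative): the page allocator would account nr_pages
 * allocated from the preferred node roughly as
 *
 *	__count_numa_events(zone, NUMA_HIT, nr_pages);
 */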
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */
static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}
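
/*
 * Example (illustrative): account a folio in and out of a node-level
 * counter, sized by its page count:
 *
 *	node_stat_add_folio(folio, NR_FILE_PAGES);
 *	...
 *	node_stat_sub_folio(folio, NR_FILE_PAGES);
 */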
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
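
/*
 * Example (illustrative): freeing 1 << order pages back into a CMA
 * pageblock moves both free counters together:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, MIGRATE_CMA);
 */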
extern const char * const vmstat_text[];
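
/*
 * vmstat_text[] is laid out as the zone stat names, then the NUMA
 * event names, then the node stat names, then the writeback stat
 * names, then the VM event names; the helpers below index into the
 * corresponding sub-ranges.
 */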
static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
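
/*
 * Example (illustrative): node_stat_name(NR_LRU_BASE + LRU_ACTIVE_FILE)
 * is "nr_active_file", so lru_list_name(LRU_ACTIVE_FILE) yields
 * "active_file".
 */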
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
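
/*
 * As elsewhere in this header, the double-underscore lruvec variants
 * below assume the caller already has interrupts disabled; the plain
 * variants wrap them in local_irq_save()/local_irq_restore().
 */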
#ifdef CONFIG_MEMCG
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */
static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
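
/*
 * Example (illustrative): dirty-page accounting for a folio, in a
 * context with interrupts already disabled, might look like
 *
 *	__lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
 */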
#endif /* _LINUX_VMSTAT_H */