// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <[email protected]>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
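/*
 * Verbosity of the mminit debug output below. Defaults to 0 and is
 * raised at boot via the "mminit_loglevel=" early parameter, handled
 * by set_mminit_loglevel() at the end of this #ifdef block.
 */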
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {
			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
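
/*
 * Sanity-check how the section, node, zone, last_cpupid and KASAN tag
 * fields are packed into page->flags: report the configured widths and
 * shifts, then BUG() if the computed shifts disagree with the *_PGSHIFT
 * constants or if any of the bitmasks overlap.
 */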
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
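
/* Parse the "mminit_loglevel=" early boot parameter, e.g. mminit_loglevel=2 */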
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */
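
/* The /sys/kernel/mm kobject, created in mm_sysfs_init() below */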
struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;
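
/*
 * Pick the batch size for the vm_committed_as percpu_counter, scaling
 * it with both the number of present CPUs and the amount of RAM (see
 * the policy comment in the function body).
 */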
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set the batch size to 0.4% of
	 * (total memory / #cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention on the percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX.
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
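
/*
 * totalram_pages() changes across memory hotplug, so recompute the
 * batch size whenever memory is onlined or offlined.
 */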
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}
__initcall(mm_compute_batch_init);

#endif
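
/*
 * Create the "mm" kobject under /sys/kernel; mm_kobj is exported above
 * so other mm code can attach its sysfs entries to it.
 */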
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);