/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;
#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat must be allocated.
 *
 * If the architecture uses the generic NODE_DATA() style,
 * node_data[nid] = kzalloc() works; whether it does depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
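
/*
 * Illustrative sketch (not part of this header) of how a node hot-add
 * path is expected to use the two hooks above; everything except the
 * two hooks is an assumption for the example:
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	[... initialize the fresh pgdat ...]
 *	arch_refresh_nodedata(nid, pgdat);	[publish it as NODE_DATA(nid)]
 */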
#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: Node-aware allocation can't be used to obtain the new node's
 * memory at this point, because the pgdat for the new node has not been
 * allocated/initialized yet. Using the new node's own memory will
 * require more consideration.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);	\
})

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}

static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
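/*
 * Typical (illustrative) caller pattern: translate a PFN to its struct
 * page only if the page is online, and treat NULL as "no usable memmap":
 *
 *	struct page *page = pfn_to_online_page(pfn);
 *
 *	if (!page)
 *		return;		[offline or invalid PFN]
 */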

/* Types for controlling the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. The zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
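/*
 * These values back the memory block "state" interface in sysfs: e.g.
 * writing "online_movable" to /sys/devices/system/memory/memoryN/state
 * is parsed (see mhp_online_type_from_str()) into MMOP_ONLINE_MOVABLE.
 */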

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
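
/*
 * Illustrative sketch of combining the flags above in a driver; the
 * nid/start/size values and the resource name are assumptions for the
 * example:
 *
 *	mhp_t flags = MHP_MERGE_RESOURCE;
 *
 *	if (mhp_supports_memmap_on_memory(size))
 *		flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (example)", flags);
 */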

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: device page map, used when hot-adding ZONE_DEVICE memory
 *	(optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
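
/*
 * Minimal sketch of filling struct mhp_params, assuming a plain hot-add
 * with no altmap and default kernel page protections:
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */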

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
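
/*
 * Illustrative reader-side pattern for the span seqlock: re-read the
 * zone span if a concurrent resize raced with the read:
 *
 *	unsigned int seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */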

extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
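
/*
 * Illustrative use: a reader that must keep memory from going on/offline
 * while it inspects the memmap brackets the walk with the pair above,
 * while the hotplug path itself uses mem_hotplug_begin()/done():
 *
 *	get_online_mems();
 *	[... use pfn_to_online_page() over the range ...]
 *	put_online_mems();
 */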

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
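
/*
 * Illustrative use: the lock is IRQ-safe, so callers pass a flags word
 * by reference:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	[... adjust pgdat->node_start_pfn / node_spanned_pages ...]
 *	pgdat_resize_unlock(pgdat, &flags);
 */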
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);

#ifdef CONFIG_MEMORY_HOTPLUG_SUBSECTIONS
extern int remove_memory_subsection(u64 start, u64 size);
#else
static inline int remove_memory_subsection(u64 start, u64 size)
{
	return -EBUSY;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SUBSECTIONS */

extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
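
/*
 * Illustrative use of offline_and_remove_memory(); the retry policy is
 * an assumption for the example, not mandated by this header:
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc == -EBUSY)
 *		[... memory still in use, try again later ...]
 */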
#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
#ifdef CONFIG_MEMORY_HOTPLUG_SUBSECTIONS
extern int add_memory_subsection(int nid, u64 start, u64 size);
#else
static inline int add_memory_subsection(int nid, u64 start, u64 size)
{
	return -EBUSY;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SUBSECTIONS */
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
			      unsigned long nr_pages, struct vmem_altmap *altmap,
			      struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
				  unsigned long pfn, unsigned long nr_pages,
				  unsigned long map_offset,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
				       struct memory_group *group,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */