/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
        MEMBLOCK_NONE    = 0x0,  /* No special request */
        MEMBLOCK_HOTPLUG = 0x1,  /* hotpluggable region */
        MEMBLOCK_MIRROR  = 0x2,  /* mirrored region */
        MEMBLOCK_NOMAP   = 0x4,  /* don't add to kernel direct mapping */
};
/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
        phys_addr_t base;
        phys_addr_t size;
        enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
};
/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
        unsigned long cnt;
        unsigned long max;
        phys_addr_t total_size;
        struct memblock_region *regions;
        char *name;
};
/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
        bool bottom_up;  /* is bottom up direction? */
        phys_addr_t current_limit;
        struct memblock_type memory;
        struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
                              phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
                          struct memblock_type *type_a,
                          struct memblock_type *type_b, phys_addr_t *out_start,
                          phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
                                        phys_addr_t *out_start,
                                        phys_addr_t *out_end)
{
        extern struct memblock_type physmem;

        __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
                         out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)                 \
        for (i = 0, __next_physmem_range(&i, type, p_start, p_end);     \
             i != (u64)ULLONG_MAX;                                      \
             __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,             \
                             p_start, p_end, p_nid)                     \
        for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,    \
                                     p_start, p_end, p_nid);            \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range(&i, nid, flags, type_a, type_b,           \
                              p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,         \
                                 p_start, p_end, p_nid)                 \
        for (i = (u64)ULLONG_MAX,                                       \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid);               \
             i != (u64)ULLONG_MAX;                                      \
             __next_mem_range_rev(&i, nid, flags, type_a, type_b,       \
                                  p_start, p_end, p_nid))
/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end)                           \
        __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,   \
                             MEMBLOCK_HOTPLUG, p_start, p_end, NULL)

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)                       \
        __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
                                 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
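
/*
 * Usage sketch (illustrative only, not part of the original header): walking
 * every usable memory range registered with memblock during early boot.
 * The function name below is hypothetical.
 *
 *      static void __init dump_usable_memory(void)
 *      {
 *              phys_addr_t start, end;
 *              u64 i;
 *
 *              for_each_mem_range(i, &start, &end)
 *                      pr_info("memory range: [%pa-%pa]\n", &start, &end);
 *      }
 */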
/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)                  \
        __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \
                             MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
        return m->flags & MEMBLOCK_NOMAP;
}
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
                            unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
                          unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)           \
        for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
             i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
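
/*
 * Usage sketch (illustrative only): counting the pages memblock knows about
 * on one NUMA node via the pfn iterator. The function below is hypothetical.
 *
 *      static unsigned long __init count_node_pages(int nid)
 *      {
 *              unsigned long start_pfn, end_pfn, pages = 0;
 *              int i;
 *
 *              for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *                      pages += end_pfn - start_pfn;
 *              return pages;
 *      }
 */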
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                                  unsigned long *out_spfn,
                                  unsigned long *out_epfn);

/**
 * for_each_free_mem_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine NUMA node, and if a given part
 * of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)    \
        for (i = 0,                                                     \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);    \
             i != U64_MAX;                                              \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
/**
 * for_each_free_mem_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
        for (; i != U64_MAX;                                             \
             __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)   \
        __for_each_mem_range(i, &memblock.memory, &memblock.reserved,   \
                             nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,  \
                                        p_nid)                          \
        __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                 nid, flags, p_start, p_end, p_nid)
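
/*
 * Usage sketch (illustrative only): summing the memory that is registered
 * but not yet reserved, i.e. what the page allocator will eventually
 * receive. The function below is hypothetical.
 *
 *      static phys_addr_t __init free_memory_size(void)
 *      {
 *              phys_addr_t start, end, total = 0;
 *              u64 i;
 *
 *              for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *                                      &start, &end, NULL)
 *                      total += end - start;
 *              return total;
 *      }
 */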
int memblock_set_node(phys_addr_t base, phys_addr_t size,
                      struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
        r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
        return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE   (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
#define MEMBLOCK_ALLOC_KASAN      1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
                                      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
                                     phys_addr_t align, phys_addr_t start,
                                     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
                                                        phys_addr_t align)
{
        return memblock_phys_alloc_range(size, align, 0,
                                         MEMBLOCK_ALLOC_ACCESSIBLE);
}
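
/*
 * Usage sketch (illustrative only): reserving a page-aligned physical buffer
 * early in boot, e.g. for a firmware handoff area. A return value of 0 means
 * the allocation failed. The function name and size below are hypothetical.
 *
 *      static phys_addr_t __init reserve_handoff_area(void)
 *      {
 *              phys_addr_t pa = memblock_phys_alloc(SZ_64K, PAGE_SIZE);
 *
 *              if (!pa)
 *                      pr_warn("failed to allocate handoff area\n");
 *              return pa;
 *      }
 */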
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
                                   phys_addr_t min_addr, phys_addr_t max_addr,
                                   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_raw(phys_addr_t size,
                                       phys_addr_t align)
{
        return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
                                          MEMBLOCK_ALLOC_ACCESSIBLE,
                                          NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
                                        phys_addr_t align,
                                        phys_addr_t min_addr)
{
        return memblock_alloc_try_nid(size, align, min_addr,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
                                       phys_addr_t align)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
                                        phys_addr_t align, int nid)
{
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}
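
/*
 * Usage sketch (illustrative only): allocating a zeroed, directly mapped
 * buffer before the page allocator is up. memblock_alloc() returns a
 * virtual address, or NULL on failure. The struct and function below are
 * hypothetical.
 *
 *      struct early_table {
 *              unsigned long entries[64];
 *      };
 *
 *      static struct early_table * __init alloc_early_table(void)
 *      {
 *              struct early_table *t;
 *
 *              t = memblock_alloc(sizeof(*t), SMP_CACHE_BYTES);
 *              if (!t)
 *                      panic("%s: out of memory\n", __func__);
 *              return t;
 *      }
 */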
static inline void memblock_free_early(phys_addr_t base,
                                       phys_addr_t size)
{
        memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
                                           phys_addr_t size, int nid)
{
        memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
        __memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
        memblock.bottom_up = enable;
}
/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock allocates memory in the bottom-up
 * direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
        return memblock.bottom_up;
}
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
bool memblock_is_nomap_remove(void);
void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base + reg->size);
}
/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
        return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
        return PFN_UP(reg->base + reg->size);
}
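
/*
 * Worked example (illustrative only, assuming a 4 KiB PAGE_SIZE): for a
 * region with base = 0x10800 and size = 0x1000 (so its end is 0x11800),
 * the memory accessors round inward and the reserved accessors round
 * outward:
 *
 *      memblock_region_memory_base_pfn()   -> PFN_UP(0x10800)   = 0x11
 *      memblock_region_memory_end_pfn()    -> PFN_DOWN(0x11800) = 0x11
 *      memblock_region_reserved_base_pfn() -> PFN_DOWN(0x10800) = 0x10
 *      memblock_region_reserved_end_pfn()  -> PFN_UP(0x11800)   = 0x12
 *
 * i.e. the "memory" view covers only pages fully inside the region (none
 * here), while the "reserved" view covers every page the region touches.
 */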
/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)                                     \
        for (region = memblock.memory.regions;                          \
             region < (memblock.memory.regions + memblock.memory.cnt);  \
             region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)                            \
        for (region = memblock.reserved.regions;                        \
             region < (memblock.reserved.regions + memblock.reserved.cnt); \
             region++)
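
/*
 * Usage sketch (illustrative only): iterating over the raw region array to
 * inspect per-region attributes such as MEMBLOCK_NOMAP or the NUMA node.
 * The function below is hypothetical.
 *
 *      static void __init dump_memory_regions(void)
 *      {
 *              struct memblock_region *r;
 *
 *              for_each_mem_region(r)
 *                      pr_info("node %d: base %pa size %pa%s\n",
 *                              memblock_get_region_node(r),
 *                              &r->base, &r->size,
 *                              memblock_is_nomap(r) ? " (nomap)" : "");
 *      }
 */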
extern void *alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit);

#define HASH_EARLY 0x00000001  /* Allocating during early boot? */
#define HASH_SMALL 0x00000002  /* sub-page allocation allowed, min
                                * shift passed via *_hash_shift */
#define HASH_ZERO  0x00000004  /* Zero allocated hash table */
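
/*
 * Usage sketch (illustrative only): sizing and allocating a boot-time hash
 * table. The table name and variables below are hypothetical; passing 0 for
 * numentries lets the helper size the table from the amount of memory.
 *
 *      static struct hlist_head *example_hashtable;
 *      static unsigned int example_hash_shift;
 *      static unsigned int example_hash_mask;
 *
 *      static void __init example_hash_init(void)
 *      {
 *              example_hashtable =
 *                      alloc_large_system_hash("example-cache",
 *                                              sizeof(struct hlist_head),
 *                                              0, 14, HASH_ZERO,
 *                                              &example_hash_shift,
 *                                              &example_hash_mask,
 *                                              0, 0);
 *      }
 */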
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;  /* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */