hugetlbpage.c

/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <[email protected]>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
#include <asm/firmware.h>

bool hugetlb_disabled = false;
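/*
 * hugepd_none() tests whether a huge page directory entry is still empty.
 *
 * PTE_T_ORDER is the pgtable-cache index whose objects hold a single
 * pte_basic_t (assuming the caches set up by pgtable_cache_add() hand out
 * objects of sizeof(void *) << index bytes).  It is used when a single huge
 * PTE is shared by several directory entries (pshift >= pdshift below).
 */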
#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
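/*
 * Allocate the hugepte table backing a huge page directory entry and make
 * every affected directory slot point at it.  pdshift is the shift of the
 * directory level being populated, pshift the shift of the huge page size.
 *
 * When the huge page is at least as large as a directory entry's span
 * (pshift >= pdshift), 2^(pshift - pdshift) consecutive slots cover it and
 * all of them must reference the same hugepte: e.g. with
 * pshift == pdshift + 2, num_hugepd is 1 << 2 == 4 and four slots are filled.
 */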
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = PGT_CACHE(PTE_T_ORDER);
                num_hugepd = 1 << (pshift - pdshift);
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        if (!cachep) {
                WARN_ONCE(1, "No page table cache created for hugetlb tables");
                return -ENOMEM;
        }

        new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                hugepd_populate(hpdp, new, pshift);
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1 ; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        } else {
                kmemleak_ignore(new);
        }
        spin_unlock(ptl);
        return 0;
}
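/*
 * huge_pte_alloc() walks, allocating as needed, down to the page-table level
 * whose span matches the huge page size.  A size that exactly matches a
 * level (e.g. PUD or PMD sized) returns the entry at that level directly;
 * otherwise a hugepd is installed and its hugepte returned.  The lock passed
 * down to __hugepte_alloc() is mm->page_table_lock for top-level hugepds and
 * the split PUD/PMD lock below that.
 */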
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        p4d_t *p4;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);
        p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) p4;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)p4;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, p4, addr);
                if (!pu)
                        return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else {
                                ptl = pmd_lockptr(mm, pm);
                                hpdp = (hugepd_t *)pm;
                        }
                }
        }
#else
        if (pshift >= PGDIR_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)p4;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, p4, addr);
                if (!pu)
                        return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
                return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;

        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
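/*
 * Boot-time gigantic pages on Book3S 64 may come from the firmware-provided
 * gpage pool above, which ignores the requested node, so node-specific
 * bootmem allocation is reported as unsupported.
 */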
bool __init hugetlb_node_alloc_supported(void)
{
        return false;
}
#endif
int __init alloc_bootmem_huge_page(struct hstate *h, int nid)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h, nid);
}
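/*
 * On non-Book3S-64, hugepte tables being freed can still be reached by
 * concurrent lockless page-table walkers, so hugepd_free() batches them per
 * cpu and defers the actual free through RCU.  An mm with a single user, or
 * one known to be thread local, can free immediately.
 */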
#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
/* Return true when the entry to be freed maps more than the area being freed */
static bool range_is_outside_limits(unsigned long start, unsigned long end,
                                    unsigned long floor, unsigned long ceiling,
                                    unsigned long mask)
{
        if ((start & mask) < floor)
                return true;
        if (ceiling) {
                ceiling &= mask;
                if (!ceiling)
                        return true;
        }
        return end - 1 > ceiling - 1;
}
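/*
 * floor and ceiling bound the region whose page tables may be torn down,
 * mirroring the checks in the generic free_pgd_range(): if the mask-aligned
 * start of this entry falls below floor, or end runs past the mask-aligned
 * ceiling, the entry also maps memory outside the area being freed and must
 * be left in place.
 */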
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        if (range_is_outside_limits(start, end, floor, ceiling, pdmask))
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}
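/*
 * On 8xx, huge pages smaller than PMD_SIZE live in ordinary PTE pages (see
 * the pte_alloc_map() path in huge_pte_alloc() above), so tearing them down
 * is a plain PTE-page free rather than a hugepd free.
 */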
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pgtable_t token = pmd_pgtable(*pmd);

        if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK))
                return;

        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        if (pmd_none_or_clear_bad(pmd))
                                continue;

                        /*
                         * If it is not a hugepd pointer, we should have
                         * already found it cleared.
                         */
                        WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

                        hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);

                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK))
                return;

        pmd = pmd_offset(pud, start & PUD_MASK);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start & PUD_MASK);
        mm_dec_nr_pmds(tlb->mm);
}
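/*
 * The PUD-level walk mirrors the PMD-level one above: a regular entry
 * recurses into hugetlb_free_pmd_range(), a hugepd entry is torn down via
 * free_hugepd_range(), and the containing PUD page is freed once the whole
 * PGDIR-sized region is shown to lie inside the floor/ceiling limits.
 */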
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(p4d, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1UL << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK))
                return;

        pud = pud_offset(p4d, start & PGDIR_MASK);
        p4d_clear(p4d);
        pud_free_tlb(tlb, pud, start & PGDIR_MASK);
        mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        p4d_t *p4d;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                p4d = p4d_offset(pgd, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (p4d_none_or_clear_bad(p4d))
                                continue;
                        hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1UL << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}
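/*
 * follow_huge_pd() is the follow_page() helper for hugepd-mapped ranges: it
 * returns the subpage of the huge page that backs @address, taking a
 * reference when FOLL_GET is set, and waits for a migration entry to be
 * resolved before retrying.
 */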
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * hugepage directory entries are protected by mm->page_table_lock
         * Use this instead of huge_pte_lockptr
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}
bool __init arch_hugetlb_valid_size(unsigned long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE || !is_power_of_2(size))
                return false;

        mmu_psize = check_and_get_huge_psize(shift);
        if (mmu_psize < 0)
                return false;

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        return true;
}

static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);

        if (!arch_hugetlb_valid_size((unsigned long)size))
                return -EINVAL;

        hugetlb_add_hstate(shift - PAGE_SHIFT);
        return 0;
}
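/*
 * Register every huge page size supported by the current MMU with the core
 * hugetlb code and create the page-table caches that huge_pte_alloc() will
 * later expect for each (pdshift, shift) combination.
 */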
static int __init hugetlbpage_init(void)
{
        bool configured = false;
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
            !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PGDIR_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * If pdshift and shift have the same value, we don't
                 * use the pgtable cache for the hugepd.
                 */
                if (pdshift > shift) {
                        if (!IS_ENABLED(CONFIG_PPC_8xx))
                                pgtable_cache_add(pdshift - shift);
                } else if (IS_ENABLED(CONFIG_PPC_E500) ||
                           IS_ENABLED(CONFIG_PPC_8xx)) {
                        pgtable_cache_add(PTE_T_ORDER);
                }

                configured = true;
        }

        if (!configured)
                pr_info("Failed to initialize. Disabling HugeTLB");

        return 0;
}

arch_initcall(hugetlbpage_init);
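/*
 * Reserve a CMA area for gigantic hugetlb pages at early boot: PUD-sized
 * pages when running radix, or 16G pages on bare-metal hash when the
 * platform advertises that page size.  On pseries the 16G pages come from
 * the firmware gpage pool above instead, so no CMA is reserved there.
 */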
void __init gigantic_hugetlb_cma_reserve(void)
{
        unsigned long order = 0;

        if (radix_enabled())
                order = PUD_SHIFT - PAGE_SHIFT;
        else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
                /*
                 * For pseries we do use ibm,expected#pages for reserving 16G pages.
                 */
                order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;

        if (order) {
                VM_WARN_ON(order < MAX_ORDER);
                hugetlb_cma_reserve(order);
        }
}