slice.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label,
                 (int)SLICE_NUM_LOW, &mask->low_slices);
        pr_devel("%s high_slice: %*pbl\n", label,
                 (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif
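
/*
 * An address is "low" when it sits below SLICE_LOW_TOP and is therefore
 * tracked by the low_slices bitmap; everything at or above SLICE_LOW_TOP
 * is tracked by the high_slices bitmap.
 */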
static inline notrace bool slice_addr_is_low(unsigned long addr)
{
        u64 tmp = (u64)addr;

        return tmp < SLICE_LOW_TOP;
}
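
/*
 * Fill @ret with the low and high slices touched by the address range
 * [start, start + len).
 */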
static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}
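
/*
 * Return non-zero if [addr, addr + len) lies below the mm's address limit
 * and does not overlap any existing VMA (including its stack guard gap).
 */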
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /*
         * Hack: so that each address is controlled by exactly one of the
         * high or low area bitmaps, the first high area starts at 4GB,
         * not 0.
         */
        if (start == 0)
                start = (unsigned long)SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}
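
/*
 * Build the mask of slices, up to @high_limit, that contain no VMAs and
 * are therefore candidates for conversion to a new page size.
 */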
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
                                unsigned long high_limit)
{
        unsigned long i;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (slice_addr_is_low(high_limit - 1))
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}
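
/*
 * Return true if every slice covering [start, start + len) is marked
 * available in @available.
 */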
static bool slice_check_range_fits(struct mm_struct *mm,
                                   const struct slice_mask *available,
                                   unsigned long start, unsigned long len)
{
        unsigned long end = start + len - 1;
        u64 low_slices = 0;

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                                - (1u << GET_LOW_SLICE_INDEX(start));
        }
        if ((low_slices & available->low_slices) != low_slices)
                return false;

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
                unsigned long i;

                for (i = start_index; i < start_index + count; i++) {
                        if (!test_bit(i, available->high_slices))
                                return false;
                }
        }

        return true;
}
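
/*
 * IPI callback: if this CPU is currently running the target mm, refresh
 * the paca copy of the context and flush the SLB so that stale segment
 * mappings are dropped.
 */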
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_restore_bolted();
        local_irq_restore(flags);
#endif
}
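
/*
 * Convert every slice selected in @mask to page size @psize: update the
 * low/high psize arrays and the cached per-psize slice masks, then flush
 * any coprocessor SLBs that may hold stale segment sizes.
 */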
static void slice_convert(struct mm_struct *mm,
                          const struct slice_mask *mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *psize_mask, *old_mask;
        unsigned long i, flags;
        int old_psize;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        psize_mask = slice_mask_for_size(&mm->context, psize);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm_ctx_low_slices(&mm->context);
        for (i = 0; i < SLICE_NUM_LOW; i++) {
                if (!(mask->low_slices & (1u << i)))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                old_mask->low_slices &= ~(1u << i);
                psize_mask->low_slices |= 1u << i;

                /* Update the sizes array */
                lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        hpsizes = mm_ctx_high_slices(&mm->context);
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
                if (!test_bit(i, mask->high_slices))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                __clear_bit(i, old_mask->high_slices);
                __set_bit(i, psize_mask->high_slices);

                /* Update the sizes array */
                hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm_ctx_low_slices(&mm->context),
                  (unsigned long)mm_ctx_high_slices(&mm->context));

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 const struct slice_mask *available,
                                 int end, unsigned long *boundary_addr)
{
        unsigned long slice;
        if (slice_addr_is_low(addr)) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available->low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available->high_slices);
        }
}
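
/*
 * Bottom-up search: walk upward from @addr, extending [info.low_limit, addr)
 * over consecutive available slices and letting vm_unmapped_area() look for
 * a fit within each run.
 */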
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long addr, unsigned long len,
                                              const struct slice_mask *available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        /*
         * Search up to the allowed maximum address for this mmap request.
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}
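
/*
 * Top-down variant: walk downward from @addr, merging runs of available
 * slices; if nothing fits, fall back to a bottom-up search from
 * TASK_UNMAPPED_BASE.
 */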
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long addr, unsigned long len,
                                             const struct slice_mask *available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long found, prev;
        struct vm_unmapped_area_info info;
        unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Only apply this to
         * requests whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

        while (addr > min_addr) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < min_addr)
                        addr = min_addr;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     const struct slice_mask *mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
                                   const struct slice_mask *src)
{
        dst->low_slices = src->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
                                 const struct slice_mask *src1,
                                 const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices | src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
                                     const struct slice_mask *src1,
                                     const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices & ~src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}
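
/*
 * MMU_PAGE_BASE is the kernel's base page size; after a slice conversion,
 * segments only need flushing when the new psize is larger than this.
 */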
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        const struct slice_mask *maskp;
        const struct slice_mask *compat_maskp = NULL;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long page_size = 1UL << pshift;
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        high_limit = DEFAULT_MAP_WINDOW;
        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
                high_limit = TASK_SIZE;

        if (len > high_limit)
                return -ENOMEM;
        if (len & (page_size - 1))
                return -EINVAL;
        if (fixed) {
                if (addr & (page_size - 1))
                        return -EINVAL;
                if (addr > high_limit - len)
                        return -ENOMEM;
        }

        if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
                /*
                 * Increasing the slb_addr_limit does not require
                 * slice mask cache to be recalculated because it should
                 * be already initialised beyond the old address limit.
                 */
                mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

                on_each_cpu(slice_flush_segments, mm, 1);
        }

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = ALIGN(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len || addr < mmap_min_addr ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        maskp = slice_mask_for_size(&mm->context, psize);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *	check if fits in good | compat => OK
         *	check if fits in good | compat | free => convert free
         *	else bad
         * If have hint:
         *	check if hint fits in good => OK
         *	check if hint fits in good | free => convert free
         * Otherwise:
         *	search in good, found => OK
         *	search in good | free, found => convert free
         *	search in good | compat | free, found => convert free.
         */

        /*
         * If we support combo pages, we can allow 64k pages in 4k slices
         * The mask copies could be avoided in most cases here if we had
         * a pointer to good mask for the next code to use.
         */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                if (fixed)
                        slice_or_mask(&good_mask, maskp, compat_maskp);
                else
                        slice_copy_mask(&good_mask, maskp);
        } else {
                slice_copy_mask(&good_mask, maskp);
        }

        slice_print_mask(" good_mask", &good_mask);
        if (compat_maskp)
                slice_print_mask(" compat_mask", compat_maskp);

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_range_fits(mm, &good_mask, addr, len)) {
                        slice_dbg(" fits good !\n");
                        newaddr = addr;
                        goto return_addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask, we don't have to setup,
                         * we thus return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }
        /*
         * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &potential_mask, &good_mask);
        slice_print_mask(" potential", &potential_mask);

        if (addr != 0 || fixed) {
                if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
                        slice_dbg(" fits potential !\n");
                        newaddr = addr;
                        goto convert;
                }
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        newaddr = slice_find_area(mm, len, &potential_mask,
                                  psize, topdown, high_limit);

        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
            psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
                newaddr = slice_find_area(mm, len, &potential_mask,
                                          psize, topdown, high_limit);
        }

        if (newaddr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(newaddr, len, &potential_mask);
        slice_dbg(" found potential area at 0x%lx\n", newaddr);
        slice_print_mask(" mask", &potential_mask);

 convert:
        /*
         * Try to allocate the context before we do slice convert
         * so that we handle the context allocation failure gracefully.
         */
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }

        slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
        if (compat_maskp && !fixed)
                slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
        if (potential_mask.low_slices ||
                (SLICE_NUM_HIGH &&
                 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
                slice_convert(mm, &potential_mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return newaddr;

return_addr:
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }
        return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        if (radix_enabled())
                return generic_get_unmapped_area(filp, addr, len, pgoff, flags);

        return slice_get_unmapped_area(addr, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        if (radix_enabled())
                return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);

        return slice_get_unmapped_area(addr0, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 1);
}
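
/*
 * Look up the page size of the slice containing @addr. Each byte of the
 * psize arrays packs two slices, four bits each.
 */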
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *psizes;
        int index, mask_index;

        VM_BUG_ON(radix_enabled());

        if (slice_addr_is_low(addr)) {
                psizes = mm_ctx_low_slices(&mm->context);
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = mm_ctx_high_slices(&mm->context);
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        mask_index = index & 0x1;
        return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

void slice_init_new_context_exec(struct mm_struct *mm)
{
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *mask;
        unsigned int psize = mmu_virtual_psize;

        slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

        /*
         * In the case of exec, use the default limit. In the
         * case of fork it is just inherited from the mm being
         * duplicated.
         */
        mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
        mm_ctx_set_user_psize(&mm->context, psize);

        /*
         * Set all slice psizes to the default.
         */
        lpsizes = mm_ctx_low_slices(&mm->context);
        memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

        hpsizes = mm_ctx_high_slices(&mm->context);
        memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

        /*
         * Slice mask cache starts zeroed, fill the default size cache.
         */
        mask = slice_mask_for_size(&mm->context, psize);
        mask->low_slices = ~0UL;
        if (SLICE_NUM_HIGH)
                bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

void slice_setup_new_exec(void)
{
        struct mm_struct *mm = current->mm;

        slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

        if (!is_32bit_task())
                return;

        mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
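
/*
 * Unconditionally set the page size of every slice covering
 * [start, start + len) to @psize.
 */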
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                                 unsigned long len)
{
        const struct slice_mask *maskp;
        unsigned int psize = mm_ctx_user_psize(&mm->context);

        VM_BUG_ON(radix_enabled());

        maskp = slice_mask_for_size(&mm->context, psize);

        /* We need to account for 4k slices too */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                const struct slice_mask *compat_maskp;
                struct slice_mask available;

                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                slice_or_mask(&available, maskp, compat_maskp);
                return !slice_check_range_fits(mm, &available, addr, len);
        }

        return !slice_check_range_fits(mm, maskp, addr, len);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        /* With radix we don't use slices, so derive it from the vma */
        if (radix_enabled())
                return vma_kernel_pagesize(vma);

        return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
}

static int file_to_psize(struct file *file)
{
        struct hstate *hstate = hstate_file(file);

        return shift_to_mmu_psize(huge_page_shift(hstate));
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        if (radix_enabled())
                return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);

        return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
}
#endif