vmem.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}
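
/*
 * Allocate a CRST (region or segment) table and preset every entry
 * with @val. Returns NULL if no memory is available. Like
 * vmem_alloc_pages(), this works both before and after the slab
 * allocator is up.
 */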
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}
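
/*
 * Tracking of partially used vmemmap PMDs: when only part of a 1 MB
 * vmemmap segment is backed by an added memory range, the unused
 * bytes of that segment are filled with the PAGE_UNUSED marker. On
 * removal, a segment whose content is entirely PAGE_UNUSED can be
 * freed again (see vmemmap_unuse_sub_pmd() below).
 */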
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
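
/*
 * Illustrative example (sizes chosen for illustration only): assume the
 * memmap of a hot-added range covers just the first half of a fresh
 * 1 MB vmemmap PMD. vmemmap_use_new_sub_pmd() marks the second half
 * PAGE_UNUSED (or defers that via unused_sub_pmd_start if the next
 * section continues right there). If the range is removed again,
 * vmemmap_unuse_sub_pmd() marks the first half PAGE_UNUSED; memchr_inv()
 * then finds no other byte in the PMD and the caller in
 * modify_pmd_table() frees the 1 MB page backing it.
 */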
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
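
/*
 * Free a PTE table on the remove path once every entry in it is
 * pte_none(). Boot-time tables are never freed (see vmem_pte_free()).
 */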
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also have
				 * page tables, since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif
	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif
	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}
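
/*
 * Walk (and, with add=true, build) the kernel page tables for
 * [start, end). With direct=true the range is part of the 1:1
 * (identity) mapping: large PUD/PMD entries are used where EDAT is
 * available and the PG_DIRECT_MAP_* counters are updated. With
 * direct=false the range backs the virtual memmap and page frames are
 * allocated via vmemmap_alloc_block(). On removal (add=false), empty
 * lower-level tables are freed and the TLB is flushed for the whole
 * range.
 */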
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}
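
/*
 * Tell the memory hotplug core which physical address range can be
 * added: everything from 0 up to (but not including) VMEM_MAX_PHYS.
 */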
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}
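
/*
 * Add [start, start + size) to the 1:1 mapping, rejecting requests
 * that fall outside the mappable range or wrap around. For example, a
 * range ending exactly at VMEM_MAX_PHYS (start + size == range.end + 1)
 * is still accepted; one byte more is -ERANGE.
 */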
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along. Meeting a large segment- or region-table entry
 * while traversing is an error, since the function is expected to be
 * called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
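
/*
 * Map the 4K page at physical address @phys to virtual address @addr
 * with protection @prot. Any previous translation is flushed on all
 * CPUs (IPTE) before the new PTE is installed. This helper does not
 * take vmem_mutex itself; vmem_map_4k_page() below is the locked
 * wrapper that also allocates missing page tables.
 */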
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
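
/*
 * Usage sketch (hypothetical caller; virt and phys are illustrative
 * variables only):
 *
 *	if (!vmem_map_4k_page(virt, phys, PAGE_KERNEL)) {
 *		... access the page through virt ...
 *		vmem_unmap_4k_page(virt);
 *	}
 */
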
/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* lowcore requires 4k mapping for real addresses / prefixing */
	set_memory_4k(0, LC_PAGES);

	/* lowcore must be executable for LPSWE */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}