  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/arch/arm/mm/pgd.c
  4. *
  5. * Copyright (C) 1998-2005 Russell King
  6. */
  7. #include <linux/mm.h>
  8. #include <linux/gfp.h>
  9. #include <linux/highmem.h>
  10. #include <linux/slab.h>
  11. #include <asm/cp15.h>
  12. #include <asm/pgalloc.h>
  13. #include <asm/page.h>
  14. #include <asm/tlbflush.h>
  15. #include "mm.h"
#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE the first-level table holds only PTRS_PER_PGD entries, small
 * enough to come from kmalloc; kmalloc_array() also checks the
 * count * size multiplication for overflow.
 */
#define __pgd_alloc()	kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd)	kfree(pgd)
#else
/*
 * The classic short-descriptor MMU needs a 16KB (order-2 pages),
 * naturally aligned level-1 table — see pgd_alloc() below.
 */
#define __pgd_alloc()	(pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd)	free_pages((unsigned long)pgd, 2)
#endif
/*
 * need to get a 16k page for level 1
 *
 * Allocate and initialise a first-level (pgd) table for @mm: the user
 * half starts out empty, the kernel/IO half is copied from init_mm's
 * table.  Returns the new pgd, or NULL on allocation failure (all
 * partially-allocated lower tables are released via the goto unwind).
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	p4d_t *new_p4d, *init_p4d;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	/* User-space entries begin empty. */
	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* Push the new table out of the D-cache for the hardware walker. */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_p4d)
		goto no_p4d;

	new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#ifdef CONFIG_KASAN
	/*
	 * Copy PMD table for KASAN shadow mappings.
	 */
	init_pgd = pgd_offset_k(TASK_SIZE);
	init_p4d = p4d_offset(init_pgd, TASK_SIZE);
	init_pud = pud_offset(init_p4d, TASK_SIZE);
	init_pmd = pmd_offset(init_pud, TASK_SIZE);
	new_pmd = pmd_offset(new_pud, TASK_SIZE);
	memcpy(new_pmd, init_pmd,
	       (pmd_index(MODULES_VADDR) - pmd_index(TASK_SIZE))
	       * sizeof(pmd_t));
	clean_dcache_area(new_pmd, PTRS_PER_PMD * sizeof(pmd_t));
#endif /* CONFIG_KASAN */
#endif /* CONFIG_ARM_LPAE */

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_p4d = p4d_alloc(mm, new_pgd, 0);
		if (!new_p4d)
			goto no_p4d;

		new_pud = pud_alloc(mm, new_p4d, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		/* Copy init_mm's two vector-page ptes into the new table. */
		init_p4d = p4d_offset(init_pgd, 0);
		init_pud = pud_offset(init_p4d, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

	/* Error unwind: free whatever was allocated before the failure. */
no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	p4d_free(mm, new_p4d);
no_p4d:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
/*
 * Free the pgd allocated by pgd_alloc() above, together with any lower
 * tables it installed: the low-vectors tables at address 0 (non-high
 * vectors case) and, with LPAE, the per-entry tables for modules/pkmap.
 * A NULL @pgd_base is a no-op.
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd_base)
		return;

	/*
	 * Tear down the tables under the entry covering address 0 — the
	 * only user-space entry pgd_alloc() may have populated (the low
	 * vectors page).  Each level falls through to free its parent.
	 */
	pgd = pgd_base + pgd_index(0);
	if (pgd_none_or_clear_bad(pgd))
		goto no_pgd;

	p4d = p4d_offset(pgd, 0);
	if (p4d_none_or_clear_bad(p4d))
		goto no_p4d;

	pud = pud_offset(p4d, 0);
	if (pud_none_or_clear_bad(pud))
		goto no_pud;

	pmd = pmd_offset(pud, 0);
	if (pmd_none_or_clear_bad(pmd))
		goto no_pmd;

	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
no_pmd:
	pud_clear(pud);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
no_pud:
	p4d_clear(p4d);
	pud_free(mm, pud);
no_p4d:
	pgd_clear(pgd);
	p4d_free(mm, p4d);
no_pgd:
#ifdef CONFIG_ARM_LPAE
	/*
	 * Free modules/pkmap or identity pmd tables.
	 *
	 * Entries copied from the swapper table are tagged L_PGD_SWAPPER
	 * and are shared with init_mm, so they must be skipped here.
	 */
	for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (pgd_val(*pgd) & L_PGD_SWAPPER)
			continue;
		p4d = p4d_offset(pgd, 0);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		pud = pud_offset(p4d, 0);
		if (pud_none_or_clear_bad(pud))
			continue;
		pmd = pmd_offset(pud, 0);
		pud_clear(pud);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
		p4d_clear(p4d);
		pud_free(mm, pud);
		mm_dec_nr_puds(mm);
		pgd_clear(pgd);
		p4d_free(mm, p4d);
	}
#endif
	__pgd_free(pgd_base);
}