pgalloc.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_PGALLOC_H
#define __ASM_GENERIC_PGALLOC_H

#ifdef CONFIG_MMU

#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

/**
 * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
        return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
 * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
 * @mm: the mm_struct of the current context
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
        return __pte_alloc_one_kernel(mm);
}
#endif

/**
 * pte_free_kernel - free PTE-level kernel page table page
 * @mm: the mm_struct of the current context
 * @pte: pointer to the memory containing the page table
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}
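
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the kernel-side allocator pairs with pte_free_kernel().  Generic code
 * normally reaches these helpers through pte_alloc_kernel() in
 * <linux/mm.h>; the hypothetical example_* function below only shows
 * which calls belong together.
 */
static inline int example_kernel_pte_roundtrip(void)
{
        pte_t *pte = pte_alloc_one_kernel(&init_mm);

        if (!pte)
                return -ENOMEM;
        /* a real caller would wire the table in with pmd_populate_kernel() */
        pte_free_kernel(&init_mm, pte);
        return 0;
}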

/**
 * __pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 * @gfp: GFP flags to use for the allocation
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * This function is intended for architectures that need
 * anything beyond simple page allocation or must have custom GFP flags.
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
        struct page *pte;

        pte = alloc_page(gfp);
        if (!pte)
                return NULL;
        if (!pgtable_pte_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }

        return pte;
}

#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
/**
 * pte_alloc_one - allocate a page for PTE-level user page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pte_page_ctor().
 *
 * Return: `struct page` initialized as page table or %NULL on error
 */
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        return __pte_alloc_one(mm, GFP_PGTABLE_USER);
}
#endif

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */

/**
 * pte_free - free PTE-level user page table page
 * @mm: the mm_struct of the current context
 * @pte_page: the `struct page` representing the page table
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
        pgtable_pte_page_dtor(pte_page);
        __free_page(pte_page);
}
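
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * user PTE pages are handed around as pgtable_t (a struct page here) and
 * must be released with pte_free() so the pgtable_pte_page_ctor()/_dtor()
 * pair stays balanced.  An architecture needing non-default GFP flags
 * would call __pte_alloc_one() directly, e.g. adding __GFP_HIGHMEM for
 * highmem PTE pages.  The example_* function below is hypothetical.
 */
static inline int example_user_pte_roundtrip(struct mm_struct *mm)
{
        pgtable_t ptepage = pte_alloc_one(mm);

        if (!ptepage)
                return -ENOMEM;
        /* a real caller would install the page with pmd_populate() */
        pte_free(mm, ptepage);
        return 0;
}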

#if CONFIG_PGTABLE_LEVELS > 2

#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
 * pmd_alloc_one - allocate a page for PMD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page and runs the pgtable_pmd_page_ctor().
 * Allocations use %GFP_PGTABLE_USER in user context and
 * %GFP_PGTABLE_KERNEL in kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        struct page *page;
        gfp_t gfp = GFP_PGTABLE_USER;

        if (mm == &init_mm)
                gfp = GFP_PGTABLE_KERNEL;
        page = alloc_pages(gfp, 0);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
                __free_pages(page, 0);
                return NULL;
        }
        return (pmd_t *)page_address(page);
}
#endif

#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
        pgtable_pmd_page_dtor(virt_to_page(pmd));
        free_page((unsigned long)pmd);
}
#endif
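
/*
 * Usage sketch (editor's illustration, not part of the original header):
 * pmd_alloc_one() chooses its GFP flags from the mm it is passed, so
 * kernel page tables (mm == &init_mm) skip the __GFP_ACCOUNT memcg
 * charging that user page tables get.  The example_* function below is
 * hypothetical.
 */
static inline int example_pmd_roundtrip(struct mm_struct *mm, unsigned long addr)
{
        pmd_t *pmd = pmd_alloc_one(mm, addr);

        if (!pmd)
                return -ENOMEM;
        pmd_free(mm, pmd);
        return 0;
}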

#endif /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        gfp_t gfp = GFP_PGTABLE_USER;

        if (mm == &init_mm)
                gfp = GFP_PGTABLE_KERNEL;
        return (pud_t *)get_zeroed_page(gfp);
}

#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
 *
 * Allocates a page using %GFP_PGTABLE_USER for user context and
 * %GFP_PGTABLE_KERNEL for kernel context.
 *
 * Return: pointer to the allocated memory or %NULL on error
 */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return __pud_alloc_one(mm, addr);
}
#endif

static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
        BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
        free_page((unsigned long)pud);
}

#ifndef __HAVE_ARCH_PUD_FREE
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        __pud_free(mm, pud);
}
#endif
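
/*
 * Override sketch (editor's illustration, not part of the original header):
 * each helper guarded by a __HAVE_ARCH_* macro above can be replaced by the
 * architecture, which defines the macro in its own asm/pgalloc.h before
 * including this file and supplies the function itself, usually still
 * delegating to the __ variant.  The hypothetical example_pud_alloc_one()
 * below shows the shape such an override takes.
 */
static inline pud_t *example_pud_alloc_one(struct mm_struct *mm,
                                           unsigned long addr)
{
        pud_t *pud = __pud_alloc_one(mm, addr);

        /* an arch-specific override would do its extra setup here */
        return pud;
}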

#endif /* CONFIG_PGTABLE_LEVELS > 3 */

#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}
#endif

#endif /* CONFIG_MMU */

#endif /* __ASM_GENERIC_PGALLOC_H */