pgalloc.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <[email protected]>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#ifdef CONFIG_MMU
#define __HAVE_ARCH_PUD_ALLOC_ONE
#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
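
/*
 * The *_populate() helpers link one page-table level to the one below:
 * they take the child table's page frame number and install it as a
 * non-leaf entry. On RISC-V a PTE with V set and R/W/X clear
 * (_PAGE_TABLE) is a pointer to the next level of the table.
 */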
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
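
/*
 * Userspace variant: user PTE pages are passed around as pgtable_t
 * (a struct page pointer), so translate back to a virtual address first.
 */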
static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

#ifndef __PAGETABLE_PMD_FOLDED
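/*
 * Everything below only exists when the PMD level is real; when the
 * PMD is folded (two-level paging) the generic folded stubs from
 * pgtable-nopmd.h are used instead.
 */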
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
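
/*
 * The number of page-table levels is chosen at boot on riscv:
 * pgtable_l4_enabled/pgtable_l5_enabled say whether the P4D and PGD
 * levels are real (sv48/sv57) or folded at runtime (sv39), so the
 * helpers for those levels must check before touching an entry.
 */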
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}
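
/*
 * The _safe variants use set_p4d_safe()/set_pgd_safe(), which warn if
 * they would overwrite an already-present entry with a different one.
 */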
static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
				     pud_t *pud)
{
	if (pgtable_l4_enabled) {
		unsigned long pfn = virt_to_pfn(pud);

		set_p4d_safe(p4d,
			     __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
				     p4d_t *p4d)
{
	if (pgtable_l5_enabled) {
		unsigned long pfn = virt_to_pfn(p4d);

		set_pgd_safe(pgd,
			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
	}
}
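
/*
 * Override the generic PUD alloc/free (see the __HAVE_ARCH_PUD_*
 * defines above): when the PUD level is folded at runtime there is no
 * separate PUD page to manage, so allocation returns NULL and free is
 * a no-op.
 */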
#define pud_alloc_one pud_alloc_one
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l4_enabled)
		return __pud_alloc_one(mm, addr);

	return NULL;
}

#define pud_free pud_free
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	if (pgtable_l4_enabled)
		__pud_free(mm, pud);
}

#define __pud_free_tlb(tlb, pud, addr)	pud_free((tlb)->mm, pud)

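/*
 * There is no generic p4d_alloc_one(), so allocate the page here.
 * Kernel page tables must not be accounted to a memcg, hence
 * GFP_PGTABLE_KERNEL (no __GFP_ACCOUNT) for init_mm allocations.
 */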
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	if (pgtable_l5_enabled) {
		gfp_t gfp = GFP_PGTABLE_USER;

		if (mm == &init_mm)
			gfp = GFP_PGTABLE_KERNEL;
		return (p4d_t *)get_zeroed_page(gfp);
	}

	return NULL;
}
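
/* Sanity check: a page-table page is always page aligned. */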
static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

#define p4d_free p4d_free
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (pgtable_l5_enabled)
		__p4d_free(mm, p4d);
}

#define __p4d_free_tlb(tlb, p4d, addr)	p4d_free((tlb)->mm, p4d)
#endif /* __PAGETABLE_PMD_FOLDED */
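
/*
 * The kernel half of the address space is shared between all
 * processes: every pgd carries a copy of init_mm's kernel entries,
 * which sit above USER_PTRS_PER_PGD.
 */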
static inline void sync_kernel_mappings(pgd_t *pgd)
{
	memcpy(pgd + USER_PTRS_PER_PGD,
	       init_mm.pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
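
/*
 * Allocate a fresh pgd for a new mm: zero the user entries, then copy
 * the kernel mappings from init_mm so kernel addresses resolve
 * immediately in the new address space.
 */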
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		sync_kernel_mappings(pgd);
	}
	return pgd;
}

#ifndef __PAGETABLE_PMD_FOLDED
#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
#endif /* __PAGETABLE_PMD_FOLDED */
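
/*
 * Run the PTE page destructor (releases the split page-table lock and
 * page-table accounting), then hand the page to the mmu_gather so it
 * is only freed after the relevant TLB entries have been flushed.
 */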
#define __pte_free_tlb(tlb, pte, buf)	\
do {					\
	pgtable_pte_page_dtor(pte);	\
	tlb_remove_page((tlb), pte);	\
} while (0)
#endif /* CONFIG_MMU */

#endif /* _ASM_RISCV_PGALLOC_H */