// SPDX-License-Identifier: GPL-2.0
/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <[email protected]>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
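
/*
 * Find a free area for a hugetlb mapping: reject lengths that are not
 * multiples of the huge page size, align any address hint to a huge
 * page boundary, and fall back to the generic arch_get_unmapped_area(),
 * which also takes care of cache colouring.
 */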
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
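
/*
 * Allocate (or find) the page-table levels down to the pte for a huge
 * mapping at @addr and return a pointer to its first sub-pte.
 */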
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
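
/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables and return the sub-pte for @addr, or NULL if any level of the
 * walk is still empty.
 */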
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					pte = pte_offset_map(pmd, addr);
			}
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
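/*
 * The OR with _HUGE_PAGE_SIZE_ENCODING_DEFAULT below presumably tags
 * the purge address with the huge-page size encoding; one purge is then
 * issued per physical huge page backing the Linux huge page.
 */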
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
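/*
 * A huge page is represented by (1 << HUGETLB_PAGE_ORDER) consecutive
 * base-page PTEs: each loop iteration writes one sub-pte and advances
 * the mapped physical address by PAGE_SIZE, after which the stale TLB
 * entries for the whole range are purged in one go.
 */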
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
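
/* Arch hook used by the generic hugetlb code to install a huge pte. */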
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}
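
/*
 * Return the current pte and clear the mapping by writing zero PTEs
 * across the whole huge-page range.
 */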
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;

	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));

	return entry;
}
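
/*
 * Write-protect the mapping by rewriting every sub-pte with the write
 * bit cleared.
 */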
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
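
/*
 * Update access/dirty bits: if the new pte differs from the current
 * one, rewrite the whole huge-page range and report the change so the
 * caller can flush.
 */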
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	int changed;
	struct mm_struct *mm = vma->vm_mm;

	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(mm, addr, ptep, pte);
	}
	return changed;
}
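
/*
 * parisc huge pages live entirely at the pte level (see
 * __set_huge_pte_at() above), so pmd- and pud-level entries are never
 * huge.
 */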
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}