pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>
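
/*
 * Private data handed to the page-table walk: the bits to set and the
 * bits to clear in every leaf entry the walk visits.
 */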
struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};
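
/*
 * Apply the masks to one entry's raw value: clear_mask is applied first,
 * then set_mask, so a bit named in both masks ends up set.
 */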
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}
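
/*
 * One callback per page-table level. Each of the pgd/p4d/pud/pmd handlers
 * below rewrites its entry only if it is a leaf, i.e. a huge mapping at
 * that level; non-leaf entries are left alone so the walker descends into
 * the next level instead.
 */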
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}
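
/* PTEs are always leaves, so the masks are applied unconditionally. */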
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}
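
/*
 * pte_hole is a no-op, so unmapped gaps in the walked range are simply
 * skipped. PGWALK_RDLOCK indicates that, for VMA-based walks, holding the
 * mmap lock for read is sufficient.
 */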
static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
	.walk_lock = PGWALK_RDLOCK,
};
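
/*
 * Core helper: apply set_mask/clear_mask to the kernel mapping of
 * [addr, addr + numpages * PAGE_SIZE). This is a no-VMA walk over
 * init_mm, done under the mmap write lock, and the TLB is flushed for
 * the whole range afterwards.
 */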
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	mmap_write_lock(&init_mm);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	mmap_write_unlock(&init_mm);

	flush_tlb_kernel_range(start, end);

	return ret;
}
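
/*
 * Exported helpers: each wrapper encodes one permission change as a pair
 * of set/clear masks. Note that set_memory_rw_nx() clears the execute bit
 * while granting read/write, whereas set_memory_rw() leaves it untouched.
 */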
int set_memory_rw_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}
int set_direct_map_invalid_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(_PAGE_PRESENT)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}
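
/* Restore the page's default direct-map permissions (PAGE_KERNEL). */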
int set_direct_map_default_noflush(struct page *page)
{
	int ret;
	unsigned long start = (unsigned long)page_address(page);
	unsigned long end = start + PAGE_SIZE;
	struct pageattr_masks masks = {
		.set_mask = PAGE_KERNEL,
		.clear_mask = __pgprot(0)
	};

	mmap_read_lock(&init_mm);
	ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
	mmap_read_unlock(&init_mm);

	return ret;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
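/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the direct map by toggling
 * _PAGE_PRESENT, so stray accesses to freed pages fault.
 */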
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!debug_pagealloc_enabled())
		return;

	if (enable)
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(_PAGE_PRESENT), __pgprot(0));
	else
		__set_memory((unsigned long)page_address(page), numpages,
			     __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif
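
/*
 * Report whether the page is mapped in the kernel page tables, walking
 * down from the PGD by hand; a leaf at any level means it is mapped.
 */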
bool kernel_page_present(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgd_t *pgd;
	pud_t *pud;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (!pgd_present(*pgd))
		return false;
	if (pgd_leaf(*pgd))
		return true;

	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return false;
	if (p4d_leaf(*p4d))
		return true;

	pud = pud_offset(p4d, addr);
	if (!pud_present(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return false;
	if (pmd_leaf(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_present(*pte);
}