page_table_check.c

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (c) 2021, Google LLC.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */
#include <linux/mm.h>
#include <linux/page_table_check.h>

#undef pr_fmt
#define pr_fmt(fmt)	"page_table_check: " fmt
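
/*
 * Per-page state, stored in the page's struct page_ext slot: how many
 * page-table entries map the page as anonymous and how many map it as
 * file-backed. A page must never be both at once.
 */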
struct page_table_check {
	atomic_t anon_map_count;
	atomic_t file_map_count;
};

static bool __page_table_check_enabled __initdata =
				IS_ENABLED(CONFIG_PAGE_TABLE_CHECK_ENFORCED);
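
/*
 * The key is defined true, i.e. "checking disabled", by default;
 * init_page_table_check() flips it when checking is requested, which
 * arms the inline wrappers in <linux/page_table_check.h>.
 */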
DEFINE_STATIC_KEY_TRUE(page_table_check_disabled);
EXPORT_SYMBOL(page_table_check_disabled);

static int __init early_page_table_check_param(char *buf)
{
	return strtobool(buf, &__page_table_check_enabled);
}

early_param("page_table_check", early_page_table_check_param);
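
/*
 * Usage (see Documentation/mm/page_table_check.rst): build with
 * CONFIG_PAGE_TABLE_CHECK=y and boot with "page_table_check=on", or
 * select CONFIG_PAGE_TABLE_CHECK_ENFORCED to enable it unconditionally.
 */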

static bool __init need_page_table_check(void)
{
	return __page_table_check_enabled;
}

static void __init init_page_table_check(void)
{
	if (!__page_table_check_enabled)
		return;
	static_branch_disable(&page_table_check_disabled);
}

struct page_ext_operations page_table_check_ops = {
	.size = sizeof(struct page_table_check),
	.need = need_page_table_check,
	.init = init_page_table_check,
};
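
/* Return this page's counters from its slot in the page_ext array. */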
static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
{
	BUG_ON(!page_ext);
	return (void *)(page_ext) + page_table_check_ops.offset;
}

/*
 * An entry is removed from the page table: decrement the counters for
 * that page, and verify that the page is of the correct type and that
 * the counters do not go negative.
 */
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
				   unsigned long pfn, unsigned long pgcnt)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_dec_return(&ptc->anon_map_count) < 0);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_dec_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}

/*
 * A new entry is added to the page table: increment the counters for
 * that page, and verify that the page is of the correct type and is not
 * already mapped with a different type by a different process.
 */
static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
				 unsigned long pfn, unsigned long pgcnt,
				 bool rw)
{
	struct page_ext *page_ext;
	struct page *page;
	unsigned long i;
	bool anon;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	page_ext = page_ext_get(page);

	BUG_ON(PageSlab(page));
	anon = PageAnon(page);

	for (i = 0; i < pgcnt; i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		if (anon) {
			BUG_ON(atomic_read(&ptc->file_map_count));
			BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
		} else {
			BUG_ON(atomic_read(&ptc->anon_map_count));
			BUG_ON(atomic_inc_return(&ptc->file_map_count) < 0);
		}
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
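
/*
 * Example of what the rw check above catches: if a second, writable PTE
 * is installed for an anonymous page that is already mapped,
 * atomic_inc_return() yields 2 while rw is true, and the kernel crashes
 * rather than letting two mappings silently corrupt each other's memory.
 */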

/*
 * The page is on the free list, or is being allocated: verify that the
 * counters are zero, and crash if they are not.
 */
void __page_table_check_zero(struct page *page, unsigned int order)
{
	struct page_ext *page_ext;
	unsigned long i;

	BUG_ON(PageSlab(page));

	page_ext = page_ext_get(page);
	BUG_ON(!page_ext);
	for (i = 0; i < (1ul << order); i++) {
		struct page_table_check *ptc = get_page_table_check(page_ext);

		BUG_ON(atomic_read(&ptc->anon_map_count));
		BUG_ON(atomic_read(&ptc->file_map_count));
		page_ext = page_ext_next(page_ext);
	}
	page_ext_put(page_ext);
}
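
/*
 * The entry points below skip &init_mm: only user page tables are
 * checked, and only for entries that map user-accessible pages.
 */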

void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t pte)
{
	if (&init_mm == mm)
		return;

	if (pte_user_accessible_page(pte)) {
		page_table_check_clear(mm, addr, pte_pfn(pte),
				       PAGE_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pte_clear);

void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
				  pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (pmd_user_accessible_page(pmd)) {
		page_table_check_clear(mm, addr, pmd_pfn(pmd),
				       PMD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_clear);

void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
				  pud_t pud)
{
	if (&init_mm == mm)
		return;

	if (pud_user_accessible_page(pud)) {
		page_table_check_clear(mm, addr, pud_pfn(pud),
				       PUD_SIZE >> PAGE_SHIFT);
	}
}
EXPORT_SYMBOL(__page_table_check_pud_clear);
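
/*
 * The set variants below first account for the entry they overwrite, if
 * any, and then charge the new one, so the counters stay balanced when
 * an entry is replaced rather than cleared first.
 */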

void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pte_clear(mm, addr, *ptep);
	if (pte_user_accessible_page(pte)) {
		page_table_check_set(mm, addr, pte_pfn(pte),
				     PAGE_SIZE >> PAGE_SHIFT,
				     pte_write(pte));
	}
}
EXPORT_SYMBOL(__page_table_check_pte_set);

void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pmd_clear(mm, addr, *pmdp);
	if (pmd_user_accessible_page(pmd)) {
		page_table_check_set(mm, addr, pmd_pfn(pmd),
				     PMD_SIZE >> PAGE_SHIFT,
				     pmd_write(pmd));
	}
}
EXPORT_SYMBOL(__page_table_check_pmd_set);

void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	if (&init_mm == mm)
		return;

	__page_table_check_pud_clear(mm, addr, *pudp);
	if (pud_user_accessible_page(pud)) {
		page_table_check_set(mm, addr, pud_pfn(pud),
				     PUD_SIZE >> PAGE_SHIFT,
				     pud_write(pud));
	}
}
EXPORT_SYMBOL(__page_table_check_pud_set);

void __page_table_check_pte_clear_range(struct mm_struct *mm,
					unsigned long addr,
					pmd_t pmd)
{
	if (&init_mm == mm)
		return;

	if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
		pte_t *ptep = pte_offset_map(&pmd, addr);
		unsigned long i;

		for (i = 0; i < PTRS_PER_PTE; i++) {
			__page_table_check_pte_clear(mm, addr, *ptep);
			addr += PAGE_SIZE;
			ptep++;
		}
		pte_unmap(ptep - PTRS_PER_PTE);
	}
}
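
/*
 * How callers reach this code, sketched from the companion header
 * <linux/page_table_check.h>: architectures call thin inline wrappers
 * from their set_pte_at()/ptep_get_and_clear() paths, and each wrapper
 * bails out through the static key unless checking was enabled at boot:
 *
 *	static inline void page_table_check_pte_set(struct mm_struct *mm,
 *						    unsigned long addr,
 *						    pte_t *ptep, pte_t pte)
 *	{
 *		if (static_branch_likely(&page_table_check_disabled))
 *			return;
 *
 *		__page_table_check_pte_set(mm, addr, ptep, pte);
 *	}
 */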