page_vma_mapped.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry, nonetheless it still does
			 * count as a valid regular mapping for the page (and
			 * is accounted as such in page maps count).
			 *
			 * So handle this special case as if it was a normal
			 * page mapping ie lock CPU page table and returns
			 * true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry) &&
				    !is_device_exclusive_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * pvmw->page or any subpage in case of THP.
 *
 * Otherwise, return false.
 *
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;

		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry) &&
		    !is_device_exclusive_entry(entry))
			return false;

		pfn = swp_offset_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
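
/*
 * Illustrative note (added to this listing, not part of the upstream file):
 * the final containment test in check_pte() relies on unsigned wrap-around,
 * so one comparison covers both ends of the folio's pfn range. A minimal
 * sketch with hypothetical numbers (a 512-subpage THP starting at pfn 0x1000):
 *
 *	unsigned long folio_pfn = 0x1000, nr_pages = 512;
 *	bool hit  = (0x11ffUL - folio_pfn) < nr_pages;	// true: last subpage
 *	bool miss = (0x0fffUL - folio_pfn) < nr_pages;	// false: wraps to a huge value
 *
 * Any pfn below the folio's first pfn wraps around and fails the '< nr_pages'
 * test, so only pfns of the folio's own subpages match.
 */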

/* Returns true if the two ranges overlap. Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
	if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
		return false;
	if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
		return false;
	return true;
}
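
/*
 * Illustrative note (added to this listing, not part of the upstream file):
 * check_pmd() asks whether the PMD's pfn range [pfn, pfn + HPAGE_PMD_NR)
 * intersects the target range [pvmw->pfn, pvmw->pfn + pvmw->nr_pages), using
 * two one-sided comparisons on inclusive end points so no one-past-the-end
 * value has to be formed. With hypothetical numbers and HPAGE_PMD_NR == 512:
 *
 *	pfn = 0x1000;				// PMD covers pfns 0x1000..0x11ff
 *	pvmw->pfn = 0x11f0; pvmw->nr_pages = 64;	// target covers 0x11f0..0x122f
 *
 * Neither early return triggers, so the ranges are reported as overlapping
 * (they share 0x11f0..0x11ff).
 */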

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (unlikely(is_vm_hugetlb_page(vma))) {
		struct hstate *hstate = hstate_vma(vma);
		unsigned long size = huge_page_size(hstate);

		/* The only possible mapping was handled on last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	end = vma_address_end(pvmw);
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
		    (pmd_present(pmde) && pmd_devmap(pmde))) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    !check_pmd(swp_offset_pfn(entry), pvmw))
					return not_found(pvmw);
				return true;
			}
			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (!check_pmd(pmd_pfn(pmde), pvmw))
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    transhuge_vma_suitable(vma, pvmw->address) &&
			    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
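
/*
 * Usage sketch (added to this listing, not part of the upstream file): rmap
 * walkers such as try_to_unmap_one() in mm/rmap.c drive this function in a
 * loop so that every mapping of a large folio in the VMA is visited. Assuming
 * the DEFINE_FOLIO_VMA_WALK() initializer from include/linux/rmap.h, a caller
 * looks roughly like:
 *
 *	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (!pvmw.pte) {
 *			// PMD-mapped THP: pvmw.pmd is valid and pvmw.ptl is held
 *			continue;
 *		}
 *		// PTE mapping: pvmw.pte is mapped and pvmw.ptl is held here
 *	}
 *
 * When the walk returns false the lock is already dropped; to bail out early
 * inside the loop, call page_vma_mapped_walk_done(&pvmw) before breaking.
 */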

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn = page_to_pfn(page),
		.nr_pages = 1,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
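
/*
 * Usage note (added to this listing, not part of the upstream file): the
 * hwpoison code in mm/memory-failure.c is a typical caller, skipping VMAs
 * that do not actually map the poisoned page before deciding which tasks to
 * signal, roughly:
 *
 *	if (!page_mapped_in_vma(page, vma))
 *		continue;	// this task's VMA does not map the page
 *
 * The PVMW_SYNC flag set above makes the walk take the page table locks even
 * for entries that look non-present, so the answer is not racy against a
 * concurrent zap or THP split.
 */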