/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0

/**
 * struct movable_operations - Driver page migration
 * @isolate_page:
 * The VM calls this function to prepare the page to be moved. The page
 * is locked and the driver should not unlock it. The driver should
 * return ``true`` if the page is movable and ``false`` if it is not
 * currently movable. After this function returns, the VM uses the
 * page->lru field, so the driver must preserve any information which
 * is usually stored here.
 *
 * @migrate_page:
 * After isolation, the VM calls this function with the isolated
 * @src page. The driver should copy the contents of the
 * @src page to the @dst page and set up the fields of @dst page.
 * Both pages are locked.
 * If page migration is successful, the driver should call
 * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
 * If the driver cannot migrate the page at the moment, it can return
 * -EAGAIN. The VM interprets this as a temporary migration failure and
 * will retry it later. Any other error value is a permanent migration
 * failure and migration will not be retried.
 * The driver shouldn't touch the @src->lru field while in the
 * migrate_page() function. It may write to @dst->lru.
 *
 * @putback_page:
 * If migration fails on the isolated page, the VM informs the driver
 * that the page is no longer a candidate for migration by calling
 * this function. The driver should put the isolated page back into
 * its own data structure.
 */
struct movable_operations {
	bool (*isolate_page)(struct page *, isolate_mode_t);
	int (*migrate_page)(struct page *dst, struct page *src,
			enum migrate_mode);
	void (*putback_page)(struct page *);
};
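
/*
 * Illustrative sketch only (not part of this header): how a hypothetical
 * driver might wire up the movable_operations contract documented above.
 * All "foo_" names are assumptions for the example; in-tree users include
 * the balloon driver, zsmalloc and z3fold.
 */
#if 0	/* example, not compiled */
static bool foo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* @page arrives locked; report whether it can be moved right now. */
	return true;
}

static int foo_migrate_page(struct page *dst, struct page *src,
			enum migrate_mode mode)
{
	copy_highpage(dst, src);	/* copy contents of @src to @dst */
	__ClearPageMovable(src);	/* per the contract documented above */
	return MIGRATEPAGE_SUCCESS;
}

static void foo_putback_page(struct page *page)
{
	/* Migration failed: re-add @page to the driver's own structure. */
}

static const struct movable_operations foo_mops = {
	.isolate_page	= foo_isolate_page,
	.migrate_page	= foo_migrate_page,
	.putback_page	= foo_putback_page,
};
#endif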

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
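
/*
 * Illustrative sketch only: a minimal new_page_t/free_page_t pair for
 * migrate_pages(), allocating destination pages on the node passed via
 * @private. The "foo_" names and the caller context are assumptions.
 */
#if 0	/* example, not compiled */
static struct page *foo_new_page(struct page *src, unsigned long private)
{
	return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
}

static void foo_free_page(struct page *page, unsigned long private)
{
	__free_page(page);	/* destination page migration never used */
}

/* migrate_pages(&pagelist, foo_new_page, foo_free_page, nid,
 *		 MIGRATE_SYNC, MR_SYSCALL, NULL); */
#endif
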
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);

int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src);
void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
		spinlock_t *ptl);

void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
void __ClearPageMovable(struct page *page);
#else
static inline bool PageMovable(struct page *page) { return false; }
static inline void __SetPageMovable(struct page *page,
		const struct movable_operations *ops)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

static inline bool folio_test_movable(struct folio *folio)
{
	return PageMovable(&folio->page);
}

static inline
const struct movable_operations *page_movable_ops(struct page *page)
{
	VM_BUG_ON(!__PageMovable(page));

	return (const struct movable_operations *)
		((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
}
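
/*
 * Sketch of the tagging scheme page_movable_ops() undoes: __SetPageMovable()
 * (defined in mm/compaction.c) stores the ops pointer in page->mapping with
 * the PAGE_MAPPING_MOVABLE bit set. Illustrative only; foo_mops is the
 * hypothetical ops table from the example above.
 */
#if 0	/* example, not compiled */
	page->mapping = (void *)((unsigned long)&foo_mops | PAGE_MAPPING_MOVABLE);
	/* ...later, page_movable_ops(page) masks the tag bit off again. */
#endif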

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long is only 32 bits
 * and might not have enough bits to store a full physical address plus
 * flags. So far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
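
/*
 * A minimal round-trip through the encoding above, assuming @pfn is a valid
 * page frame number: migrate_pfn() shifts the pfn up and tags it valid,
 * migrate_pfn_to_page() checks the tag and shifts it back. Illustrative only.
 */
#if 0	/* example, not compiled */
	unsigned long mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
	struct page *page = migrate_pfn_to_page(mpfn);	/* == pfn_to_page(pfn) */
#endif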

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
	MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the caller must not change the dst
	 * array after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags also need to
	 * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;

	/*
	 * Set to vmf->page if this is being called to migrate a page as part of
	 * a migrate_to_ram() callback.
	 */
	struct page		*fault_page;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
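
/*
 * Illustrative sketch of the setup/pages/finalize protocol described in
 * struct migrate_vma above. foo_migrate_range() and its locking context are
 * assumptions; a real driver must also allocate destination pages between
 * setup and pages, as noted inline.
 */
#if 0	/* example, not compiled */
static int foo_migrate_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, void *owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.pgmap_owner	= owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	int ret;

	args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst) {
		ret = -ENOMEM;
		goto out;
	}

	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	/*
	 * For each i with args.src[i] & MIGRATE_PFN_MIGRATE set, allocate a
	 * destination page and set args.dst[i] = migrate_pfn(page_to_pfn(p)).
	 */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
out:
	kfree(args.src);
	kfree(args.dst);
	return ret;
}
#endif
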
int migrate_device_range(unsigned long *src_pfns, unsigned long start,
			unsigned long npages);
void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
			unsigned long npages);
void migrate_device_finalize(unsigned long *src_pfns,
			unsigned long *dst_pfns, unsigned long npages);
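
/*
 * Illustrative sketch only: evicting a physically contiguous range of device
 * pages with the migrate_device_* API. src_pfns/dst_pfns sizing, the device
 * pfn range (dev_pfn_first/npages) and the destination allocation step are
 * assumed caller context.
 */
#if 0	/* example, not compiled */
	migrate_device_range(src_pfns, dev_pfn_first, npages);
	/* Allocate system pages and set dst_pfns[i] = migrate_pfn(...) here. */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
#endif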

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */