highmem-internal.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
#define kmap_prot PAGE_KERNEL
#endif

void *kmap_high(struct page *page);
void kunmap_high(struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);

static inline void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		addr = page_address(page);
	else
		addr = kmap_high(page);
	kmap_flush_tlb((unsigned long)addr);
	return addr;
}

static inline void kunmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
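
/*
 * Illustrative usage sketch for the sleeping kmap()/kunmap() pair; 'page'
 * is an assumed, caller-held struct page and the memset is just an example
 * access. kunmap() takes the struct page, not the returned address.
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 */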
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}

static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}

static inline void *kmap_local_page(struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
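
/*
 * Illustrative sketch: kmap_local_folio() maps only the page of the folio
 * that contains @offset and returns the address of that byte, so the offset
 * does not need to be re-added by the caller. 'folio' and the offset value
 * are assumed here (offset 5000 falls in the folio's second page with 4K
 * pages).
 *
 *	char *p = kmap_local_folio(folio, 5000);
 *	*p = 0;
 *	kunmap_local(p);
 */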
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}

static inline void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}

static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

unsigned int __nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;

static inline unsigned int nr_free_highpages(void)
{
	return __nr_free_highpages();
}

static inline unsigned long totalhigh_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalhigh_pages);
}

static inline void totalhigh_pages_add(long count)
{
	atomic_long_add(count, &_totalhigh_pages);
}

static inline bool is_kmap_addr(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP);
}
#else /* CONFIG_HIGHMEM */

static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

static inline void kunmap_high(struct page *page) { }
static inline void kmap_flush_unused(void) { }

static inline void kunmap(struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}

static inline void *kmap_local_page(struct page *page)
{
	return page_address(page);
}

static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
	return page_address(&folio->page) + offset;
}

static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}

static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}

static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

static inline void *kmap_atomic(struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return page_address(page);
}

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}

static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}

static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}

static inline unsigned int nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0UL; }

static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */
/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, it also re-enables
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
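
/*
 * Illustrative sketch of the legacy atomic pattern; 'page', 'buf' and 'len'
 * are assumed. Pagefaults (and preemption or migration) are disabled for
 * the duration of the mapping, so the code between kmap_atomic() and
 * kunmap_atomic() must not sleep. New code should prefer kmap_local_page()
 * with kunmap_local() instead.
 *
 *	char *vaddr = kmap_atomic(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap_atomic(vaddr);
 */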
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address returned from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)
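
/*
 * Illustrative sketch for the local mapping API; 'page' is assumed.
 * kmap_local_page() mappings are per-thread, may nest, and must be released
 * in reverse order. kunmap_local() takes the address returned by the mapping
 * call (plus any offset), not the struct page.
 *
 *	char *vaddr = kmap_local_page(page);
 *	vaddr[0] = 0x42;
 *	kunmap_local(vaddr);
 */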

#endif