cacheflush.h (2.6 KB)
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _ASM_GENERIC_CACHEFLUSH_H
  3. #define _ASM_GENERIC_CACHEFLUSH_H
  4. #include <linux/instrumented.h>
  5. struct mm_struct;
  6. struct vm_area_struct;
  7. struct page;
  8. struct address_space;
/*
 * Generic no-op cache maintenance hooks: a physically indexed cache
 * (mapped to physical memory, not virtual memory) does not need to be
 * flushed when TLB entries change, so every default below does nothing.
 * Architectures with virtually indexed caches override the individual
 * hooks by defining the macro of the same name before this header.
 */
  13. #ifndef flush_cache_all
  14. static inline void flush_cache_all(void)
  15. {
  16. }
  17. #endif
  18. #ifndef flush_cache_mm
  19. static inline void flush_cache_mm(struct mm_struct *mm)
  20. {
  21. }
  22. #endif
  23. #ifndef flush_cache_dup_mm
  24. static inline void flush_cache_dup_mm(struct mm_struct *mm)
  25. {
  26. }
  27. #endif
  28. #ifndef flush_cache_range
  29. static inline void flush_cache_range(struct vm_area_struct *vma,
  30. unsigned long start,
  31. unsigned long end)
  32. {
  33. }
  34. #endif
  35. #ifndef flush_cache_page
  36. static inline void flush_cache_page(struct vm_area_struct *vma,
  37. unsigned long vmaddr,
  38. unsigned long pfn)
  39. {
  40. }
  41. #endif
  42. #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
  43. static inline void flush_dcache_page(struct page *page)
  44. {
  45. }
  46. #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
  47. #endif
  48. #ifndef flush_dcache_mmap_lock
  49. static inline void flush_dcache_mmap_lock(struct address_space *mapping)
  50. {
  51. }
  52. #endif
  53. #ifndef flush_dcache_mmap_unlock
  54. static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
  55. {
  56. }
  57. #endif
  58. #ifndef flush_icache_range
  59. static inline void flush_icache_range(unsigned long start, unsigned long end)
  60. {
  61. }
  62. #endif
  63. #ifndef flush_icache_user_range
  64. #define flush_icache_user_range flush_icache_range
  65. #endif
  66. #ifndef flush_icache_page
  67. static inline void flush_icache_page(struct vm_area_struct *vma,
  68. struct page *page)
  69. {
  70. }
  71. #endif
  72. #ifndef flush_icache_user_page
  73. static inline void flush_icache_user_page(struct vm_area_struct *vma,
  74. struct page *page,
  75. unsigned long addr, int len)
  76. {
  77. }
  78. #endif
  79. #ifndef flush_cache_vmap
  80. static inline void flush_cache_vmap(unsigned long start, unsigned long end)
  81. {
  82. }
  83. #endif
  84. #ifndef flush_cache_vunmap
  85. static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
  86. {
  87. }
  88. #endif
  89. #ifndef copy_to_user_page
  90. #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
  91. do { \
  92. instrument_copy_to_user((void __user *)dst, src, len); \
  93. memcpy(dst, src, len); \
  94. flush_icache_user_page(vma, page, vaddr, len); \
  95. } while (0)
  96. #endif
  97. #ifndef copy_from_user_page
  98. #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
  99. do { \
  100. instrument_copy_from_user_before(dst, (void __user *)src, \
  101. len); \
  102. memcpy(dst, src, len); \
  103. instrument_copy_from_user_after(dst, (void __user *)src, len, \
  104. 0); \
  105. } while (0)
  106. #endif
  107. #endif /* _ASM_GENERIC_CACHEFLUSH_H */