  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Based on arch/arm/include/asm/cacheflush.h
  4. *
  5. * Copyright (C) 1999-2002 Russell King.
  6. * Copyright (C) 2012 ARM Ltd.
  7. */
  8. #ifndef __ASM_CACHEFLUSH_H
  9. #define __ASM_CACHEFLUSH_H
  10. #include <linux/kgdb.h>
  11. #include <linux/mm.h>
  12. /*
  13. * This flag is used to indicate that the page pointed to by a pte is clean
  14. * and does not require cleaning before returning it to the user.
  15. */
  16. #define PG_dcache_clean PG_arch_1
  17. /*
  18. * MM Cache Management
  19. * ===================
  20. *
  21. * The arch/arm64/mm/cache.S implements these methods.
  22. *
  23. * Start addresses are inclusive and end addresses are exclusive; start
  24. * addresses should be rounded down, end addresses up.
  25. *
  26. * See Documentation/core-api/cachetlb.rst for more information. Please note that
  27. * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  28. * VIPT I-cache.
  29. *
  30. * All functions below apply to the interval [start, end)
  31. * - start - virtual start address (inclusive)
  32. * - end - virtual end address (exclusive)
  33. *
  34. * caches_clean_inval_pou(start, end)
  35. *
  36. * Ensure coherency between the I-cache and the D-cache region to
  37. * the Point of Unification.
  38. *
  39. * caches_clean_inval_user_pou(start, end)
  40. *
  41. * Ensure coherency between the I-cache and the D-cache region to
  42. * the Point of Unification.
  43. * Use only if the region might access user memory.
  44. *
  45. * icache_inval_pou(start, end)
  46. *
  47. * Invalidate I-cache region to the Point of Unification.
  48. *
  49. * dcache_clean_inval_poc(start, end)
  50. *
  51. * Clean and invalidate D-cache region to the Point of Coherency.
  52. *
  53. * dcache_inval_poc(start, end)
  54. *
  55. * Invalidate D-cache region to the Point of Coherency.
  56. *
  57. * dcache_clean_poc(start, end)
  58. *
  59. * Clean D-cache region to the Point of Coherency.
  60. *
  61. * dcache_clean_pop(start, end)
  62. *
  63. * Clean D-cache region to the Point of Persistence.
  64. *
  65. * dcache_clean_pou(start, end)
  66. *
  67. * Clean D-cache region to the Point of Unification.
  68. */
  69. extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
  70. extern void icache_inval_pou(unsigned long start, unsigned long end);
  71. extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
  72. extern void dcache_inval_poc(unsigned long start, unsigned long end);
  73. extern void dcache_clean_poc(unsigned long start, unsigned long end);
  74. extern void dcache_clean_pop(unsigned long start, unsigned long end);
  75. extern void dcache_clean_pou(unsigned long start, unsigned long end);
  76. extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
  77. extern void sync_icache_aliases(unsigned long start, unsigned long end);
  78. static inline void flush_icache_range(unsigned long start, unsigned long end)
  79. {
  80. caches_clean_inval_pou(start, end);
  81. /*
  82. * IPI all online CPUs so that they undergo a context synchronization
  83. * event and are forced to refetch the new instructions.
  84. */
  85. /*
  86. * KGDB performs cache maintenance with interrupts disabled, so we
  87. * will deadlock trying to IPI the secondary CPUs. In theory, we can
  88. * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
  89. * just means that KGDB will elide the maintenance altogether! As it
  90. * turns out, KGDB uses IPIs to round-up the secondary CPUs during
  91. * the patching operation, so we don't need extra IPIs here anyway.
  92. * In which case, add a KGDB-specific bodge and return early.
  93. */
  94. if (in_dbg_master())
  95. return;
  96. kick_all_cpus_sync();
  97. }
  98. #define flush_icache_range flush_icache_range
  99. /*
  100. * Copy user data from/to a page which is mapped into a different
  101. * processes address space. Really, we want to allow our "user
  102. * space" model to handle this.
  103. */
  104. extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  105. unsigned long, void *, const void *, unsigned long);
  106. #define copy_to_user_page copy_to_user_page
  107. /*
  108. * flush_dcache_page is used when the kernel has written to the page
  109. * cache page at virtual address page->virtual.
  110. *
  111. * If this page isn't mapped (ie, page_mapping == NULL), or it might
  112. * have userspace mappings, then we _must_ always clean + invalidate
  113. * the dcache entries associated with the kernel mapping.
  114. *
  115. * Otherwise we can defer the operation, and clean the cache when we are
  116. * about to change to user space. This is the same method as used on SPARC64.
  117. * See update_mmu_cache for the user space part.
  118. */
  119. #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
  120. extern void flush_dcache_page(struct page *);
  121. static __always_inline void icache_inval_all_pou(void)
  122. {
  123. if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
  124. return;
  125. asm("ic ialluis");
  126. dsb(ish);
  127. }
  128. #include <asm-generic/cacheflush.h>
  129. #endif /* __ASM_CACHEFLUSH_H */