
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
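        @ The cores served by this file have a unified cache or no cache at
        @ all, so there is no separate I-cache to invalidate here.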
        ret     lr
ENDPROC(v4_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address space.
 *
 * - mm - mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
        /* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ flush ID cache
        ret     lr
#else
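        @ Without CP15 there are no cache control registers and nothing to
        @ flush; fall through to the next routine, which eventually returns.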
        /* FALLTHROUGH */
#endif

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0   @ flush ID cache
        ret     lr
#else
        /* FALLTHROUGH */
#endif

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4_coherent_user_range)
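        @ No I/D coherency maintenance is needed on these cores; return 0
        @ to report success to the caller.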
        mov     r0, #0
        ret     lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4_flush_kern_dcache_area)
        /* FALLTHROUGH */

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4_dma_flush_range)
#ifdef CONFIG_CPU_CP15
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ flush ID cache
#endif
        ret     lr

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4_dma_unmap_area)
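        @ Buffers mapped DMA_TO_DEVICE need no cache maintenance on unmap;
        @ any other direction gets flushed.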
        teq     r2, #DMA_TO_DEVICE
        bne     v4_dma_flush_range
        /* FALLTHROUGH */

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4_dma_map_area)
        ret     lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

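        @ Flushing to the Level of Unification Inner Shareable (LoUIS) has no
        @ meaning before ARMv7, so it simply aliases the full cache flush.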
        .globl  v4_flush_kern_cache_louis
        .equ    v4_flush_kern_cache_louis, v4_flush_kern_cache_all

        __INITDATA

        @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
        define_cache_functions v4