/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
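
/*
 * For reference (rough arithmetic, not a measured figure): with 32-byte
 * lines, invalidating a 16 KiB range one line at a time costs
 * 16384 / 32 = 512 MCRs on the D side, plus another 512 on the I side
 * when VM_EXEC is set, whereas __flush_whole_cache below needs only one
 * or two MCRs.  The exact crossover is a guess, as the "needs
 * benchmarking" note above admits.
 */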

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)
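
/*
 * For reference: the cache maintenance operations used throughout this
 * file are CP15 register 7 accesses of the form
 *
 *	mcr	p15, 0, <Rd>, c7, <CRm>, <opc2>
 *
 * with (per the ARM Architecture Reference Manual ARMv4 cache operations):
 *	c7, c5, 0 - invalidate entire I cache	(Rd should be zero)
 *	c7, c6, 0 - invalidate entire D cache	(Rd should be zero)
 *	c7, c5, 1 - invalidate I cache line	(Rd = virtual address)
 *	c7, c6, 1 - invalidate D cache line	(Rd = virtual address)
 */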

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
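
/*
 * Illustrative call sequence (a sketch, not part of the upstream file):
 * flushing a single executable page whose start address is in r4 could
 * look like this from assembly:
 *
 *	mov	r0, r4			@ start (page aligned)
 *	add	r1, r4, #PAGE_SIZE	@ end (exclusive)
 *	mov	r2, #VM_EXEC		@ vm_area_struct flags: executable
 *	bl	v4wt_flush_user_cache_range
 *
 * In practice the kernel reaches this entry point either directly or via
 * the cpu_cache_fns table emitted at the end of this file, depending on
 * whether the build supports more than one cache type.
 */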

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
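
/*
 * The alias above works because a write-through cache never holds dirty
 * lines: there is nothing to clean, so "clean and invalidate" degenerates
 * to plain invalidation.  An explicit definition equivalent to the .equ
 * (sketch only) would be:
 *
 * ENTRY(v4wt_dma_flush_range)
 *	b	v4wt_dma_inv_range
 * ENDPROC(v4wt_dma_flush_range)
 */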

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
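
/*
 * For orientation (a sketch, not the macro's verbatim output): the
 * define_cache_functions macro in proc-macros.S emits a table of pointers
 * named v4wt_cache_fns, laid out to match struct cpu_cache_fns in
 * <asm/cacheflush.h>, roughly:
 *
 *	.globl	v4wt_cache_fns
 * v4wt_cache_fns:
 *	.long	v4wt_flush_icache_all
 *	.long	v4wt_flush_kern_cache_all
 *	.long	v4wt_flush_kern_cache_louis
 *	.long	v4wt_flush_user_cache_all
 *	.long	v4wt_flush_user_cache_range
 *	.long	v4wt_coherent_kern_range
 *	.long	v4wt_coherent_user_range
 *	.long	v4wt_flush_kern_dcache_area
 *	.long	v4wt_dma_map_area
 *	.long	v4wt_dma_unmap_area
 *	.long	v4wt_dma_flush_range
 *
 * which is how generic code selects these routines on a multi-cache build.
 */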