/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *    Size	Clean (ticks)	Dirty (ticks)
 *    4096	 21  20  21	 53  55  54
 *    8192	 40  41  40	106 100 102
 *   16384	 77  77  76	140 140 138
 *   32768	150 149 150	214 216 212 <---
 *   65536	296 297 296	351 358 361
 *  131072	591 591 591	656 657 651
 *   Whole	132 136 132	221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)
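
/*
 * With the sizes defined above, CACHE_DLIMIT works out to 64 KB on
 * the SA-110 (16 KB data cache) and 32 KB on the SA-1100 (8 KB data
 * cache): per the timings above, ranges of four times the cache size
 * or more are cheaper to handle with a whole-cache clean.
 */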

	.data
	.align	2
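/*
 * flush_base holds the virtual address of the flush window that the
 * next whole-cache flush will read from.  FLUSH_BASE is a reserved,
 * cacheable mapping provided by the platform; the code alternates
 * between two such windows (see __flush_whole_cache below).
 */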
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
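	/*
	 * Clean the D cache by reading: on this writeback,
	 * read-allocate cache, loading CACHE_DSIZE bytes of fresh
	 * data evicts every line, writing back any dirty contents.
	 * Toggling flush_base between two windows (EOR with
	 * CACHE_DSIZE) guarantees that the loads always miss.
	 */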
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
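	/*
	 * StrongARM parts with a minicache (a small secondary data
	 * cache, 512 bytes on the SA-1100 family) flush it the same
	 * way, through its own dedicated flush window.
	 */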
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache
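	/*
	 * Small range: walk it line by line.  On a writeback cache
	 * each line must be cleaned (written back) before it is
	 * invalidated, or dirty data would be lost.
	 */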
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
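	@ r0/r1 now hold the (start, end) pair expected by the
	@ fall-through path below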
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
ENTRY(v4wb_coherent_user_range)
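	/*
	 * Typical caller: code has just been written through the
	 * D cache (module load, signal trampoline) and must become
	 * visible to instruction fetch, so clean the D lines covering
	 * the range and then invalidate the I cache.
	 */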
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wb_dma_inv_range:
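	/*
	 * A line that is only partially covered by the range may
	 * hold live data outside it, so clean such boundary lines
	 * first; only then is it safe to invalidate.
	 */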
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 */
v4wb_dma_clean_range:
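	/*
	 * Cleaning pushes dirty lines out to memory but keeps them
	 * valid in the cache; the final write buffer drain makes the
	 * data visible to a DMA device before the transfer starts.
	 */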
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start - virtual start address
 *	- end	- virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
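	/*
	 * enum dma_data_direction: DMA_BIDIRECTIONAL = 0,
	 * DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2.  After the
	 * compare: eq -> clean (CPU wrote, device reads); cs and
	 * not eq -> invalidate (device writes, CPU reads);
	 * otherwise bidirectional -> clean and invalidate.
	 */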
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
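	/*
	 * Nothing to do here: this core does not speculatively
	 * prefetch into the D cache, so the maintenance performed
	 * at map time still holds when the transfer completes.
	 */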
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

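/*
 * There is only one cache level on these CPUs, so flushing to the
 * Level of Unification Inner Shareable (louis) is the same as
 * flushing the whole cache.
 */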
	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb