/* cache-fa.S */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-fa.S
 *
 *  Copyright (C) 2005 Faraday Corp.
 *  Copyright (C) 2008-2009 Paulius Zaleckas <[email protected]>
 *
 *  Based on cache-v4wb.S:
 *  Copyright (C) 1997-2002 Russell king
 *
 *  Processors: FA520 FA526 FA626
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "proc-macros.S"

/*
 * The size of one data cache line, in bytes.  All range loops below
 * step by this amount.
 */
#define CACHE_DLINESIZE	16

/*
 * The total size of the data cache, in bytes.  The Gemini SoC has a
 * smaller (8 KiB) D-cache than the other supported parts.
 */
#ifdef CONFIG_ARCH_GEMINI
#define CACHE_DSIZE	8192
#else
#define CACHE_DSIZE	16384
#endif

/*
 * Ranges at least this large are flushed by cleaning the whole cache
 * instead of line-by-line.
 * FIXME: put optimal value here. Current one is just estimation
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 2)
/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.
 */
ENTRY(fa_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(fa_flush_icache_all)
/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
ENTRY(fa_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(fa_flush_kern_cache_all)
	mov	ip, #0
	mov	r2, #VM_EXEC			@ force the I-cache path below
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean/invalidate D cache
	@ r2 carries VMA flags when entered from fa_flush_user_cache_range;
	@ only touch the I side if the region was executable
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space (the I side is only touched for executable VMAs).
 *
 * - start - start address (inclusive, page aligned)
 * - end	 - end address (exclusive, page aligned)
 * - flags	 - vma_area_struct flags describing address space
 */
ENTRY(fa_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ cheaper to flush whole D cache
1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	@ barriers only needed when instructions may have changed
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
ENTRY(fa_coherent_kern_range)
	/* fall through */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
ENTRY(fa_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - addr	- kernel address
 * - size	- size of region
 */
ENTRY(fa_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back (done below with clean & invalidate on the partial
 * boundary lines before the invalidate-only loop).
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
fa_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	tst	r1, #CACHE_DLINESIZE - 1
	bic	r1, r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 * Entries remain valid in the cache.
 *
 * - start  - virtual start address
 * - end	 - virtual end address
 */
fa_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
/*
 * dma_flush_range(start,end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	  - virtual start address of region
 * - end	  - virtual end address of region
 */
ENTRY(fa_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a D line
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr
/*
 * dma_map_area(start, size, dir)
 *
 * Dispatch on the DMA direction: clean for TO_DEVICE, invalidate
 * for FROM_DEVICE, clean+invalidate for BIDIRECTIONAL.
 *
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(fa_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	fa_dma_clean_range		@ dir == DMA_TO_DEVICE
	bcs	fa_dma_inv_range		@ dir >  DMA_TO_DEVICE (FROM_DEVICE)
	b	fa_dma_flush_range		@ dir <  DMA_TO_DEVICE (BIDIRECTIONAL)
ENDPROC(fa_dma_map_area)
/*
 * dma_unmap_area(start, size, dir)
 *
 * No-op: all required maintenance was performed at map time.
 *
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(fa_dma_unmap_area)
	ret	lr
ENDPROC(fa_dma_unmap_area)
	@ "louis" (Level Of Unification Inner Shareable) flush is the same
	@ as a full flush on this single-level cache
	.globl	fa_flush_kern_cache_louis
	.equ	fa_flush_kern_cache_louis, fa_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions fa