/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v7m.S
 *
 *  Based on linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 *  This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm
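
/*
 * The optional \c argument lets the .irp blocks further down generate
 * conditional variants of the MVA operations (e.g. dccimvacne), so a cache
 * operation can be predicated on the current condition flags.
 */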

.macro read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data ignored by ICIALLU(IS), so can be used for the address
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data ignored by BPIALL, so it can be used for the address
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm
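
/*
 * Note on the set/way operand used by the routines below: the value written
 * to DCISW/DCCISW encodes the cache level in bits [3:1], the set index
 * starting at bit log2(line size in bytes), and the way index in the top
 * bits starting at bit (32 - log2(number of ways)), as defined by the
 * ARMv7-M architecture.
 */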
ENTRY(v7m_invalidate_l1)
	mov	r0, #0

	write_csselr r0, r1
	read_ccsidr r0

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)

/*
 *	v7m_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
ENDPROC(v7m_flush_icache_all)

/*
 *	v7m_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r7, r9-r11
 */
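/*
 * The routine below walks the cache hierarchy described by CLIDR from
 * level 0 up to the Level of Coherency, selecting each data/unified level
 * via CSSELR, reading its geometry (line size, ways, sets) from CCSIDR,
 * and then cleaning and invalidating that level by set/way.
 */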
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sync the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ extract max way number (NumWays - 1)
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract max index number (NumSets - 1)
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)

/*
 *	v7m_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based cache
 *	maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 *
 */
ENTRY(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 *	v7m_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v7m_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v7m_flush_user_cache_range)
	ret	lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)

/*
 *	v7m_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v7m_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
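/*
 * The sequence below cleans the D-cache for the range to the point of
 * unification, then invalidates the corresponding I-cache lines and the
 * branch predictor, so that newly written instructions are fetched
 * correctly.
 */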
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
1:
/*
 * We use open coded version of dccmvau otherwise USER() would
 * point at movw instruction.
 */
	dccmvau r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
	icimvau r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)

/*
 *	v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_flush_kern_dcache_area)

/*
 *	v7m_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
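/*
 * Cache lines that are only partially covered at the start or end of the
 * range are cleaned and invalidated (rather than just invalidated), so that
 * unrelated data sharing those lines is not discarded.
 */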
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	dccimvacne r0, r3
	addne	r0, r0, r2
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3
	bic	r1, r1, r3
	dccimvacne r1, r3
	cmp	r0, r1
1:
	dcimvaclo r0, r3
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)

/*
 *	v7m_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccmvac r0, r3		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 *	v7m_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
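/*
 * DMA_FROM_DEVICE buffers are invalidated so stale cached data is not read
 * back after the device writes; other directions are cleaned so the device
 * observes the CPU's data.
 */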
ENTRY(v7m_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
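/*
 * On unmap, anything the device may have written (i.e. any direction other
 * than DMA_TO_DEVICE) is invalidated again, in case lines for the buffer
 * were allocated in the cache while the transfer was in flight.
 */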
ENTRY(v7m_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
ENDPROC(v7m_dma_unmap_area)

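/*
 * No separate Level-of-Unification (louis) flush is provided; it is simply
 * aliased to the full cache flush.
 */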
	.globl	v7m_flush_kern_cache_louis
	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7m