/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 * Copyright (C) 2004 Paul Mackerras ([email protected]) and
 * Benjamin Herrenschmidt ([email protected])
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU-based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is
 * over-paranoid, but I've had enough issues with various CPU revs
 * and bugs that I decided it was worth being over-cautious.
 */
_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
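	/* Only Book3S 32-bit (603/750/745x-class) CPUs need the flush
	 * sequences below; everywhere else this is a no-op. */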
#else
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1
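	/* The feature sections above are patched at boot: 745x-class CPUs
	 * (CPU_FTR_SPEC7450) take the first branch, 750/7400/7410-class
	 * CPUs with an L2CR take the second, and anything else falls
	 * through to the plain L1 flush.
	 */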

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync
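	/* With MSR[DR] clear, loads use physical addresses, so the
	 * displacement reads below really hit ROM/RAM directly; with
	 * MSR[EE] clear, no external interrupt can refill the caches
	 * mid-flush.
	 */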

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync
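	/* Keeping HID0[DPM] (dynamic power management) off while flushing
	 * avoids clock-gating surprises during the sequence; the saved
	 * value in r8 is used to restore the bit at the end.
	 */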

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
1:	li	r5,0x4000		/* count in r5, not r4, so the flush base above survives */
	mtctr	r5
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
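	/* This is a displacement flush: loading 0x4000 lines, 32 bytes
	 * apart, marches 512KB of unique lines through the 32KB dL1,
	 * forcing every dirty line out; deliberate overkill, in keeping
	 * with the paranoia above.
	 */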

	/* Disable / invalidate / enable L1 data */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync
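	/* Both L1 caches have now been disabled, flash-invalidated via
	 * HID0[DCI]/HID0[ICFI], and re-enabled clean; the dcache is left
	 * on so the L2 flush below starts from a known-clean L1, and is
	 * disabled for good further down.
	 */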

	/* Get the current state of the L2CR into r5 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
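	/* The bounce pattern below first branches through the tail of the
	 * 32-byte-aligned block (2f -> 3f -> 1b), pulling the whole block
	 * into the icache, so no instruction fetch can miss to the L2
	 * while the mtspr at 1: is changing it.
	 */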
	.balign	32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2MB ... so using the ROM, we'll end up wrapping back to memory
	 * but that is probably fine. We disp-flush over 4MB to be safe.
	 */
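	/* 0x20000 iterations x 32 bytes = 4MB: read once to displace the
	 * L2 contents, then dcbf'd to push out anything left dirty.
	 */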
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync

	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete */
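	/* L2CR[L2IP] ("invalidate in progress") is bit 31, the LSB; the
	 * rlwinm. below extracts it and we spin until the hardware clears
	 * it.
	 */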
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 data cache */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Restore HID0[DPM] from r8 */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync
	mtlr	r10
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_75x)

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	PPC_DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
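	/* Mask 0,29 keeps bits 0-29, clearing the two low-order bits of
	 * MSSCR0, which hold the L2 prefetch enable field on the 745x.
	 */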
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
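	/* Eight back-to-back dcbf on the same line: presumably
	 * belt-and-braces to drain the load/store queues before the big
	 * displacement loops below.
	 */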

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few MB to "help". The problem isn't totally
	 * fixed by this in theory, but at least, in practice, I couldn't reproduce
	 * it even with a big hammer...
	 */
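	/* 0x20000 lines x 32 bytes = 4MB, starting at physical 0: first
	 * read to displace, then dcbf'd line by line below.
	 */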
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
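	/* The 745x dL1 is 32KB, 8-way (128 sets x 32 bytes): lock seven of
	 * the eight ways via the way-lock bits in LDSTCR, leave one way
	 * unlocked, and read 128 lines (one per set) from ROM so every
	 * line of that way is displaced; then rotate the unlocked way and
	 * repeat until all eight ways have been flushed.
	 */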
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0		/* read from ROM for displacement flush */
	li	r4,0xfe			/* start with only way 0 unlocked */
	li	r5,128			/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)		/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30		/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff			/* all done? */
	bne	1b

	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0			/* check if it is enabled first */
	bge	4f
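	/* L2CR[L2E] is the sign bit, so the cache is enabled only when r3
	 * reads as negative; bge skips the whole L2 sequence otherwise.
	 * Setting L2IO|L2DO below locks the L2 against new allocations
	 * before the hardware flush runs.
	 */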
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r0		/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0		/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR		/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign	32
1:	mtspr	SPRN_L2CR,r3		/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0			/* check if it is enabled */
	bge	6f
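	/* Same sign-bit test as for L2CR: L3CR[L3E] is the MSB, so the L3
	 * flush is skipped when the cache is disabled.
	 */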
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0		/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0		/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR		/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3		/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0		/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11			/* restore DR and EE */
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif /* CONFIG_PPC_BOOK3S_32 */