tlb_low.S

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level functions for performing various
 * types of TLB invalidations on various processors with no hash
 * table.
 *
 * This file implements the following functions for all no-hash
 * processors. Some aren't implemented for some variants. Some
 * are inline in tlbflush.h
 *
 *  - tlbil_va
 *  - tlbil_pid
 *  - tlbil_all
 *  - tlbivax_bcast
 *
 * Code mostly moved over from misc_32.S
 *
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Partially rewritten by Cort Dougan ([email protected])
 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_40x)

/*
 * 40x implementation needs only tlbil_va
 */
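/*
 * Inputs (as used below): r3 = effective address to invalidate,
 * r4 = PID of the context it belongs to (temporarily loaded into
 * SPRN_PID around the tlbsx. search).
 */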
_GLOBAL(__tlbil_va)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and we don't want to be preempted while that happens.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID
	wrteei	0
	mtspr	SPRN_PID,r4
	tlbsx.	r3, 0, r3
	mtspr	SPRN_PID,r6
	wrtee	r5
	bne	1f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
	 * clear. Since 25 is the V bit in the TLB_TAG, loading this value
	 * will invalidate the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
1:	blr

#elif defined(CONFIG_PPC_8xx)

/*
 * Nothing to do for 8xx, everything is inline
 */

#elif defined(CONFIG_44x) /* Includes 47x */

/*
 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
 * of the TLB for everything else.
 */
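/*
 * Inputs (as used below): r3 = effective address to invalidate,
 * r4 = STID/PID, merged into the low 16 bits of MMUCR for the search.
 */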
_GLOBAL(__tlbil_va)
	mfspr	r5,SPRN_MMUCR
	mfmsr	r10
	/*
	 * We write 16 bits of STID since 47x supports that much; we
	 * should never be passed out-of-bounds values on 440 (hopefully).
	 */
	rlwimi	r5,r4,0,16,31
	/* We have to run the search with interrupts disabled, otherwise
	 * an interrupt which causes a TLB miss can clobber the MMUCR
	 * between the mtspr and the tlbsx.
	 *
	 * Critical and Machine Check interrupts take care of saving
	 * and restoring MMUCR, so only normal interrupts have to be
	 * taken care of.
	 */
	wrteei	0
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r6,0,r3
	bne	10f
	sync
#ifndef CONFIG_PPC_47x
	/* On 440 there are only 64 TLB entries, so r6 < 64, which means bit
	 * 22 is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r6,r6,PPC44x_TLB_PAGEID
#else
	oris	r7,r6,0x8000	/* specify way explicitly */
	clrrwi	r4,r3,12	/* get an EPN for the hashing with V = 0 */
	ori	r4,r4,PPC47x_TLBE_SIZE
	tlbwe	r4,r7,0		/* write it */
#endif /* !CONFIG_PPC_47x */
	isync
10:	wrtee	r10
	blr

_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
#ifndef CONFIG_PPC_47x
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
	blr
#else
	/* 476 variant. There's no simple way to do this, so hopefully we can
	 * limit the number of such full invalidates.
	 */
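	/*
	 * Sweep structure (per the loop below): 256 sets of 4 ways each.
	 * tlb_47x_boltmap has one bit per set; a set bit means way 0 of
	 * that set holds a bolted entry and must be left alone. A new
	 * 32-bit boltmap word is loaded every 32 sets.
	 */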
	mfmsr	r11		/* Interrupts off */
	wrteei	0
	li	r3,-1		/* Current set */
	lis	r10,tlb_47x_boltmap@h
	ori	r10,r10,tlb_47x_boltmap@l
	lis	r7,0x8000	/* Specify way explicitly */
	b	9f		/* For each set */
1:	li	r9,4		/* Number of ways */
	li	r4,0		/* Current way */
	li	r6,0		/* Default entry value 0 */
	andi.	r0,r8,1		/* Check if way 0 is bolted */
	mtctr	r9		/* Load way counter */
	bne-	3f		/* Bolted, skip loading it */
2:	/* For each way */
	or	r5,r3,r4	/* Make way|index for tlbre */
	rlwimi	r5,r5,16,8,15	/* Copy index into position */
	tlbre	r6,r5,0		/* Read entry */
3:	addis	r4,r4,0x2000	/* Next way */
	andi.	r0,r6,PPC47x_TLB0_VALID	/* Valid entry ? */
	beq	4f		/* Nope, skip it */
	rlwimi	r7,r5,0,1,2	/* Insert way number */
	rlwinm	r6,r6,0,21,19	/* Clear V */
	tlbwe	r6,r7,0		/* Write it */
4:	bdnz	2b		/* Loop for each way */
	srwi	r8,r8,1		/* Next boltmap bit */
9:	cmpwi	cr1,r3,255	/* Last set done ? */
	addi	r3,r3,1		/* Next set */
	beq	cr1,1f		/* End of loop */
	andi.	r0,r3,0x1f	/* Need to load a new boltmap word ? */
	bne	1b		/* No, loop */
	lwz	r8,0(r10)	/* Load boltmap entry */
	addi	r10,r10,4	/* Next word */
	b	1b		/* Then loop */
1:	isync			/* Sync shadows */
	wrtee	r11
	blr
#endif /* !CONFIG_PPC_47x */

#ifdef CONFIG_PPC_47x

/*
 * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
 * check though; it will blow up soon enough if we mistakenly try
 * to use it on a 440.
 */
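/*
 * Inputs (as used below): r3 = effective address to invalidate,
 * r4 = STID/PID, merged into the low 16 bits of MMUCR before the
 * broadcast tlbivax.
 */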
_GLOBAL(_tlbivax_bcast)
	mfspr	r5,SPRN_MMUCR
	mfmsr	r10
	rlwimi	r5,r4,0,16,31
	wrteei	0
	mtspr	SPRN_MMUCR,r5
	isync
	PPC_TLBIVAX(0, R3)
	isync
	mbar
	tlbsync
BEGIN_FTR_SECTION
	b	1f
END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
	sync
	wrtee	r10
	blr

/*
 * DD2 HW could hang if an instruction fetch happens before the msync
 * completes. Touch enough instruction cache lines to ensure cache hits.
 */
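/*
 * The bcl/mflr pair below puts the address of label 2 into r6, so the
 * PPC_ICBT ops prefetch the next few cache lines of this very code into
 * the I-cache before the sync.
 */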
1:	mflr	r9
	bcl	20,31,$+4
2:	mflr	r6
	li	r7,32
	PPC_ICBT(0,R6,R7)	/* touch next cache line */
	add	r6,r6,r7
	PPC_ICBT(0,R6,R7)	/* touch next cache line */
	add	r6,r6,r7
	PPC_ICBT(0,R6,R7)	/* touch next cache line */
	sync
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	mtlr	r9
	wrtee	r10
	blr
#endif /* CONFIG_PPC_47x */

#elif defined(CONFIG_PPC_85xx)
/*
 * FSL BookE implementations.
 *
 * Since the feature sections use _SECTION_ELSE, the larger code path
 * needs to come before the _SECTION_ELSE.
 */

/*
 * Flush MMU TLB on the local processor
 */
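/*
 * Two paths (selected by MMU_FTR_USE_TLBILX): without tlbilx, set the
 * flash-invalidate bit in MMUCSR0 and poll until the hardware clears
 * it; with tlbilx, a single "tlbilx all" does the job.
 */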
_GLOBAL(_tlbil_all)
BEGIN_MMU_FTR_SECTION
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b
MMU_FTR_SECTION_ELSE
	PPC_TLBILX_ALL(0,R0)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
	msync
	isync
	blr
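
/*
 * Flush local TLB entries matching the PID in r3. With tlbilx the PID
 * goes into the MAS6 SPID field; without tlbilx we fall back to a full
 * flash invalidate.
 */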
_GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
	slwi	r3,r3,16
	mfmsr	r10
	wrteei	0
	mfspr	r4,SPRN_MAS6	/* save MAS6 */
	mtspr	SPRN_MAS6,r3
	PPC_TLBILX_PID(0,R0)
	mtspr	SPRN_MAS6,r4	/* restore MAS6 */
	wrtee	r10
MMU_FTR_SECTION_ELSE
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3
1:
	mfspr	r3,SPRN_MMUCSR0
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
	msync
	isync
	blr

/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 */
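/*
 * Inputs (as used below): r3 = effective address, r4 = PID, placed in
 * the MAS6 SPID field. The search assumes a 4K page size and AS=0.
 */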
_GLOBAL(__tlbil_va)
	mfmsr	r10
	wrteei	0
	slwi	r4,r4,16
	ori	r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l
	mtspr	SPRN_MAS6,r4	/* assume AS=0 for now */
BEGIN_MMU_FTR_SECTION
	tlbsx	0,r3
	mfspr	r4,SPRN_MAS1	/* check valid */
	andis.	r3,r4,MAS1_VALID@h
	beq	1f
	rlwinm	r4,r4,0,1,31
	mtspr	SPRN_MAS1,r4
	tlbwe
MMU_FTR_SECTION_ELSE
	PPC_TLBILX_VA(0,R3)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
	msync
	isync
1:	wrtee	r10
	blr

#elif defined(CONFIG_PPC_BOOK3E_64)
/*
 * New Book3E (>= 2.06) implementation
 *
 * Note: We may be able to get away without the interrupt masking stuff
 * if we save/restore MAS6 on exceptions that might modify it.
 */
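/*
 * Register conventions (as used below): _tlbil_pid and _tlbil_pid_noind
 * take the PID in r3. _tlbil_va and _tlbivax_bcast take r3 = effective
 * address, r4 = PID, r5 = page size (ISIZE) and r6 = indirect flag;
 * r4-r6 are folded into MAS6 while r3 is the address operand of the
 * tlbilx/tlbivax.
 */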
_GLOBAL(_tlbil_pid)
	slwi	r4,r3,MAS6_SPID_SHIFT
	mfmsr	r10
	wrteei	0
	mtspr	SPRN_MAS6,r4
	PPC_TLBILX_PID(0,R0)
	wrtee	r10
	msync
	isync
	blr

_GLOBAL(_tlbil_pid_noind)
	slwi	r4,r3,MAS6_SPID_SHIFT
	mfmsr	r10
	ori	r4,r4,MAS6_SIND
	wrteei	0
	mtspr	SPRN_MAS6,r4
	PPC_TLBILX_PID(0,R0)
	wrtee	r10
	msync
	isync
	blr

_GLOBAL(_tlbil_all)
	PPC_TLBILX_ALL(0,R0)
	msync
	isync
	blr

_GLOBAL(_tlbil_va)
	mfmsr	r10
	wrteei	0
	cmpwi	cr0,r6,0
	slwi	r4,r4,MAS6_SPID_SHIFT
	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
	beq	1f
	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:	mtspr	SPRN_MAS6,r4	/* assume AS=0 for now */
	PPC_TLBILX_VA(0,R3)
	msync
	isync
	wrtee	r10
	blr

_GLOBAL(_tlbivax_bcast)
	mfmsr	r10
	wrteei	0
	cmpwi	cr0,r6,0
	slwi	r4,r4,MAS6_SPID_SHIFT
	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
	beq	1f
	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1:	mtspr	SPRN_MAS6,r4	/* assume AS=0 for now */
	PPC_TLBIVAX(0,R3)
	mbar
	tlbsync
	sync
	wrtee	r10
	blr

#else
#error Unsupported processor type !
#endif

#if defined(CONFIG_PPC_E500)
/*
 * extern void loadcam_entry(unsigned int index)
 *
 * Load TLBCAM[index] entry into the L2 CAM MMU
 * Must preserve r7, r8, r9, r10, r11, r12
 */
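/*
 * The MAS register images are read from the global TLBCAM[] array at
 * TLBCAM + index * TLBCAM_SIZE. LOAD_REG_ADDR_PIC may clobber LR in its
 * PIC form, hence the mflr/mtlr pair around it.
 */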
_GLOBAL(loadcam_entry)
	mflr	r5
	LOAD_REG_ADDR_PIC(r4, TLBCAM)
	mtlr	r5
	mulli	r5,r3,TLBCAM_SIZE
	add	r3,r5,r4
	lwz	r4,TLBCAM_MAS0(r3)
	mtspr	SPRN_MAS0,r4
	lwz	r4,TLBCAM_MAS1(r3)
	mtspr	SPRN_MAS1,r4
	PPC_LL	r4,TLBCAM_MAS2(r3)
	mtspr	SPRN_MAS2,r4
	lwz	r4,TLBCAM_MAS3(r3)
	mtspr	SPRN_MAS3,r4
BEGIN_MMU_FTR_SECTION
	lwz	r4,TLBCAM_MAS7(r3)
	mtspr	SPRN_MAS7,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
	isync
	tlbwe
	isync
	blr

/*
 * Load multiple TLB entries at once, using an alternate-space
 * trampoline so that we don't have to care about whether the same
 * TLB entry maps us before and after.
 *
 * r3 = first entry to write
 * r4 = number of entries to write
 * r5 = temporary tlb entry (0 means no switch to AS1)
 */
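/*
 * The trampoline: copy the TLB entry we are currently running from into
 * ESEL r5 with MAS1_TS set, switch MSR[IS/DS] to AS=1 so we keep running
 * from that copy, rewrite the AS=0 entries via loadcam_entry, then
 * switch back to AS=0 and clear the temporary entry.
 */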
_GLOBAL(loadcam_multi)
	mflr	r8
	/* Don't switch to AS=1 if already there */
	mfmsr	r11
	andi.	r11,r11,MSR_IS
	bne	10f
	mr.	r12, r5
	beq	10f

	/*
	 * Set up temporary TLB entry that is the same as what we're
	 * running from, but in AS=1.
	 */
	bcl	20,31,$+4
1:	mflr	r6
	tlbsx	0,r8
	mfspr	r6,SPRN_MAS1
	ori	r6,r6,MAS1_TS
	mtspr	SPRN_MAS1,r6
	mfspr	r6,SPRN_MAS0
	rlwimi	r6,r5,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK
	mr	r7,r5
	mtspr	SPRN_MAS0,r6
	isync
	tlbwe
	isync

	/* Switch to AS=1 */
	mfmsr	r6
	ori	r6,r6,MSR_IS|MSR_DS
	mtmsr	r6
	isync

10:
	mr	r9,r3
	add	r10,r3,r4
2:	bl	loadcam_entry
	addi	r9,r9,1
	cmpw	r9,r10
	mr	r3,r9
	blt	2b

	/* Don't return to AS=0 if we were in AS=1 at function start */
	andi.	r11,r11,MSR_IS
	bne	3f
	cmpwi	r12, 0
	beq	3f

	/* Return to AS=0 and clear the temporary entry */
	mfmsr	r6
	rlwinm.	r6,r6,0,~(MSR_IS|MSR_DS)
	mtmsr	r6
	isync

	li	r6,0
	mtspr	SPRN_MAS1,r6
	rlwinm	r6,r7,MAS0_ESEL_SHIFT,MAS0_ESEL_MASK
	oris	r6,r6,MAS0_TLBSEL(1)@h
	mtspr	SPRN_MAS0,r6
	isync
	tlbwe
	isync

3:
	mtlr	r8
	blr
#endif