
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

#define INVTLB_ADDR_GFALSE_AND_ASID	5

#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
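/*
 * Each page-table level is one page of 8-byte entries, so a level
 * holds PAGE_SIZE / 8 = 2^(PAGE_SHIFT - 3) entries and the index
 * field for each level is (PAGE_SHIFT - 3) bits wide.
 */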
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm
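/*
 * Instantiate the two slow-path trampolines: tlb_do_page_fault_0
 * for read faults and tlb_do_page_fault_1 for write faults, matching
 * the 'write' argument that do_page_fault() takes in a1.
 */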
	tlb_do_page_fault	0
	tlb_do_page_fault	1

SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)
SYM_FUNC_START(handle_tlb_load)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
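	/*
	 * Kernel (including vmalloc) addresses have the sign bit set,
	 * so a negative BADV sends us to the swapper_pg_dir path below
	 * instead of the user page table loaded from PGDL.
	 */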
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
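	/*
	 * rotri.d by (_PAGE_HUGE_SHIFT + 1) moves _PAGE_HUGE into the
	 * sign bit, so the bltz below takes the huge-page path exactly
	 * when that bit is set; the second rotri.d undoes the rotation.
	 */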
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
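	/*
	 * PTEs are handled in even/odd pairs: clearing bit 3 of the PTE
	 * address aligns t1 to the even entry of the pair, so the two
	 * loads below fetch the values for TLBELO0 and TLBELO1.
	 */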
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif
	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
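	/*
	 * invtlb op INVTLB_ADDR_GFALSE_AND_ASID invalidates any
	 * non-global entry matching this ASID and address, so the
	 * tlbfill below cannot create a duplicate entry.
	 */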
	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use. A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0 and entrylo1 to
	 * cover the contiguous huge PTE address space.
	 */
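	/*
	 * For example, with the default 16KB base pages (PAGE_SHIFT = 14)
	 * HPAGE_SHIFT is 25: a huge PTE then maps 32MB, and entrylo0 and
	 * entrylo1 each cover one 16MB half. The exact figures depend on
	 * the configured page size.
	 */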
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
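	/*
	 * Adding 1 << (HPAGE_SHIFT - 1) bumps the physical address in
	 * the entry by half the huge page size, so entrylo1 maps the
	 * upper half of the huge page.
	 */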
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn
nopage_tlb_load:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)
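/*
 * handle_tlb_store has the same structure as handle_tlb_load, but a
 * store needs both _PAGE_PRESENT and _PAGE_WRITE, and it sets the
 * valid, dirty and modified bits in one step.
 */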
SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
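	/*
	 * The andi/xori pair leaves ra non-zero iff _PAGE_PRESENT or
	 * _PAGE_WRITE is clear, i.e. the fast path requires both bits
	 * to be set.
	 */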
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif
	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use. A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0 and entrylo1 to
	 * cover the contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)
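/*
 * handle_tlb_modify is taken when a store hits a TLB entry whose dirty
 * bit is clear. The entry already exists, so only _PAGE_WRITE needs
 * checking before the valid/dirty/modified bits are set.
 */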
SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif
	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use. A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0 and entrylo1 to
	 * cover the contiguous huge PTE address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)
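/*
 * handle_tlb_refill runs in TLB-refill exception context, where CSR.PGD
 * reads as PGDL or PGDH depending on the faulting address and TLBRSAVE
 * provides dedicated scratch space, so the hardware-assisted walk below
 * needs only a single register.
 */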
SYM_FUNC_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
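	/*
	 * Each lddir step above walks one page-table level; ldpte 0 and
	 * ldpte 1 then load the even/odd PTE pair into TLBRELO0/TLBRELO1
	 * for the tlbfill below.
	 */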
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)