/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

#define INVTLB_ADDR_GFALSE_AND_ASID	5

#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
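
/*
 * tlb_do_page_fault generates the two slow-path stubs that enter the
 * generic C fault handler: a0 = pt_regs, a1 tells do_page_fault()
 * whether the access was a write (1) or a read (0), a2 = fault address.
 */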
	.macro tlb_do_page_fault, write
	SYM_FUNC_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp
	REG_S		a2, sp, PT_BVADDR
	li.w		a1, \write
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
	SYM_FUNC_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault	0
	tlb_do_page_fault	1

SYM_FUNC_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp
	move		a1, zero
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la.abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_tlb_protect)

SYM_FUNC_START(handle_tlb_load)
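	/*
	 * Stash t0/t1/ra in the KScratch CSRs so the fast path can run
	 * without saving a full trap frame; they are restored before ertn.
	 */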
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
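	/*
	 * Software page-table walk: at each level, bstrpick.d extracts
	 * that level's index field from BADV and alsl.d adds index * 8
	 * (entries are 8 bytes) to the table pointer.
	 */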
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0
	/*
	 * For huge TLB entries the PMD entry does not contain an address
	 * but the TLB PTE itself. Check the _PAGE_HUGE bit to see if we
	 * need to jump to huge TLB processing: rotri.d rotates _PAGE_HUGE
	 * into the sign bit so a single bltz can test it, and the second
	 * rotri.d undoes the rotation on the non-huge path.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_load
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
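
	/* Point t1 at the PTE: index the PTE table by the page-number bits of BADV */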
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2
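	/*
	 * On SMP the PTE is updated with ll/sc, so a concurrent
	 * modification restarts the read-modify-write sequence.
	 */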
#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load
#else
	st.d		t0, t1, 0
#endif
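	/*
	 * Each TLB entry maps an even/odd pair of pages: clear bit 3 of
	 * the PTE address to reach the even slot, load both PTEs into
	 * TLBELO0/1, and overwrite the entry that tlbsrch finds for BADV.
	 */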
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
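	/* BADV is a kernel address: restart the walk from swapper_pg_dir */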
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point for a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
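	/*
	 * sc.d clobbers t0 with the success flag, so the updated PTE
	 * value is rebuilt in t0 once the store-conditional has succeeded.
	 */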
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
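	/*
	 * The faulting address may still have a stale normal-size TLB
	 * entry for this ASID; invalidate it before filling a huge entry.
	 */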
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the TLB entry we intend
	 * to use, so a TLB entry half the size of the configured huge
	 * page size is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: move the HGLOBAL bit down to the GLOBAL bit position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill
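
	/* Reset default page size */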
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
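	/* Not present: take the slow path into do_page_fault() as a read fault */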
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_FUNC_END(handle_tlb_load)

SYM_FUNC_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0
	/*
	 * For huge TLB entries the PMD entry does not contain an address
	 * but the TLB PTE itself. Check the _PAGE_HUGE bit to see if we
	 * need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_store
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)

	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
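	/*
	 * A store needs both _PAGE_PRESENT and _PAGE_WRITE: isolate the
	 * two bits and flip them, so any nonzero result means at least
	 * one was clear. A permitted store sets DIRTY and MODIFIED along
	 * with VALID.
	 */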
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point for a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the TLB entry we intend
	 * to use, so a TLB entry half the size of the configured huge
	 * page size is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: move the HGLOBAL bit down to the GLOBAL bit position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_store)

SYM_FUNC_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0
	/*
	 * For huge TLB entries the PMD entry does not contain an address
	 * but the TLB PTE itself. Check the _PAGE_HUGE bit to see if we
	 * need to jump to huge TLB processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1
	bltz		ra, tlb_huge_update_modify
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)

	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
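	/*
	 * A modify exception means the TLB entry exists but its dirty bit
	 * is clear, so only write permission needs checking here.
	 */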
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la.abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point for a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the TLB entry we intend
	 * to use, so a TLB entry half the size of the configured huge
	 * page size is written into entrylo0 and entrylo1 to cover the
	 * contiguous huge PTE address space.
	 */
	/* Huge page: move the HGLOBAL bit down to the GLOBAL bit position */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0
	/* Convert to entrylo1: advance the PA by half the huge page size */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page TLB entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0
	csrrd		ra, EXCEPTION_KS2
	la.abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_FUNC_END(handle_tlb_modify)

SYM_FUNC_START(handle_tlb_refill)
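	/*
	 * TLB refill runs in its own exception context with a dedicated
	 * scratch CSR (TLBRSAVE). CSR.PGD reads as PGDL or PGDH depending
	 * on the sign bit of BADV, and lddir/ldpte perform the hardware-
	 * assisted walk that loads the even/odd PTE pair into TLBELO0/1
	 * for tlbfill.
	 */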
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0
	ldpte		t0, 1
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_FUNC_END(handle_tlb_refill)