- // SPDX-License-Identifier: GPL-2.0-or-later
- /*
- * Copyright 2005, Paul Mackerras, IBM Corporation.
- * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
- * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
- */
- #include <linux/sched.h>
- #include <linux/mm_types.h>
- #include <linux/mm.h>
- #include <linux/stop_machine.h>
- #include <asm/sections.h>
- #include <asm/mmu.h>
- #include <asm/tlb.h>
- #include <asm/firmware.h>
- #include <mm/mmu_decl.h>
- #include <trace/events/thp.h>
- #if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
- #warning Limited user VSID range means pagetable space is wasted
- #endif
- #ifdef CONFIG_SPARSEMEM_VMEMMAP
- /*
- * vmemmap is the starting address of the virtual address space where
- * struct pages are allocated for all possible PFNs present on the system
- * including holes and bad memory (hence sparse). These virtual struct
- * pages are stored in sequence in this virtual address space
- * regardless of whether the corresponding PFN is valid, which gives a
- * constant arithmetic relationship between the address of a struct
- * page and its PFN.
- *
- * During boot, or when a new memory section is hot-added, physical
- * memory allocation (including hash table bolting) will
- * be performed for the set of struct pages which are part of the memory
- * section. This saves memory by not allocating struct pages for PFNs
- * which are not valid.
- *
- * ----------------------------------------------
- * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
- * ----------------------------------------------
- *
- * f000000000000000 c000000000000000
- * vmemmap +--------------+ +--------------+
- * + | page struct | +--------------> | page struct |
- * | +--------------+ +--------------+
- * | | page struct | +--------------> | page struct |
- * | +--------------+ | +--------------+
- * | | page struct | + +------> | page struct |
- * | +--------------+ | +--------------+
- * | | page struct | | +--> | page struct |
- * | +--------------+ | | +--------------+
- * | | page struct | | |
- * | +--------------+ | |
- * | | page struct | | |
- * | +--------------+ | |
- * | | page struct | | |
- * | +--------------+ | |
- * | | page struct | | |
- * | +--------------+ | |
- * | | page struct | +-------+ |
- * | +--------------+ |
- * | | page struct | +-----------+
- * | +--------------+
- * | | page struct | No mapping
- * | +--------------+
- * | | page struct | No mapping
- * v +--------------+
- *
- * -----------------------------------------
- * | RELATION BETWEEN STRUCT PAGES AND PFNS|
- * -----------------------------------------
- *
- * vmemmap +--------------+ +---------------+
- * + | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | |
- * | +--------------+
- * | | |
- * | +--------------+
- * | | |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | |
- * | +--------------+
- * | | |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * | +--------------+ +---------------+
- * | | page struct | +-------------> | PFN |
- * v +--------------+ +---------------+
- */
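- /*
- * Illustrative sketch (not part of this file's logic): because the
- * vmemmap is virtually contiguous, the pfn <-> struct page conversion
- * is pure arithmetic, mirroring the generic sparsemem-vmemmap helpers
- * in include/asm-generic/memory_model.h:
- *
- *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
- *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
- *
- * Only the parts of this virtual array that cover valid PFNs are
- * backed by real memory, via hash__vmemmap_create_mapping() below.
- */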
- /*
- * On hash-based CPUs, the vmemmap is bolted in the hash table.
- */
- int __meminit hash__vmemmap_create_mapping(unsigned long start,
- unsigned long page_size,
- unsigned long phys)
- {
- int rc;
- if ((start + page_size) >= H_VMEMMAP_END) {
- pr_warn("Outside the supported range\n");
- return -1;
- }
- rc = htab_bolt_mapping(start, start + page_size, phys,
- pgprot_val(PAGE_KERNEL),
- mmu_vmemmap_psize, mmu_kernel_ssize);
- if (rc < 0) {
- int rc2 = htab_remove_mapping(start, start + page_size,
- mmu_vmemmap_psize,
- mmu_kernel_ssize);
- BUG_ON(rc2 && (rc2 != -ENOENT));
- }
- return rc;
- }
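- /*
- * Caller sketch (simplified from the powerpc vmemmap_populate() path
- * in arch/powerpc/mm/init_64.c): backing memory is allocated and
- * bolted one vmemmap page-size chunk at a time:
- *
- *	for (; start < end; start += page_size) {
- *		void *p = vmemmap_alloc_block(page_size, node);
- *		if (!p)
- *			return -ENOMEM;
- *		vmemmap_create_mapping(start, page_size, __pa(p));
- *	}
- */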
- #ifdef CONFIG_MEMORY_HOTPLUG
- void hash__vmemmap_remove_mapping(unsigned long start,
- unsigned long page_size)
- {
- int rc = htab_remove_mapping(start, start + page_size,
- mmu_vmemmap_psize,
- mmu_kernel_ssize);
- BUG_ON((rc < 0) && (rc != -ENOENT));
- WARN_ON(rc == -ENOENT);
- }
- #endif
- #endif /* CONFIG_SPARSEMEM_VMEMMAP */
- /*
- * map_kernel_page is currently only called by __ioremap. It adds an
- * entry to the ioremap page table and an entry to the HPT, possibly
- * bolting it.
- */
- int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
- {
- pgd_t *pgdp;
- p4d_t *p4dp;
- pud_t *pudp;
- pmd_t *pmdp;
- pte_t *ptep;
- BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
- if (slab_is_available()) {
- pgdp = pgd_offset_k(ea);
- p4dp = p4d_offset(pgdp, ea);
- pudp = pud_alloc(&init_mm, p4dp, ea);
- if (!pudp)
- return -ENOMEM;
- pmdp = pmd_alloc(&init_mm, pudp, ea);
- if (!pmdp)
- return -ENOMEM;
- ptep = pte_alloc_kernel(pmdp, ea);
- if (!ptep)
- return -ENOMEM;
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
- } else {
- /*
- * If the mm subsystem is not fully up, we cannot create a
- * linux page table entry for this mapping. Simply bolt an
- * entry in the hardware page table.
- */
- if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
- mmu_io_psize, mmu_kernel_ssize)) {
- pr_err("Failed to create bolted mapping for IO memory at %016lx\n",
- pa);
- return -ENOMEM;
- }
- }
- smp_wmb();
- return 0;
- }
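- /*
- * Usage sketch (hypothetical caller, for illustration only): an
- * ioremap-style path maps a physical range one page at a time, e.g.:
- *
- *	for (i = 0; i < size; i += PAGE_SIZE)
- *		if (map_kernel_page(ea + i, pa + i, prot))
- *			return NULL;
- */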
- #ifdef CONFIG_TRANSPARENT_HUGEPAGE
- unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, unsigned long clr,
- unsigned long set)
- {
- __be64 old_be, tmp;
- unsigned long old;
- #ifdef CONFIG_DEBUG_VM
- WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
- assert_spin_locked(pmd_lockptr(mm, pmdp));
- #endif
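- /*
- * The ldarx/stdcx. loop below is an atomic read-modify-write of the
- * PMD that also honours the H_PAGE_BUSY "lock" bit. In C-like
- * pseudocode (sketch only; cmpxchg_ok stands in for the reservation):
- *
- *	do {
- *		old = *pmdp;
- *		if (old & H_PAGE_BUSY)
- *			continue;		// retry until not busy
- *		new = (old & ~clr) | set;
- *	} while (!cmpxchg_ok(pmdp, old, new));
- */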
- __asm__ __volatile__(
- "1: ldarx %0,0,%3\n\
- and. %1,%0,%6\n\
- bne- 1b \n\
- andc %1,%0,%4 \n\
- or %1,%1,%7\n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
- : "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
- "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
- : "cc" );
- old = be64_to_cpu(old_be);
- trace_hugepage_update(addr, old, clr, set);
- if (old & H_PAGE_HASHPTE)
- hpte_do_hugepage_flush(mm, addr, pmdp, old);
- return old;
- }
- pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp)
- {
- pmd_t pmd;
- VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(pmd_trans_huge(*pmdp));
- VM_BUG_ON(pmd_devmap(*pmdp));
- pmd = *pmdp;
- pmd_clear(pmdp);
- /*
- * Wait for all pending hash_page to finish. This is needed
- * in case of subpage collapse. When we collapse normal pages
- * to hugepage, we first clear the pmd, then invalidate all
- * the PTE entries. The assumption here is that any low level
- * page fault will see a none pmd and take the slow path that
- * will wait on mmap_lock. But we could very well be in a
- * hash_page with a stale local ptep pointer value. Such a hash_page
- * can result in adding new HPTE entries for normal subpages.
- * That means we could be modifying the page content as we
- * copy them to a huge page. So wait for parallel hash_page
- * to finish before invalidating HPTE entries. We can do this
- * by sending an IPI to all the cpus and executing a dummy
- * function there.
- */
- serialize_against_pte_lookup(vma->vm_mm);
- /*
- * Now invalidate the hpte entries in the range
- * covered by pmd. This makes sure we take a
- * fault and will find the pmd as none, which will
- * result in a major fault which takes mmap_lock and
- * hence waits for collapse to complete. Without this
- * the __collapse_huge_page_copy can result in copying
- * the old content.
- */
- flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
- return pmd;
- }
- /*
- * We want to put the pgtable in pmd and use pgtable for tracking
- * the base page size hptes
- */
- void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable)
- {
- pgtable_t *pgtable_slot;
- assert_spin_locked(pmd_lockptr(mm, pmdp));
- /*
- * We store the pgtable in the second half of the PMD page.
- */
- pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
- *pgtable_slot = pgtable;
- /*
- * Expose the deposited pgtable to other cpus before we set the
- * hugepage PTE at the pmd level: the hash fault code looks at the
- * deposited pgtable to store hash slot index values.
- */
- smp_wmb();
- }
- pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
- {
- pgtable_t pgtable;
- pgtable_t *pgtable_slot;
- assert_spin_locked(pmd_lockptr(mm, pmdp));
- pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
- pgtable = *pgtable_slot;
- /*
- * Once we withdraw, mark the entry NULL.
- */
- *pgtable_slot = NULL;
- /*
- * We store HPTE information in the deposited PTE fragment.
- * zero out the content on withdraw.
- */
- memset(pgtable, 0, PTE_FRAG_SIZE);
- return pgtable;
- }
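- /*
- * Orientation sketch: the generic THP code (mm/huge_memory.c) drives
- * the deposit/withdraw pair roughly as follows:
- *
- *	pgtable = pte_alloc_one(mm);
- *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);	// hugepage mapped
- *	...
- *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);	// split or zap
- *	pte_free(mm, pgtable);
- *
- * On hash, the deposited fragment also carries per-subpage hash slot
- * information, which is why it is zeroed on withdraw above.
- */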
- /*
- * A linux hugepage PMD was changed and the corresponding hash table entries
- * need to be flushed.
- */
- void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, unsigned long old_pmd)
- {
- int ssize;
- unsigned int psize;
- unsigned long vsid;
- unsigned long flags = 0;
- /* Get the base page size, vsid and segment size */
- #ifdef CONFIG_DEBUG_VM
- psize = get_slice_psize(mm, addr);
- BUG_ON(psize == MMU_PAGE_16M);
- #endif
- if (old_pmd & H_PAGE_COMBO)
- psize = MMU_PAGE_4K;
- else
- psize = MMU_PAGE_64K;
- if (!is_kernel_addr(addr)) {
- ssize = user_segment_size(addr);
- vsid = get_user_vsid(&mm->context, addr, ssize);
- WARN_ON(vsid == 0);
- } else {
- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
- ssize = mmu_kernel_ssize;
- }
- if (mm_is_thread_local(mm))
- flags |= HPTE_LOCAL_UPDATE;
- return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
- }
- pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp)
- {
- pmd_t old_pmd;
- pgtable_t pgtable;
- unsigned long old;
- pgtable_t *pgtable_slot;
- old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
- old_pmd = __pmd(old);
- /*
- * We have pmd == none and we are holding page_table_lock.
- * So we can safely go and clear the pgtable hash
- * index info.
- */
- pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
- pgtable = *pgtable_slot;
- /*
- * Zero out the old valid bit and hash index details so that a
- * later hash fault does not pick up stale values.
- */
- memset(pgtable, 0, PTE_FRAG_SIZE);
- return old_pmd;
- }
- int hash__has_transparent_hugepage(void)
- {
- if (!mmu_has_feature(MMU_FTR_16M_PAGE))
- return 0;
- /*
- * We support THP only if PMD_SIZE is 16MB.
- */
- if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
- return 0;
- /*
- * We need to make sure that we support 16MB hugepage in a segment
- * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
- * of 64K.
- */
- /*
- * If we have 64K HPTEs, we will be using those by default. A penc
- * (HPTE page-size encoding) of -1 means that 16M pages are not
- * supported with that base page size.
- */
- if (mmu_psize_defs[MMU_PAGE_64K].shift &&
- (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
- return 0;
- /*
- * OK, we only have 4K HPTEs
- */
- if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
- return 0;
- return 1;
- }
- EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
- #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
- #ifdef CONFIG_STRICT_KERNEL_RWX
- struct change_memory_parms {
- unsigned long start, end, newpp;
- unsigned int step, nr_cpus;
- atomic_t master_cpu;
- atomic_t cpu_counter;
- };
- // We'd rather this was on the stack but it has to be in the RMO
- static struct change_memory_parms chmem_parms;
- // And therefore we need a lock to protect it from concurrent use
- static DEFINE_MUTEX(chmem_lock);
- static void change_memory_range(unsigned long start, unsigned long end,
- unsigned int step, unsigned long newpp)
- {
- unsigned long idx;
- pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
- start, end, newpp, step);
- for (idx = start; idx < end; idx += step)
- /* Not sure if we can do much with the return value */
- mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
- mmu_kernel_ssize);
- }
- static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
- {
- unsigned long msr, tmp, flags;
- int *p;
- p = &parms->cpu_counter.counter;
- local_irq_save(flags);
- hard_irq_disable();
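- /*
- * The asm below implements this protocol in real mode (C-like sketch
- * only; it must run with translation off, hence not actual C):
- *
- *	msr = mfmsr();
- *	mtmsrd(msr & ~(MSR_IR | MSR_DR));	// switch to real mode
- *	atomic_dec(&parms->cpu_counter);	// tell master we're ready
- *	while (atomic_read(&parms->cpu_counter) != 0)
- *		;				// spin until master is done
- *	mtmsrd(msr);				// back to virtual mode
- */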
- asm volatile (
- // Switch to real mode and leave interrupts off
- "mfmsr %[msr] ;"
- "li %[tmp], %[MSR_IR_DR] ;"
- "andc %[tmp], %[msr], %[tmp] ;"
- "mtmsrd %[tmp] ;"
- // Tell the master we are in real mode
- "1: "
- "lwarx %[tmp], 0, %[p] ;"
- "addic %[tmp], %[tmp], -1 ;"
- "stwcx. %[tmp], 0, %[p] ;"
- "bne- 1b ;"
- // Spin until the counter goes to zero
- "2: ;"
- "lwz %[tmp], 0(%[p]) ;"
- "cmpwi %[tmp], 0 ;"
- "bne- 2b ;"
- // Switch back to virtual mode
- "mtmsrd %[msr] ;"
- : // outputs
- [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
- : // inputs
- [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
- : // clobbers
- "cc", "xer"
- );
- local_irq_restore(flags);
- return 0;
- }
- static int change_memory_range_fn(void *data)
- {
- struct change_memory_parms *parms = data;
- // First CPU goes through, all others wait.
- if (atomic_xchg(&parms->master_cpu, 1) == 1)
- return chmem_secondary_loop(parms);
- // Wait for all but one CPU (this one) to call in
- while (atomic_read(&parms->cpu_counter) > 1)
- barrier();
- change_memory_range(parms->start, parms->end, parms->step, parms->newpp);
- mb();
- // Signal the other CPUs that we're done
- atomic_dec(&parms->cpu_counter);
- return 0;
- }
- static bool hash__change_memory_range(unsigned long start, unsigned long end,
- unsigned long newpp)
- {
- unsigned int step, shift;
- shift = mmu_psize_defs[mmu_linear_psize].shift;
- step = 1 << shift;
- start = ALIGN_DOWN(start, step);
- end = ALIGN(end, step); // aligns up
- if (start >= end)
- return false;
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
- mutex_lock(&chmem_lock);
- chmem_parms.start = start;
- chmem_parms.end = end;
- chmem_parms.step = step;
- chmem_parms.newpp = newpp;
- atomic_set(&chmem_parms.master_cpu, 0);
- cpus_read_lock();
- atomic_set(&chmem_parms.cpu_counter, num_online_cpus());
- // Ensure state is consistent before we call the other CPUs
- mb();
- stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
- cpu_online_mask);
- cpus_read_unlock();
- mutex_unlock(&chmem_lock);
- } else
- change_memory_range(start, end, step, newpp);
- return true;
- }
- void hash__mark_rodata_ro(void)
- {
- unsigned long start, end, pp;
- start = (unsigned long)_stext;
- end = (unsigned long)__end_rodata;
- pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
- WARN_ON(!hash__change_memory_range(start, end, pp));
- }
- void hash__mark_initmem_nx(void)
- {
- unsigned long start, end, pp;
- start = (unsigned long)__init_begin;
- end = (unsigned long)__init_end;
- pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
- WARN_ON(!hash__change_memory_range(start, end, pp));
- }
- #endif