Merge tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull more powerpc updates from Michael Ellerman:
 "These were delayed for various reasons, so I let them sit in next a
  bit longer, rather than including them in my first pull request.

  Fixes:
   - Fix early access to cpu_spec relocation from Benjamin Herrenschmidt
   - Fix incorrect event codes in power9-event-list from Madhavan Srinivasan
   - Move register_process_table() out of ppc_md from Michael Ellerman

  Use jump_label for [cpu|mmu]_has_feature():
   - Add mmu_early_init_devtree() from Michael Ellerman
   - Move disable_radix handling into mmu_early_init_devtree() from Michael Ellerman
   - Do hash device tree scanning earlier from Michael Ellerman
   - Do radix device tree scanning earlier from Michael Ellerman
   - Do feature patching before MMU init from Michael Ellerman
   - Check features don't change after patching from Michael Ellerman
   - Make MMU_FTR_RADIX a MMU family feature from Aneesh Kumar K.V
   - Convert mmu_has_feature() to returning bool from Michael Ellerman
   - Convert cpu_has_feature() to returning bool from Michael Ellerman
   - Define radix_enabled() in one place & use static inline from Michael Ellerman
   - Add early_[cpu|mmu]_has_feature() from Michael Ellerman
   - Convert early cpu/mmu feature check to use the new helpers from Aneesh Kumar K.V
   - jump_label: Make it possible for arches to invoke jump_label_init() earlier from Kevin Hao
   - Call jump_label_init() in apply_feature_fixups() from Aneesh Kumar K.V
   - Remove mfvtb() from Kevin Hao
   - Move cpu_has_feature() to a separate file from Kevin Hao
   - Add kconfig option to use jump labels for cpu/mmu_has_feature() from Michael Ellerman
   - Add option to use jump label for cpu_has_feature() from Kevin Hao
   - Add option to use jump label for mmu_has_feature() from Kevin Hao
   - Catch usage of cpu/mmu_has_feature() before jump label init from Aneesh Kumar K.V
   - Annotate jump label assembly from Michael Ellerman

  TLB flush enhancements from Aneesh Kumar K.V:
   - radix: Implement tlb mmu gather flush efficiently
   - Add helper for finding SLBE LLP encoding
   - Use hugetlb flush functions
   - Drop multiple definition of mm_is_core_local
   - radix: Add tlb flush of THP ptes
   - radix: Rename function and drop unused arg
   - radix/hugetlb: Add helper for finding page size
   - hugetlb: Add flush_hugetlb_tlb_range
   - remove flush_tlb_page_nohash

  Add new ptrace regsets from Anshuman Khandual and Simon Guo:
   - elf: Add powerpc specific core note sections
   - Add the function flush_tmregs_to_thread
   - Enable in transaction NT_PRFPREG ptrace requests
   - Enable in transaction NT_PPC_VMX ptrace requests
   - Enable in transaction NT_PPC_VSX ptrace requests
   - Adapt gpr32_get, gpr32_set functions for transaction
   - Enable support for NT_PPC_CGPR
   - Enable support for NT_PPC_CFPR
   - Enable support for NT_PPC_CVMX
   - Enable support for NT_PPC_CVSX
   - Enable support for TM SPR state
   - Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
   - Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
   - Enable support for EBB registers
   - Enable support for Performance Monitor registers"

* tag 'powerpc-4.8-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (48 commits)
  powerpc/mm: Move register_process_table() out of ppc_md
  powerpc/perf: Fix incorrect event codes in power9-event-list
  powerpc/32: Fix early access to cpu_spec relocation
  powerpc/ptrace: Enable support for Performance Monitor registers
  powerpc/ptrace: Enable support for EBB registers
  powerpc/ptrace: Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
  powerpc/ptrace: Enable NT_PPC_TM_CTAR, NT_PPC_TM_CPPR, NT_PPC_TM_CDSCR
  powerpc/ptrace: Enable support for TM SPR state
  powerpc/ptrace: Enable support for NT_PPC_CVSX
  powerpc/ptrace: Enable support for NT_PPC_CVMX
  powerpc/ptrace: Enable support for NT_PPC_CFPR
  powerpc/ptrace: Enable support for NT_PPC_CGPR
  powerpc/ptrace: Adapt gpr32_get, gpr32_set functions for transaction
  powerpc/ptrace: Enable in transaction NT_PPC_VSX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PPC_VMX ptrace requests
  powerpc/ptrace: Enable in transaction NT_PRFPREG ptrace requests
  powerpc/process: Add the function flush_tmregs_to_thread
  elf: Add powerpc specific core note sections
  powerpc/mm: remove flush_tlb_page_nohash
  powerpc/mm/hugetlb: Add flush_hugetlb_tlb_range
  ...
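[Editor's note] For context on the jump_label series above: the point is that cpu_has_feature()/mmu_has_feature() become a nop or branch patched once at boot, instead of a load-and-mask on every call. A minimal sketch of the pattern, not the exact kernel code (debug checks and header layout elided; CPU_FTRS_ALWAYS, CPU_FTRS_POSSIBLE and cpu_feature_keys are as used in the powerpc series):

        #include <linux/jump_label.h>

        #define NUM_CPU_FTR_KEYS        64
        extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];

        static __always_inline bool cpu_has_feature(unsigned long feature)
        {
                int i;

                /* Features the kernel is built to assume always set/clear */
                if (CPU_FTRS_ALWAYS & feature)
                        return true;
                if (!(CPU_FTRS_POSSIBLE & feature))
                        return false;

                /* One static key per feature bit, flipped in apply_feature_fixups() */
                i = __builtin_ctzl(feature);
                return static_branch_likely(&cpu_feature_keys[i]);
        }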
@@ -72,8 +72,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 		/* clear out bits after (52) [0....52.....63] */
 		va &= ~((1ul << (64 - 52)) - 1);
 		va |= ssize << 8;
-		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
-			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
 		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
 			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -122,8 +121,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		/* clear out bits after(52) [0....52.....63] */
 		va &= ~((1ul << (64 - 52)) - 1);
 		va |= ssize << 8;
-		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
-			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
+		sllp = get_sllp_encoding(apsize);
 		va |= sllp << 5;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
 			     : : "r"(va) : "memory");
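[Editor's note] The two hunks above replace an open-coded SLLP computation with get_sllp_encoding(). Judging from the removed lines, the helper added by "powerpc/mm: Add helper for finding SLBE LLP encoding" is presumably equivalent to:

        static inline unsigned long get_sllp_encoding(int psize)
        {
                unsigned long sllp;

                /* L and LP bits of the SLB VSID, packed for the tlbie va field */
                sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
                        ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
                return sllp;
        }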
@@ -749,5 +747,5 @@ void __init hpte_init_native(void)
 	mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;
 
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		ppc_md.register_process_table = native_register_proc_table;
+		register_process_table = native_register_proc_table;
 }
@@ -363,11 +363,6 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 	return 0;
 }
 
-static void __init htab_init_seg_sizes(void)
-{
-	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
-}
-
 static int __init get_idx_from_shift(unsigned int shift)
 {
 	int idx = -1;
@@ -539,7 +534,7 @@ static bool might_have_hea(void)
 
 #endif /* #ifdef CONFIG_PPC_64K_PAGES */
 
-static void __init htab_init_page_sizes(void)
+static void __init htab_scan_page_sizes(void)
 {
 	int rc;
 
@@ -554,17 +549,23 @@ static void __init htab_init_page_sizes(void)
 	 * Try to find the available page sizes in the device-tree
 	 */
 	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-	if (rc != 0) /* Found */
-		goto found;
-
-	/*
-	 * Not in the device-tree, let's fallback on known size
-	 * list for 16M capable GP & GR
-	 */
-	if (mmu_has_feature(MMU_FTR_16M_PAGE))
+	if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
+		/*
+		 * Nothing in the device-tree, but the CPU supports 16M pages,
+		 * so let's fallback on a known size list for 16M capable CPUs.
+		 */
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
-found:
+	}
+
+#ifdef CONFIG_HUGETLB_PAGE
+	/* Reserve 16G huge page memory sections for huge pages */
+	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
+#endif /* CONFIG_HUGETLB_PAGE */
+}
+
+static void __init htab_init_page_sizes(void)
+{
 	if (!debug_pagealloc_enabled()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
@@ -630,11 +631,6 @@ found:
 		       ,mmu_psize_defs[mmu_vmemmap_psize].shift
 #endif
 		       );
-
-#ifdef CONFIG_HUGETLB_PAGE
-	/* Reserve 16G huge page memory sections for huge pages */
-	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-#endif /* CONFIG_HUGETLB_PAGE */
 }
 
 static int __init htab_dt_scan_pftsize(unsigned long node,
@@ -759,12 +755,6 @@ static void __init htab_initialize(void)
 
 	DBG(" -> htab_initialize()\n");
 
-	/* Initialize segment sizes */
-	htab_init_seg_sizes();
-
-	/* Initialize page sizes */
-	htab_init_page_sizes();
-
 	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
@@ -885,8 +875,19 @@
 #undef KB
 #undef MB
 
+void __init hash__early_init_devtree(void)
+{
+	/* Initialize segment sizes */
+	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
+
+	/* Initialize page sizes */
+	htab_scan_page_sizes();
+}
+
 void __init hash__early_init_mmu(void)
 {
+	htab_init_page_sizes();
+
 	/*
 	 * initialize page table size
 	 */
@@ -5,39 +5,34 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 #include <asm/mman.h>
+#include <asm/tlb.h>
 
 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	unsigned long ap, shift;
+	int psize;
 	struct hstate *hstate = hstate_file(vma->vm_file);
 
-	shift = huge_page_shift(hstate);
-	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-		ap = mmu_get_ap(MMU_PAGE_2M);
-	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
-		ap = mmu_get_ap(MMU_PAGE_1G);
-	else {
-		WARN(1, "Wrong huge page shift\n");
-		return ;
-	}
-	radix___flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
-	unsigned long ap, shift;
+	int psize;
 	struct hstate *hstate = hstate_file(vma->vm_file);
 
-	shift = huge_page_shift(hstate);
-	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
-		ap = mmu_get_ap(MMU_PAGE_2M);
-	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
-		ap = mmu_get_ap(MMU_PAGE_1G);
-	else {
-		WARN(1, "Wrong huge page shift\n");
-		return ;
-	}
-	radix___local_flush_tlb_page(vma->vm_mm, vmaddr, ap, 0);
+	psize = hstate_get_psize(hstate);
+	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
 }
 
+void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				    unsigned long end)
+{
+	int psize;
+	struct hstate *hstate = hstate_file(vma->vm_file);
+
+	psize = hstate_get_psize(hstate);
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
+}
+
 /*
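[Editor's note] The hunk above collapses the shift-to-psize switch into hstate_get_psize(). Per the removed lines, the helper from "powerpc/mm/radix/hugetlb: Add helper for finding page size" presumably looks like:

        static inline int hstate_get_psize(struct hstate *hstate)
        {
                unsigned long shift;

                shift = huge_page_shift(hstate);
                if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
                        return MMU_PAGE_2M;
                else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
                        return MMU_PAGE_1G;
                else {
                        WARN(1, "Wrong huge page shift\n");
                        return mmu_virtual_psize;
                }
        }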
@@ -411,3 +411,25 @@ struct page *realmode_pfn_to_page(unsigned long pfn)
 EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
 
 #endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+static bool disable_radix;
+static int __init parse_disable_radix(char *p)
+{
+	disable_radix = true;
+	return 0;
+}
+early_param("disable_radix", parse_disable_radix);
+
+void __init mmu_early_init_devtree(void)
+{
+	/* Disable radix mode based on kernel command line. */
+	if (disable_radix)
+		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+
+	if (early_radix_enabled())
+		radix__early_init_devtree();
+	else
+		hash__early_init_devtree();
+}
+#endif /* CONFIG_PPC_STD_MMU_64 */
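[Editor's note] mmu_early_init_devtree() runs before the feature jump labels are initialised, hence early_radix_enabled() rather than radix_enabled(). Per the "Add early_[cpu|mmu]_has_feature()" and "Define radix_enabled() in one place & use static inline" commits, the helpers are presumably along these lines (a sketch, not verbatim):

        /* Safe before jump_label_init(): reads the raw feature word */
        static inline bool early_mmu_has_feature(unsigned long feature)
        {
                return !!(cur_cpu_spec->mmu_features & feature);
        }

        static inline bool early_radix_enabled(void)
        {
                return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
        }

        /* Post-patching variant, usable once the static keys are set up */
        static inline bool radix_enabled(void)
        {
                return mmu_has_feature(MMU_FTR_TYPE_RADIX);
        }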
@@ -14,6 +14,9 @@
 #include "mmu_decl.h"
 #include <trace/events/thp.h>
 
+int (*register_process_table)(unsigned long base, unsigned long page_size,
+			      unsigned long tbl_size);
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * This is called when relaxing access to a hugepage. It's also called in the page
@@ -33,7 +36,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	changed = !pmd_same(*(pmdp), entry);
 	if (changed) {
 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
-		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
 	return changed;
 }
@@ -66,7 +69,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 			     pmd_t *pmdp)
 {
 	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
-	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	/*
 	 * This ensures that generic code that rely on IRQ disabling
 	 * to prevent a parallel THP split work as expected.
@@ -171,7 +171,7 @@ redo:
 	 * of process table here. But our linear mapping also enable us to use
 	 * physical address here.
 	 */
-	ppc_md.register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
+	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
 	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
 }
 
@@ -198,7 +198,7 @@ static void __init radix_init_partition_table(void)
 
 void __init radix_init_native(void)
 {
-	ppc_md.register_process_table = native_register_process_table;
+	register_process_table = native_register_process_table;
 }
 
 static int __init get_idx_from_shift(unsigned int shift)
@@ -264,7 +264,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
 	return 1;
 }
 
-static void __init radix_init_page_sizes(void)
+void __init radix__early_init_devtree(void)
 {
 	int rc;
 
@@ -343,7 +343,6 @@ void __init radix__early_init_mmu(void)
 	__pte_frag_nr = H_PTE_FRAG_NR;
 	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
 
-	radix_init_page_sizes();
 	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
 		radix_init_native();
 		lpcr = mfspr(SPRN_LPCR);
@@ -225,7 +225,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 		if (!is_vm_hugetlb_page(vma))
 			assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(ptep, entry);
-		flush_tlb_page_nohash(vma, address);
+		flush_tlb_page(vma, address);
 	}
 	return changed;
 }
@@ -140,10 +140,11 @@ void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
 
-void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-				  unsigned long ap, int nid)
+void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+				       int psize)
 {
 	unsigned long pid;
+	unsigned long ap = mmu_get_ap(psize);
 
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
@@ -159,18 +160,12 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (vma && is_vm_hugetlb_page(vma))
 		return __local_flush_hugetlb_page(vma, vmaddr);
 #endif
-	radix___local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-				     mmu_get_ap(mmu_virtual_psize), 0);
+	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+					  mmu_virtual_psize);
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_page);
 
 #ifdef CONFIG_SMP
-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 void radix__flush_tlb_mm(struct mm_struct *mm)
 {
 	unsigned long pid;
@@ -221,10 +216,11 @@ no_context:
 }
 EXPORT_SYMBOL(radix__flush_tlb_pwc);
 
-void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
-			    unsigned long ap, int nid)
+void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
+				 int psize)
 {
 	unsigned long pid;
+	unsigned long ap = mmu_get_ap(psize);
 
 	preempt_disable();
 	pid = mm ? mm->context.id : 0;
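[Editor's note] After the rename, both flush functions take an MMU page-size index and derive the radix "AP" (actual page size) tlbie encoding internally instead of having every caller pass a pre-computed ap. The helper is roughly the following (an assumption based on the radix headers of this era, not part of this diff):

        /* AP field for radix tlbie/tlbiel, taken from the psize table */
        static inline unsigned long mmu_get_ap(int psize)
        {
                return mmu_psize_defs[psize].ap << PPC_BITLSHIFT(58);
        }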
@@ -250,8 +246,8 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (vma && is_vm_hugetlb_page(vma))
 		return flush_hugetlb_page(vma, vmaddr);
 #endif
-	radix___flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       mmu_get_ap(mmu_virtual_psize), 0);
+	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
+				    mmu_virtual_psize);
 }
 EXPORT_SYMBOL(radix__flush_tlb_page);
 
@@ -299,8 +295,65 @@ static int radix_get_mmu_psize(int page_size)
 
 void radix__tlb_flush(struct mmu_gather *tlb)
 {
+	int psize = 0;
 	struct mm_struct *mm = tlb->mm;
-	radix__flush_tlb_mm(mm);
+	int page_size = tlb->page_size;
+
+	psize = radix_get_mmu_psize(page_size);
+	/*
+	 * if page size is not something we understand, do a full mm flush
+	 */
+	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
+		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
+	else
+		radix__flush_tlb_mm(mm);
+}
+
+#define TLB_FLUSH_ALL -1UL
+/*
+ * Number of pages above which we will do a bcast tlbie. Just a
+ * number at this point copied from x86
+ */
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+
+void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
+				  unsigned long end, int psize)
+{
+	unsigned long pid;
+	unsigned long addr;
+	int local = mm_is_core_local(mm);
+	unsigned long ap = mmu_get_ap(psize);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
+
+
+	preempt_disable();
+	pid = mm ? mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto err_out;
+
+	if (end == TLB_FLUSH_ALL ||
+	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
+		if (local)
+			_tlbiel_pid(pid, RIC_FLUSH_TLB);
+		else
+			_tlbie_pid(pid, RIC_FLUSH_TLB);
+		goto err_out;
+	}
+	for (addr = start; addr < end; addr += page_size) {
+
+		if (local)
+			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
+		else {
+			if (lock_tlbie)
+				raw_spin_lock(&native_tlbie_lock);
+			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
+			if (lock_tlbie)
+				raw_spin_unlock(&native_tlbie_lock);
+		}
+	}
+err_out:
+	preempt_enable();
 }
 
 void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
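[Editor's note] The range flush above falls back to one PID-wide invalidation once a range exceeds tlb_single_page_flush_ceiling pages. The decision distils to the following sketch (illustrative names, not kernel code):

        /*
         * Worked example: with 64K pages and the default ceiling of 33,
         * any range wider than 33 * 64K (about 2.1MB) takes a single
         * PID-wide flush instead of per-page tlbie/tlbiel.
         */
        static bool flush_whole_pid(unsigned long start, unsigned long end,
                                    unsigned long page_size, unsigned long ceiling)
        {
                return end == -1UL /* TLB_FLUSH_ALL */ ||
                       (end - start) > ceiling * page_size;
        }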
@@ -340,3 +393,10 @@ void radix__flush_tlb_lpid(unsigned long lpid)
 	asm volatile("eieio; tlbsync; ptesync": : :"memory");
 }
 EXPORT_SYMBOL(radix__flush_tlb_lpid);
+
+void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
+}
+EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
@@ -48,17 +48,6 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 }
 EXPORT_SYMBOL(flush_hash_entry);
 
-/*
- * Called by ptep_set_access_flags, must flush on CPUs for which the
- * DSI handler can't just "fixup" the TLB on a write fault
- */
-void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
-{
-	if (Hash != 0)
-		return;
-	_tlbie(addr);
-}
-
 /*
  * Called at the end of a mmu_gather operation to make sure the
  * TLB flush is completely done.
@@ -215,12 +215,6 @@ EXPORT_SYMBOL(local_flush_tlb_page);
 
 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 
-static int mm_is_core_local(struct mm_struct *mm)
-{
-	return cpumask_subset(mm_cpumask(mm),
-			      topology_sibling_cpumask(smp_processor_id()));
-}
-
 struct tlb_flush_param {
 	unsigned long addr;
 	unsigned int pid;
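[Editor's note] This drops the second copy of mm_is_core_local() (the radix copy went in the -159 hunk above). Per "powerpc/mm: Drop multiple definition of mm_is_core_local", both call sites now presumably share one inline helper in asm/tlb.h, along these lines:

        #ifdef CONFIG_SMP
        static inline int mm_is_core_local(struct mm_struct *mm)
        {
                /* True if every user of this mm runs on the current core's threads */
                return cpumask_subset(mm_cpumask(mm),
                                      topology_sibling_cpumask(smp_processor_id()));
        }
        #else
        static inline int mm_is_core_local(struct mm_struct *mm)
        {
                return 1;
        }
        #endif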