Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 PTI preparatory patches from Thomas Gleixner:
 "Today's Advent calendar window contains twenty-four easy-to-digest
  patches. The original plan was to have twenty-three, matching the date,
  but a late fixup made that moot.

   - Move the cpu_entry_area mapping out of the fixmap into a separate
     address space. That's necessary because the fixmap becomes too big
     with NR_CPUS=8192 and this already caused subtle and hard-to-diagnose
     failures.

     The topmost patch is fresh from today and cures a brain slip of that
     tall grumpy German greybeard, who ignored the intricacies of 32-bit
     wraparounds.

   - Limit the number of CPUs on 32-bit to 64. That's insanely big
     already, but at least it's small enough to prevent address-space
     issues with the cpu_entry_area map, which have been observed and
     debugged with the fixmap code.

   - A few TLB flush fixes in various places, plus documentation of which
     of the TLB functions should be used for what.

   - Rename the SYSENTER stack to CPU_ENTRY_AREA stack, as it is used for
     more than SYSENTER now and keeping the old name makes backtraces
     confusing.

   - Prevent LDT inheritance on exec() by moving it to arch_dup_mmap(),
     which is only invoked on fork().

   - Make vsyscall more robust.

   - A few fixes and cleanups of the debug_pagetables code: check
     PAGE_PRESENT instead of checking the PTE for 0, and clean up the C89
     initialization of the address hint array, which was already out of
     sync with the index enums.

   - Move the ESPFIX init to a different place to prepare for PTI.

   - Several code moves with no functional change to make PTI integration
     simpler and header files less convoluted.

   - Documentation fixes and clarifications"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/cpu_entry_area: Prevent wraparound in setup_cpu_entry_area_ptes() on 32bit
  init: Invoke init_espfix_bsp() from mm_init()
  x86/cpu_entry_area: Move it out of the fixmap
  x86/cpu_entry_area: Move it to a separate unit
  x86/mm: Create asm/invpcid.h
  x86/mm: Put MMU to hardware ASID translation in one place
  x86/mm: Remove hard-coded ASID limit checks
  x86/mm: Move the CR3 construction functions to tlbflush.h
  x86/mm: Add comments to clarify which TLB-flush functions are supposed to flush what
  x86/mm: Remove superfluous barriers
  x86/mm: Use __flush_tlb_one() for kernel memory
  x86/microcode: Dont abuse the TLB-flush interface
  x86/uv: Use the right TLB-flush API
  x86/entry: Rename SYSENTER_stack to CPU_ENTRY_AREA_entry_stack
  x86/doc: Remove obvious weirdnesses from the x86 MM layout documentation
  x86/mm/64: Improve the memory map documentation
  x86/ldt: Prevent LDT inheritance on exec
  x86/ldt: Rework locking
  arch, mm: Allow arch_dup_mmap() to fail
  x86/vsyscall/64: Warn and fail vsyscall emulation in NATIVE mode
  ...
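For orientation on the first bullet: each CPU's entry-area mapping now sits at a fixed stride from a dedicated base address instead of occupying individually numbered fixmap slots. A minimal user-space sketch of that address arithmetic follows; the base and size constants are illustrative placeholders, not the kernel's real CPU_ENTRY_AREA_PER_CPU and CPU_ENTRY_AREA_SIZE.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholder constants; the kernel derives the real values
 * from its virtual memory layout. */
#define CPU_ENTRY_AREA_PER_CPU	0xff000000UL
#define CPU_ENTRY_AREA_SIZE	0x40000UL

/* Same arithmetic as get_cpu_entry_area() below: a fixed per-CPU stride
 * from a dedicated base. */
static uintptr_t cpu_entry_area_va(int cpu)
{
	return CPU_ENTRY_AREA_PER_CPU + (uintptr_t)cpu * CPU_ENTRY_AREA_SIZE;
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		printf("cpu %d -> %#lx\n", cpu, (unsigned long)cpu_entry_area_va(cpu));
	return 0;
}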
arch/x86/mm/Makefile
@@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg
endif

obj-y	:= init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o
	    pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o

# Make sure __phys_addr has no stackprotector
nostackp := $(call cc-option, -fno-stack-protector)
arch/x86/mm/cpu_entry_area.c (new file, 139 lines)
@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;

	set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	extern char _entry_trampoline[];

	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);

	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);
}
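The loop at the end of setup_cpu_entry_area_ptes() carries the 32-bit wraparound fix called out in the merge message. A standalone sketch with made-up addresses shows why the extra "start >= CPU_ENTRY_AREA_BASE" term is needed; the constants are hypothetical, not the kernel's real layout values.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit values: the map ends 3 MiB below the 4 GiB boundary,
 * and the stride is 4 MiB, so the final increment wraps past zero. */
#define BASE		0xffc00000u	/* stand-in for CPU_ENTRY_AREA_BASE     */
#define MAP_SIZE	0x00300000u	/* stand-in for CPU_ENTRY_AREA_MAP_SIZE */
#define PMD_SIZE	0x00400000u

int main(void)
{
	uint32_t start = BASE;
	uint32_t end = BASE + MAP_SIZE;		/* 0xfff00000, no wrap yet */

	/*
	 * After the first iteration start wraps to 0, which is still below
	 * end; without the "start >= BASE" term the loop would walk the
	 * whole lower address space instead of stopping.
	 */
	for (; start < end && start >= BASE; start += PMD_SIZE)
		printf("populate PTE page at %#x\n", (unsigned)start);

	return 0;
}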
arch/x86/mm/dump_pagetables.c
@@ -44,10 +44,12 @@ struct addr_marker {
	unsigned long max_lines;
};

/* indices for address_markers; keep sync'd w/ address_markers below */
/* Address space markers hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
	USER_SPACE_NR = 0,
#ifdef CONFIG_X86_64
	KERNEL_SPACE_NR,
	LOW_KERNEL_NR,
	VMALLOC_START_NR,

@@ -56,56 +58,74 @@ enum address_markers_idx {
	KASAN_SHADOW_START_NR,
	KASAN_SHADOW_END_NR,
#endif
# ifdef CONFIG_X86_ESPFIX64
	CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
	ESPFIX_START_NR,
# endif
#endif
#ifdef CONFIG_EFI
	EFI_END_NR,
#endif
	HIGH_KERNEL_NR,
	MODULES_VADDR_NR,
	MODULES_END_NR,
#else
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { (1UL << 63),	"Kernel Space" },
	[LOW_KERNEL_NR]		= { 0UL,		"Low Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMEMMAP_START_NR]	= { 0UL,		"Vmemmap" },
#ifdef CONFIG_KASAN
	[KASAN_SHADOW_START_NR]	= { KASAN_SHADOW_START,	"KASAN shadow" },
	[KASAN_SHADOW_END_NR]	= { KASAN_SHADOW_END,	"KASAN shadow end" },
#endif
	[CPU_ENTRY_AREA_NR]	= { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
	[ESPFIX_START_NR]	= { ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
	[EFI_END_NR]		= { EFI_VA_END,		"EFI Runtime Services" },
#endif
	[HIGH_KERNEL_NR]	= { __START_KERNEL_map,	"High Kernel Mapping" },
	[MODULES_VADDR_NR]	= { MODULES_VADDR,	"Modules" },
	[MODULES_END_NR]	= { MODULES_END,	"End Modules" },
	[FIXADDR_START_NR]	= { FIXADDR_START,	"Fixmap Area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#else /* CONFIG_X86_64 */

enum address_markers_idx {
	USER_SPACE_NR = 0,
	KERNEL_SPACE_NR,
	VMALLOC_START_NR,
	VMALLOC_END_NR,
# ifdef CONFIG_HIGHMEM
#ifdef CONFIG_HIGHMEM
	PKMAP_BASE_NR,
# endif
	FIXADDR_START_NR,
#endif
	CPU_ENTRY_AREA_NR,
	FIXADDR_START_NR,
	END_OF_SPACE_NR,
};

/* Address space markers hints */
static struct addr_marker address_markers[] = {
	{ 0, "User Space" },
#ifdef CONFIG_X86_64
	{ 0x8000000000000000UL, "Kernel Space" },
	{ 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/* VMEMMAP_START */, "Vmemmap" },
#ifdef CONFIG_KASAN
	{ KASAN_SHADOW_START,	"KASAN shadow" },
	{ KASAN_SHADOW_END,	"KASAN shadow end" },
	[USER_SPACE_NR]		= { 0,			"User Space" },
	[KERNEL_SPACE_NR]	= { PAGE_OFFSET,	"Kernel Mapping" },
	[VMALLOC_START_NR]	= { 0UL,		"vmalloc() Area" },
	[VMALLOC_END_NR]	= { 0UL,		"vmalloc() End" },
#ifdef CONFIG_HIGHMEM
	[PKMAP_BASE_NR]		= { 0UL,		"Persistent kmap() Area" },
#endif
# ifdef CONFIG_X86_ESPFIX64
	{ ESPFIX_BASE_ADDR,	"ESPfix Area", 16 },
# endif
# ifdef CONFIG_EFI
	{ EFI_VA_END,		"EFI Runtime Services" },
# endif
	{ __START_KERNEL_map,   "High Kernel Mapping" },
	{ MODULES_VADDR,        "Modules" },
	{ MODULES_END,          "End Modules" },
#else
	{ PAGE_OFFSET,          "Kernel Mapping" },
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
# endif
	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
#endif
	{ -1, NULL }		/* End of list */
	[CPU_ENTRY_AREA_NR]	= { 0UL,		"CPU entry area" },
	[FIXADDR_START_NR]	= { 0UL,		"Fixmap area" },
	[END_OF_SPACE_NR]	= { -1,			NULL }
};

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)

@@ -140,7 +160,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
	static const char * const level_name[] =
		{ "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

	if (!pgprot_val(prot)) {
	if (!(pr & _PAGE_PRESENT)) {
		/* Not present */
		pt_dump_cont_printf(m, dmsg, " ");
	} else {

@@ -525,8 +545,8 @@ static int __init pt_dump_init(void)
	address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
	address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
#endif

	return 0;
}
__initcall(pt_dump_init);
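The dump_pagetables.c changes above replace positional (C89-style) initialization of the address-marker table with designated initializers keyed by the index enum, so conditional entries can no longer push the table out of sync with its indices. A small standalone sketch of that pattern; the enum names and addresses here are illustrative, not the kernel's.

#include <stdio.h>

/* Index enum plus designated initializers keep the table and its indices in
 * sync even when some entries are compiled out. */
enum marker_idx {
	USER_NR,
	KERNEL_NR,
	FIXADDR_NR,
	END_NR,
};

struct marker {
	unsigned long start;
	const char *name;
};

static struct marker markers[] = {
	[USER_NR]	= { 0x00000000UL, "User Space" },
	[KERNEL_NR]	= { 0xc0000000UL, "Kernel Mapping" },
	[FIXADDR_NR]	= { 0UL,          "Fixmap Area" },
	[END_NR]	= { (unsigned long)-1, NULL },
};

int main(void)
{
	/* Entries whose value is only known at run time can be patched by
	 * index, the way pt_dump_init() fills in FIXADDR_START above. */
	markers[FIXADDR_NR].start = 0xffc00000UL;

	for (int i = 0; markers[i].name; i++)
		printf("%#010lx  %s\n", markers[i].start, markers[i].name);
	return 0;
}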
arch/x86/mm/init_32.c
@@ -50,6 +50,7 @@
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/page_types.h>
#include <asm/cpu_entry_area.h>
#include <asm/init.h>

#include "mm_internal.h"

@@ -766,6 +767,7 @@ void __init mem_init(void)
	mem_init_print_info(NULL);
	printk(KERN_INFO "virtual kernel memory layout:\n"
		" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
		" cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		" pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif

@@ -777,6 +779,10 @@ void __init mem_init(void)
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		CPU_ENTRY_AREA_BASE,
		CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
		CPU_ENTRY_AREA_MAP_SIZE >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
arch/x86/mm/kasan_init_64.c
@@ -15,6 +15,7 @@
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

@@ -322,31 +323,33 @@ void __init kasan_init(void)
		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
				   kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM);
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
						PAGE_SIZE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
				   shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END);
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();
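The KASAN hunk above recomputes the shadow range for the relocated cpu_entry_area via kasan_mem_to_shadow() and rounds it to page boundaries. A rough standalone model of that translation follows; the shadow offset and the example addresses are placeholders, not the kernel's actual KASAN_SHADOW_OFFSET or CPU_ENTRY_AREA_BASE.

#include <stdint.h>
#include <stdio.h>

/* Rough model of kasan_mem_to_shadow(): one shadow byte tracks 8 bytes of
 * kernel memory, offset into the shadow region. */
#define SHADOW_SCALE_SHIFT	3
#define SHADOW_OFFSET		0xdffffc0000000000ULL	/* placeholder */

static uint64_t mem_to_shadow(uint64_t addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	uint64_t area_base = 0xfffffe0000000000ULL;	/* hypothetical base   */
	uint64_t area_end  = area_base + (1ULL << 30);	/* hypothetical 1 GiB  */

	printf("shadow begin: %#llx\n", (unsigned long long)mem_to_shadow(area_base));
	printf("shadow end:   %#llx\n", (unsigned long long)mem_to_shadow(area_end));
	return 0;
}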
arch/x86/mm/pgtable_32.c
@@ -10,6 +10,7 @@
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
arch/x86/mm/tlb.c
@@ -128,7 +128,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
	 * isn't free.
	 */
#ifdef CONFIG_DEBUG_VM
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) {
	if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) {
		/*
		 * If we were to BUG here, we'd be very likely to kill
		 * the system so hard that we don't see the call trace.

@@ -195,7 +195,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
	if (need_flush) {
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
		write_cr3(build_cr3(next, new_asid));
		write_cr3(build_cr3(next->pgd, new_asid));

		/*
		 * NB: This gets called via leave_mm() in the idle path

@@ -208,7 +208,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	} else {
		/* The new ASID is already up to date. */
		write_cr3(build_cr3_noflush(next, new_asid));
		write_cr3(build_cr3_noflush(next->pgd, new_asid));

		/* See above wrt _rcuidle. */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);

@@ -288,7 +288,7 @@ void initialize_tlbstate_and_flush(void)
		!(cr4_read_shadow() & X86_CR4_PCIDE));

	/* Force ASID 0 and force a TLB flush. */
	write_cr3(build_cr3(mm, 0));
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);

@@ -551,7 +551,7 @@ static void do_kernel_range_flush(void *info)

	/* flush range by one by one 'invlpg' */
	for (addr = f->start; addr < f->end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
		__flush_tlb_one(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
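The tlb.c hunks switch the CR3 helpers from taking a whole mm_struct to taking the PGD pointer directly. A simplified standalone sketch of how a CR3 value combines the PGD physical address with a PCID (hardware ASID); the PCID mapping shown is an approximation of the kernel's kern_pcid(), not a verbatim copy, and the PGD address is hypothetical.

#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH	(1ULL << 63)	/* "don't flush this PCID" request bit */

/* PGD physical address in the high bits, PCID in the low 12 bits.  Mapping
 * kernel ASID 0 to hardware PCID 1 mirrors kern_pcid(), simplified. */
static uint64_t build_cr3(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | (uint64_t)(asid + 1);
}

static uint64_t build_cr3_noflush(uint64_t pgd_pa, uint16_t asid)
{
	return build_cr3(pgd_pa, asid) | CR3_NOFLUSH;
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* hypothetical page-aligned PGD */

	printf("cr3         = %#llx\n", (unsigned long long)build_cr3(pgd_pa, 0));
	printf("cr3 noflush = %#llx\n", (unsigned long long)build_cr3_noflush(pgd_pa, 0));
	return 0;
}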