Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
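Most of the churn below is mechanical: pgtable_l5_enabled stops being a variable-style macro and becomes a function-style accessor, pgtable_l5_enabled(), so each call site gains parentheses. For orientation, a sketch of the accessor as it reads in arch/x86/include/asm/pgtable_64_types.h after the conversion (reconstructed from memory; not part of this diff):

	#ifdef CONFIG_X86_5LEVEL
	#ifdef USE_EARLY_PGTABLE_L5
	/*
	 * cpu_feature_enabled() is unusable this early in boot;
	 * fall back to the variable the early boot code sets up.
	 */
	static inline bool pgtable_l5_enabled(void)
	{
		return __pgtable_l5_enabled;
	}
	#else
	#define pgtable_l5_enabled() cpu_feature_enabled(X86_FEATURE_LA57)
	#endif
	#else
	#define pgtable_l5_enabled() 0
	#endif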
arch/x86/mm/dump_pagetables.c

@@ -360,7 +360,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
 			       void *pt)
 {
 	if (__pa(pt) == __pa(kasan_zero_pmd) ||
-	    (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) ||
+	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
 	    __pa(pt) == __pa(kasan_zero_pud)) {
 		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
 		note_page(m, st, __pgprot(prot), 0, 5);
@@ -476,8 +476,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
 	}
 }
 
-#define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
-#define pgd_none(a) (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
+#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
+#define pgd_none(a) (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
 
 static inline bool is_hypervisor_range(int idx)
 {
arch/x86/mm/fault.c

@@ -209,6 +209,7 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
 	unsigned lsb = 0;
 	siginfo_t info;
 
+	clear_siginfo(&info);
 	info.si_signo = si_signo;
 	info.si_errno = 0;
 	info.si_code = si_code;
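The added clear_siginfo() guards against leaking uninitialized kernel stack to userspace: siginfo_t is a large union, and padding or unused members in the on-stack copy would otherwise reach the signal handler. Its definition (from include/linux/signal.h, quoted from memory) is simply a full zeroing:

	static inline void clear_siginfo(struct siginfo *info)
	{
		memset(info, 0, sizeof(*info));
	}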
@@ -439,7 +440,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pgd_none(*pgd_k))
 		return -1;
 
-	if (pgtable_l5_enabled) {
+	if (pgtable_l5_enabled()) {
 		if (pgd_none(*pgd)) {
 			set_pgd(pgd, *pgd_k);
 			arch_flush_lazy_mmu_mode();

@@ -454,7 +455,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (p4d_none(*p4d_k))
 		return -1;
 
-	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
+	if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
 		set_p4d(p4d, *p4d_k);
 		arch_flush_lazy_mmu_mode();
 	} else {
@@ -828,6 +829,8 @@ static inline void
 show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 		unsigned long address, struct task_struct *tsk)
 {
+	const char *loglvl = task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG;
+
 	if (!unhandled_signal(tsk, SIGSEGV))
 		return;

@@ -835,13 +838,14 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 		return;
 
 	printk("%s%s[%d]: segfault at %lx ip %px sp %px error %lx",
-		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-		tsk->comm, task_pid_nr(tsk), address,
+		loglvl, tsk->comm, task_pid_nr(tsk), address,
 		(void *)regs->ip, (void *)regs->sp, error_code);
 
 	print_vma_addr(KERN_CONT " in ", regs->ip);
 
 	printk(KERN_CONT "\n");
 
+	show_opcodes((u8 *)regs->ip, loglvl);
 }
 
 static void
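With the log level hoisted into loglvl, the same level now also feeds show_opcodes(), which appends the "Code:" opcode dump familiar from kernel oopses to fatal user segfaults. An illustrative resulting log line, shaped by the printk format visible above (all values made up):

	a.out[1342]: segfault at 0 ip 000055d15b3c0661 sp 00007ffc7d6361f0 error 6 in a.out[55d15b3c0000+1000]
	Code: 00 00 55 48 89 e5 c7 45 fc 00 00 00 00 <c7> 04 25 00 00 00 00 2a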
arch/x86/mm/ident_map.c

@@ -123,7 +123,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
 		result = ident_p4d_init(info, p4d, addr, next);
 		if (result)
 			return result;
-		if (pgtable_l5_enabled) {
+		if (pgtable_l5_enabled()) {
 			set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
 		} else {
 			/*
arch/x86/mm/init.c

@@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (page_is_ram(pagenr)) {
+	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
+				IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+			!= REGION_DISJOINT) {
 		/*
 		 * For disallowed memory regions in the low 1MB range,
 		 * request that the page be shown as all zeros.
arch/x86/mm/init_32.c

@@ -692,7 +692,7 @@ void __init initmem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
-	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
+	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
 	sparse_memory_present_with_active_regions(0);
 
 #ifdef CONFIG_FLATMEM
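PHYS_ADDR_MAX replaces the open-coded (phys_addr_t)ULLONG_MAX in both initmem_init() variants. If memory serves, it is defined next to phys_addr_t in include/linux/types.h as:

	#define PHYS_ADDR_MAX	(~(phys_addr_t)0)

which spells the all-ones upper bound in the native width of phys_addr_t instead of relying on a truncating cast from unsigned long long.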
arch/x86/mm/init_64.c

@@ -180,7 +180,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
  */
 void sync_global_pgds(unsigned long start, unsigned long end)
 {
-	if (pgtable_l5_enabled)
+	if (pgtable_l5_enabled())
 		sync_global_pgds_l5(start, end);
 	else
 		sync_global_pgds_l4(start, end);

@@ -643,7 +643,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
 	unsigned long vaddr = (unsigned long)__va(paddr);
 	int i = p4d_index(vaddr);
 
-	if (!pgtable_l5_enabled)
+	if (!pgtable_l5_enabled())
 		return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask);
 
 	for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) {

@@ -723,7 +723,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 					 page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
-		if (pgtable_l5_enabled)
+		if (pgtable_l5_enabled())
 			pgd_populate(&init_mm, pgd, p4d);
 		else
 			p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);

@@ -742,7 +742,7 @@ kernel_physical_mapping_init(unsigned long paddr_start,
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
+	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
 }
 #endif

@@ -1100,7 +1100,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 	 * 5-level case we should free them. This code will have to change
 	 * to adapt for boot-time switching between 4 and 5 level page tables.
 	 */
-	if (pgtable_l5_enabled)
+	if (pgtable_l5_enabled())
 		free_pud_table(pud_base, p4d);
 }
arch/x86/mm/kasan_init_64.c

@@ -2,10 +2,8 @@
 #define DISABLE_BRANCH_PROFILING
 #define pr_fmt(fmt) "kasan: " fmt
 
-#ifdef CONFIG_X86_5LEVEL
-/* Too early to use cpu_feature_enabled() */
-#define pgtable_l5_enabled __pgtable_l5_enabled
-#endif
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
 
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
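Rather than each early-boot file redefining pgtable_l5_enabled behind CONFIG_X86_5LEVEL, the file now opts in by defining USE_EARLY_PGTABLE_L5 before any #include, which (per the header sketch near the top) switches pgtable_l5_enabled() to read the early __pgtable_l5_enabled variable instead of calling cpu_feature_enabled().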
@@ -182,7 +180,7 @@ static void __init clear_pgds(unsigned long start,
 		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
 		 * instead.
 		 */
-		if (pgtable_l5_enabled)
+		if (pgtable_l5_enabled())
 			pgd_clear(pgd);
 		else
 			p4d_clear(p4d_offset(pgd, start));

@@ -197,7 +195,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
 {
 	unsigned long p4d;
 
-	if (!pgtable_l5_enabled)
+	if (!pgtable_l5_enabled())
 		return (p4d_t *)pgd;
 
 	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;

@@ -284,7 +282,7 @@ void __init kasan_early_init(void)
 	for (i = 0; i < PTRS_PER_PUD; i++)
 		kasan_zero_pud[i] = __pud(pud_val);
 
-	for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++)
+	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);

@@ -315,7 +313,7 @@ void __init kasan_init(void)
 	 * bunch of things like kernel code, modules, EFI mapping, etc.
 	 * We need to take extra steps to not overwrite them.
 	 */
-	if (pgtable_l5_enabled) {
+	if (pgtable_l5_enabled()) {
 		void *ptr;
 
 		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
arch/x86/mm/kaslr.c

@@ -78,7 +78,7 @@ void __init kernel_randomize_memory(void)
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
 
-	vaddr_start = pgtable_l5_enabled ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
+	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
 	vaddr = vaddr_start;
 
 	/*

@@ -124,7 +124,7 @@ void __init kernel_randomize_memory(void)
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		if (pgtable_l5_enabled)
+		if (pgtable_l5_enabled())
 			entropy = (rand % (entropy + 1)) & P4D_MASK;
 		else
 			entropy = (rand % (entropy + 1)) & PUD_MASK;

@@ -136,7 +136,7 @@ void __init kernel_randomize_memory(void)
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		if (pgtable_l5_enabled)
+		if (pgtable_l5_enabled())
 			vaddr = round_up(vaddr + 1, P4D_SIZE);
 		else
 			vaddr = round_up(vaddr + 1, PUD_SIZE);

@@ -212,7 +212,7 @@ void __meminit init_trampoline(void)
 		return;
 	}
 
-	if (pgtable_l5_enabled)
+	if (pgtable_l5_enabled())
 		init_trampoline_p4d();
 	else
 		init_trampoline_pud();
arch/x86/mm/numa.c

@@ -136,13 +136,13 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
 
 	/* whine about and ignore invalid blks */
 	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
-		pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
-			   nid, start, end - 1);
+		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+			nid, start, end - 1);
 		return 0;
 	}
 
 	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
-		pr_err("NUMA: too many memblk ranges\n");
+		pr_err("too many memblk ranges\n");
 		return -EINVAL;
 	}
 

@@ -267,14 +267,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 			 */
 			if (bi->end > bj->start && bi->start < bj->end) {
 				if (bi->nid != bj->nid) {
-					pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
 					       bi->nid, bi->start, bi->end - 1,
 					       bj->nid, bj->start, bj->end - 1);
 					return -EINVAL;
 				}
-				pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
-					   bi->nid, bi->start, bi->end - 1,
-					   bj->start, bj->end - 1);
+				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+					bi->nid, bi->start, bi->end - 1,
+					bj->start, bj->end - 1);
 			}
 
 			/*

@@ -364,7 +364,7 @@ static int __init numa_alloc_distance(void)
 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 				      size, PAGE_SIZE);
 	if (!phys) {
-		pr_warning("NUMA: Warning: can't allocate distance table!\n");
+		pr_warn("Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;

@@ -410,14 +410,14 @@ void __init numa_set_distance(int from, int to, int distance)
 
 	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
 			from < 0 || to < 0) {
-		pr_warn_once("NUMA: Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
-			     from, to, distance);
+		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
+			     from, to, distance);
 		return;
 	}
 
 	if ((u8)distance != distance ||
 	    (from == to && distance != LOCAL_DISTANCE)) {
-		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
 			     from, to, distance);
 		return;
 	}
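Dropping the "NUMA: " prefix from each message only makes sense if the prefix is supplied centrally; presumably the same patch adds a pr_fmt definition at the top of numa.c (the exact line is an assumption, but this is the standard kernel pattern):

	/* must appear before the first #include that pulls in printk.h */
	#define pr_fmt(fmt) "NUMA: " fmt

	pr_warn("Warning: invalid memblk node %d\n", nid);
	/* emits "NUMA: Warning: invalid memblk node 3" at warning level */

The hunks also trade the deprecated pr_warning() spelling for pr_warn(), the form the rest of the kernel has converged on.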
arch/x86/mm/pgtable.c

@@ -119,13 +119,12 @@ static inline void pgd_list_del(pgd_t *pgd)
 
 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
 {
-	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
-	virt_to_page(pgd)->index = (pgoff_t)mm;
+	virt_to_page(pgd)->pt_mm = mm;
 }
 
 struct mm_struct *pgd_page_get_mm(struct page *page)
 {
-	return (struct mm_struct *)page->index;
+	return page->pt_mm;
 }
 
 static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
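Repurposing page->index to carry an mm pointer was always a cast-heavy overload; the new code presumes a dedicated pt_mm member in the page-table-page variant of struct page, roughly (paraphrased from memory; the surrounding layout is an assumption):

	struct page {
		...
		struct mm_struct *pt_mm;	/* x86 pgd pages only */
		...
	};

With a properly typed field, both the BUILD_BUG_ON() size check and the (pgoff_t)/(struct mm_struct *) casts become unnecessary, which is exactly what the hunk deletes.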
arch/x86/mm/tlb.c

@@ -157,7 +157,7 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
 	unsigned long sp = current_stack_pointer;
 	pgd_t *pgd = pgd_offset(mm, sp);
 
-	if (pgtable_l5_enabled) {
+	if (pgtable_l5_enabled()) {
 		if (unlikely(pgd_none(*pgd))) {
 			pgd_t *pgd_ref = pgd_offset_k(sp);