kasan: rename kasan_zero_page to kasan_early_shadow_page
With the tag-based KASAN mode, the early shadow value is 0xff and not 0x00, so this patch renames kasan_zero_(page|pte|pmd|pud|p4d) to kasan_early_shadow_(page|pte|pmd|pud|p4d) to avoid confusion.

Link: http://lkml.kernel.org/r/3fed313280ebf4f88645f5b89ccbc066d320e177.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: b2f557eae9
Commit: 9577dd7486
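For context on why "zero" became a misnomer: generic KASAN expects early shadow memory filled with 0x00 (fully accessible), while the software tag-based mode expects 0xff (the match-all tag). A minimal sketch of that distinction, in the spirit of the KASAN_SHADOW_INIT definition this patch series introduces (the exact header location and spelling here are assumptions, not part of this diff):

	#ifdef CONFIG_KASAN_SW_TAGS
	/* Tag-based mode: 0xff is the "no tag" / match-all value. */
	#define KASAN_SHADOW_INIT	0xFF
	#else
	/* Generic mode: 0 marks a byte range as fully accessible. */
	#define KASAN_SHADOW_INIT	0
	#endif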
arch/x86/mm/dump_pagetables.c:

@@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 
 /*
  * This is an optimization for KASAN=y case. Since all kasan page tables
- * eventually point to the kasan_zero_page we could call note_page()
+ * eventually point to the kasan_early_shadow_page we could call note_page()
  * right away without walking through lower level page tables. This saves
  * us dozens of seconds (minutes for 5-level config) while checking for
  * W+X mapping or reading kernel_page_tables debugfs file.
@@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
 				void *pt)
 {
-	if (__pa(pt) == __pa(kasan_zero_pmd) ||
-	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
-	    __pa(pt) == __pa(kasan_zero_pud)) {
-		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
+	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
+	    (pgtable_l5_enabled() &&
+	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
+	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
+		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
 		note_page(m, st, __pgprot(prot), 0, 5);
 		return true;
 	}
arch/x86/mm/kasan_init_64.c:

@@ -211,7 +211,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		pgd_entry = __pgd(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_p4d));
 		set_pgd(pgd, pgd_entry);
 	}
 
@@ -222,7 +223,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
 		if (!p4d_none(*p4d))
 			continue;
 
-		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		p4d_entry = __p4d(_KERNPG_TABLE |
+				  __pa_nodebug(kasan_early_shadow_pud));
 		set_p4d(p4d, p4d_entry);
 	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
@@ -261,10 +263,11 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
 	int i;
-	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
-	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
-	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
-	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
+	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
+				__PAGE_KERNEL | _PAGE_ENC;
+	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
+	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
+	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
 
 	/* Mask out unsupported __PAGE_KERNEL bits: */
 	pte_val &= __default_kernel_pte_mask;
@@ -273,16 +276,16 @@ void __init kasan_early_init(void)
 	p4d_val &= __default_kernel_pte_mask;
 
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		kasan_zero_pte[i] = __pte(pte_val);
+		kasan_early_shadow_pte[i] = __pte(pte_val);
 
 	for (i = 0; i < PTRS_PER_PMD; i++)
-		kasan_zero_pmd[i] = __pmd(pmd_val);
+		kasan_early_shadow_pmd[i] = __pmd(pmd_val);
 
 	for (i = 0; i < PTRS_PER_PUD; i++)
-		kasan_zero_pud[i] = __pud(pud_val);
+		kasan_early_shadow_pud[i] = __pud(pud_val);
 
 	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
-		kasan_zero_p4d[i] = __p4d(p4d_val);
+		kasan_early_shadow_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
 	kasan_map_early_shadow(init_top_pgt);
@@ -326,7 +329,7 @@ void __init kasan_init(void)
 
 	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
+	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -338,41 +341,41 @@ void __init kasan_init(void)
 
 	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
 	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
-			PAGE_SIZE);
+	shadow_cpu_entry_begin = (void *)round_down(
+			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
 
 	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
 					CPU_ENTRY_AREA_MAP_SIZE);
 	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
-			PAGE_SIZE);
+	shadow_cpu_entry_end = (void *)round_up(
+			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
 
-	kasan_populate_zero_shadow(
+	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		shadow_cpu_entry_begin);
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
 			      (unsigned long)shadow_cpu_entry_end, 0);
 
-	kasan_populate_zero_shadow(shadow_cpu_entry_end,
-			kasan_mem_to_shadow((void *)__START_KERNEL_map));
+	kasan_populate_early_shadow(shadow_cpu_entry_end,
+			kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
 			      (unsigned long)kasan_mem_to_shadow(_end),
 			      early_pfn_to_nid(__pa(_stext)));
 
-	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
-			(void *)KASAN_SHADOW_END);
+	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+			(void *)KASAN_SHADOW_END);
 
 	load_cr3(init_top_pgt);
 	__flush_tlb_all();
 
 	/*
-	 * kasan_zero_page has been used as early shadow memory, thus it may
-	 * contain some garbage. Now we can clear and write protect it, since
-	 * after the TLB flush no one should write to it.
+	 * kasan_early_shadow_page has been used as early shadow memory, thus
+	 * it may contain some garbage. Now we can clear and write protect it,
+	 * since after the TLB flush no one should write to it.
 	 */
-	memset(kasan_zero_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
 		pte_t pte;
 		pgprot_t prot;
@@ -380,8 +383,8 @@ void __init kasan_init(void)
 		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
 		pgprot_val(prot) &= __default_kernel_pte_mask;
 
-		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
-		set_pte(&kasan_zero_pte[i], pte);
+		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
+		set_pte(&kasan_early_shadow_pte[i], pte);
 	}
 	/* Flush TLBs again to be sure that write protection applied. */
 	__flush_tlb_all();
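The renamed objects themselves are declared outside the hunks shown in this excerpt (the full commit also touches the generic KASAN code and other architectures). As a rough reference for how the post-rename declarations read, a sketch assuming they live in include/linux/kasan.h; the exact array bounds, notably MAX_PTRS_PER_P4D for the p4d level, are an assumption:

	/* Early shadow page tables, shared by all KASAN modes (sketch). */
	extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
	extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
	extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
	extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
	extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

	/* Renamed from kasan_populate_zero_shadow, as used in the diff above. */
	int kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end);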