Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - large KASAN update to use arm's "software tag-based mode"

 - a few misc things

 - sh updates

 - ocfs2 updates

 - just about all of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (167 commits)
  kernel/fork.c: mark 'stack_vm_area' with __maybe_unused
  memcg, oom: notify on oom killer invocation from the charge path
  mm, swap: fix swapoff with KSM pages
  include/linux/gfp.h: fix typo
  mm/hmm: fix memremap.h, move dev_page_fault_t callback to hmm
  hugetlbfs: Use i_mmap_rwsem to fix page fault/truncate race
  hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization
  memory_hotplug: add missing newlines to debugging output
  mm: remove __hugepage_set_anon_rmap()
  include/linux/vmstat.h: remove unused page state adjustment macro
  mm/page_alloc.c: allow error injection
  mm: migrate: drop unused argument of migrate_page_move_mapping()
  blkdev: avoid migration stalls for blkdev pages
  mm: migrate: provide buffer_migrate_page_norefs()
  mm: migrate: move migrate_page_lock_buffers()
  mm: migrate: lock buffers before migrate_page_move_mapping()
  mm: migration: factor out code to compute expected number of page references
  mm, page_alloc: enable pcpu_drain with zone capability
  kmemleak: add config to select auto scan
  mm/page_alloc.c: don't call kasan_free_pages() at deferred mem init
  ...
@@ -593,6 +593,21 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF
          Say Y here to disable kmemleak by default. It can then be enabled
          on the command line via kmemleak=on.

config DEBUG_KMEMLEAK_AUTO_SCAN
        bool "Enable kmemleak auto scan thread on boot up"
        default y
        depends on DEBUG_KMEMLEAK
        help
          Depending on the cpu, kmemleak scan may be cpu intensive and can
          stall user tasks at times. This option enables/disables automatic
          kmemleak scan at boot up.

          Say N here to disable kmemleak auto scan thread to stop automatic
          scanning. Disabling this option disables automatic reporting of
          memory leaks.

          If unsure, say Y.

config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
        depends on DEBUG_KERNEL && !IA64

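For orientation, a minimal sketch of how a switch like DEBUG_KMEMLEAK_AUTO_SCAN is typically consumed: the scan thread is started automatically only when the option is enabled, while manual scans through the kmemleak debugfs file remain available. This is illustrative only; the init-function name and error handling are made up, and kmemleak_scan_thread stands in for kmemleak's real, file-local scanner.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/printk.h>

/* Assumed to exist: the periodic scanner loop (kmemleak's real one is
 * static in mm/kmemleak.c); declared here only to keep the sketch whole. */
static int kmemleak_scan_thread(void *arg);

static int __init kmemleak_auto_scan_init(void)
{
        struct task_struct *thread;

        if (!IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN))
                return 0;       /* manual scans via debugfs still work */

        thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
        if (IS_ERR(thread))
                pr_warn("kmemleak: failed to start the auto scan thread\n");
        return 0;
}
late_initcall(kmemleak_auto_scan_init);
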
@@ -1,36 +1,92 @@
# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
        bool

if HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
        bool

config CC_HAS_KASAN_GENERIC
        def_bool $(cc-option, -fsanitize=kernel-address)

config CC_HAS_KASAN_SW_TAGS
        def_bool $(cc-option, -fsanitize=kernel-hwaddress)

config KASAN
        bool "KASan: runtime memory debugger"
        bool "KASAN: runtime memory debugger"
        depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
                   (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
        depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
        help
          Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
          designed to find out-of-bounds accesses and use-after-free bugs.
          See Documentation/dev-tools/kasan.rst for details.

choice
        prompt "KASAN mode"
        depends on KASAN
        default KASAN_GENERIC
        help
          KASAN has two modes: generic KASAN (similar to userspace ASan,
          x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
          software tag-based KASAN (a version based on software memory
          tagging, arm64 only, similar to userspace HWASan, enabled with
          CONFIG_KASAN_SW_TAGS).
          Both generic and tag-based KASAN are strictly debugging features.

config KASAN_GENERIC
        bool "Generic mode"
        depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
        depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
          Enables kernel address sanitizer - runtime memory debugger,
          designed to find out-of-bounds accesses and use-after-free bugs.
          This is strictly a debugging feature and it requires a gcc version
          of 4.9.2 or later. Detection of out of bounds accesses to stack or
          global variables requires gcc 5.0 or later.
          This feature consumes about 1/8 of available memory and brings about
          ~x3 performance slowdown.
          Enables generic KASAN mode.
          Supported in both GCC and Clang. With GCC it requires version 4.9.2
          or later for basic support and version 5.0 or later for detection of
          out-of-bounds accesses for stack and global variables and for inline
          instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
          version 3.7.0 or later and it doesn't support detection of
          out-of-bounds accesses for global variables yet.
          This mode consumes about 1/8th of available memory at kernel start
          and introduces an overhead of ~x1.5 for the rest of the allocations.
          The performance slowdown is ~x3.
          For better error detection enable CONFIG_STACKTRACE.
          Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
          Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
          (the resulting kernel does not boot).

config KASAN_EXTRA
        bool "KAsan: extra checks"
        depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
config KASAN_SW_TAGS
        bool "Software tag-based mode"
        depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
        depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
        select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
          This enables further checks in the kernel address sanitizer, for now
          it only includes the address-use-after-scope check that can lead
          to excessive kernel stack usage, frame size warnings and longer
          compile time.
          https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
          Enables software tag-based KASAN mode.
          This mode requires Top Byte Ignore support by the CPU and therefore
          is only supported for arm64.
          This mode requires Clang version 7.0.0 or later.
          This mode consumes about 1/16th of available memory at kernel start
          and introduces an overhead of ~20% for the rest of the allocations.
          This mode may potentially introduce problems relating to pointer
          casting and comparison, as it embeds tags into the top byte of each
          pointer.
          For better error detection enable CONFIG_STACKTRACE.
          Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
          (the resulting kernel does not boot).

endchoice

config KASAN_EXTRA
        bool "KASAN: extra checks"
        depends on KASAN_GENERIC && DEBUG_KERNEL && !COMPILE_TEST
        help
          This enables further checks in generic KASAN, for now it only
          includes the address-use-after-scope check that can lead to
          excessive kernel stack usage, frame size warnings and longer
          compile time.
          See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715

choice
        prompt "Instrumentation type"
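For orientation, the software tag-based mode described in the help text above stores a tag in the otherwise unused top byte of each pointer (hence the Top Byte Ignore requirement) and compares it against a tag kept in shadow memory on every access. Below is a small user-space style sketch of just the tag-in-top-byte idea; it is illustrative only, not the kernel's implementation, and the helper names and the 0xB7 tag value are made up.

#include <stdint.h>
#include <stdio.h>

#define KASAN_TAG_SHIFT 56
#define KASAN_TAG_MASK  0xffUL

/* Embed a tag in the top byte of a pointer-sized value. */
static uint64_t set_tag(uint64_t addr, uint8_t tag)
{
        return (addr & ~(KASAN_TAG_MASK << KASAN_TAG_SHIFT)) |
               ((uint64_t)tag << KASAN_TAG_SHIFT);
}

/* Recover the tag from the top byte. */
static uint8_t get_tag(uint64_t addr)
{
        return addr >> KASAN_TAG_SHIFT;
}

int main(void)
{
        uint64_t addr = 0x0000ffff12345678UL;
        uint64_t tagged = set_tag(addr, 0xB7);

        /* On arm64 with TBI the hardware ignores the top byte when
         * dereferencing, so the tagged pointer still addresses the
         * same memory; only the checker cares about the tag. */
        printf("tag=%#x, untagged=%#llx\n", get_tag(tagged),
               (unsigned long long)set_tag(tagged, 0));
        return 0;
}
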
@@ -53,7 +109,7 @@ config KASAN_INLINE
          memory accesses. This is faster than outline (in some workloads
          it gives about x2 boost over outline instrumentation), but
          make kernel's .text size much bigger.
          This requires a gcc version of 5.0 or later.
          For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.

endchoice

@@ -67,11 +123,9 @@ config KASAN_S390_4_LEVEL_PAGING
          4-level paging instead.

config TEST_KASAN
        tristate "Module for testing kasan for bug detection"
        tristate "Module for testing KASAN for bug detection"
        depends on m && KASAN
        help
          This is a test module doing various nasty things like
          out of bounds accesses, use after free. It is useful for testing
          kernel debugging features like kernel address sanitizer.

endif
          kernel debugging features like KASAN.

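The kind of bug such a test module provokes, and KASAN then reports, looks like the following hypothetical fragment written in the style of lib/test_kasan.c (not copied from it; the function names are made up):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Deliberate out-of-bounds write: with KASAN enabled this access is
 * reported at runtime; without it the corruption may go unnoticed. */
static noinline void kmalloc_oob_write_sketch(void)
{
        char *ptr;
        size_t size = 128;

        ptr = kmalloc(size, GFP_KERNEL);
        if (!ptr)
                return;

        ptr[size] = 'x';        /* one byte past the end of the allocation */
        kfree(ptr);
}

static int __init kasan_sketch_init(void)
{
        kmalloc_oob_write_sketch();
        return -EAGAIN;         /* test modules commonly refuse to stay loaded */
}
module_init(kasan_sketch_init);
MODULE_LICENSE("GPL");
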
@@ -1131,11 +1131,10 @@ static int __init debug_objects_replace_static_objects(void)
        }

        /*
         * When debug_objects_mem_init() is called we know that only
         * one CPU is up, so disabling interrupts is enough
         * protection. This avoids the lockdep hell of lock ordering.
         * debug_objects_mem_init() is now called early that only one CPU is up
         * and interrupts have been disabled, so it is safe to replace the
         * active object references.
         */
        local_irq_disable();

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
@@ -1156,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void)
                        cnt++;
                }
        }
        local_irq_enable();

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);

lib/ioremap.c
@@ -76,83 +76,123 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
        return 0;
}

static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pmd_enabled())
                return 0;

        if ((end - addr) != PMD_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;

        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        phys_addr -= addr;
        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

                if (ioremap_pmd_enabled() &&
                    ((next - addr) == PMD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
                    pmd_free_pte_page(pmd, addr)) {
                        if (pmd_set_huge(pmd, phys_addr + addr, prot))
                                continue;
                }
                if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
                if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_pud_enabled())
                return 0;

        if ((end - addr) != PUD_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
                return 0;

        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
                return 0;

        return pud_set_huge(pud, phys_addr, prot);
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        phys_addr -= addr;
        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

                if (ioremap_pud_enabled() &&
                    ((next - addr) == PUD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
                    pud_free_pmd_page(pud, addr)) {
                        if (pud_set_huge(pud, phys_addr + addr, prot))
                                continue;
                }
                if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
                if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
                                unsigned long end, phys_addr_t phys_addr,
                                pgprot_t prot)
{
        if (!ioremap_p4d_enabled())
                return 0;

        if ((end - addr) != P4D_SIZE)
                return 0;

        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
                return 0;

        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
                return 0;

        return p4d_set_huge(p4d, phys_addr, prot);
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        phys_addr -= addr;
        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

                if (ioremap_p4d_enabled() &&
                    ((next - addr) == P4D_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
                        if (p4d_set_huge(p4d, phys_addr + addr, prot))
                                continue;
                }
                if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
                        continue;

                if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
                if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
                        return -ENOMEM;
        } while (p4d++, addr = next, addr != end);
        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
        return 0;
}

@@ -168,14 +208,13 @@ int ioremap_page_range(unsigned long addr,
        BUG_ON(addr >= end);

        start = addr;
        phys_addr -= addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

        flush_cache_vmap(start, end);

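Drivers normally reach this code indirectly: they call ioremap(), and the architecture's implementation ends up building the kernel mapping through ioremap_page_range() and the pgd/p4d/pud/pmd walkers above. A rough driver-side sketch follows; the device, its bus address, size and register offset are invented for illustration.

#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical device: 4 KiB of MMIO registers at a made-up bus address. */
#define EXAMPLE_MMIO_BASE       0xfed00000UL
#define EXAMPLE_MMIO_SIZE       0x1000
#define EXAMPLE_CTRL_REG        0x0     /* imagined control register offset */

static void __iomem *example_regs;

static int example_map_registers(void)
{
        /* ioremap() builds the mapping via ioremap_page_range() above. */
        example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
        if (!example_regs)
                return -ENOMEM;

        writel(0x1, example_regs + EXAMPLE_CTRL_REG);   /* imagined enable bit */
        return 0;
}

static void example_unmap_registers(void)
{
        iounmap(example_regs);
}
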
@@ -18,22 +18,19 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
        show_free_areas(filter, nodemask);

        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                int zoneid;

                pgdat_resize_lock(pgdat, &flags);
                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                        struct zone *zone = &pgdat->node_zones[zoneid];
                        if (!populated_zone(zone))
                                continue;

                        total += zone->present_pages;
                        reserved += zone->present_pages - zone->managed_pages;
                        reserved += zone->present_pages - zone_managed_pages(zone);

                        if (is_highmem_idx(zoneid))
                                highmem += zone->present_pages;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }

        printk("%lu pages RAM\n", total);

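The switch from zone->managed_pages to zone_managed_pages() reflects managed_pages becoming an atomic counter elsewhere in this series; the accessor is essentially a one-line wrapper, roughly as sketched below (paraphrased, not quoted from include/linux/mmzone.h):

/* Roughly how the accessor reads the now-atomic managed_pages counter. */
static inline unsigned long zone_managed_pages(struct zone *zone)
{
        return (unsigned long)atomic_long_read(&zone->managed_pages);
}
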