FROMGIT: kasan, mm: integrate page_alloc init with HW_TAGS

This change makes page_alloc use the memory initialization feature that
was previously added to the HW_TAGS KASAN routines whenever
init_on_alloc/free is enabled.
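
For reference, the behavior described here is controlled by the existing
init_on_alloc/init_on_free boot parameters (or the corresponding
CONFIG_INIT_ON_ALLOC_DEFAULT_ON/CONFIG_INIT_ON_FREE_DEFAULT_ON defaults),
e.g.:

    init_on_alloc=1 init_on_free=1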

With this change, kernel_init_free_pages() is no longer called when both
HW_TAGS KASAN and init_on_alloc/free are enabled.  Instead, memory is
initialized in the KASAN runtime.
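
In code terms, the allocation path now computes a single "init" flag and
passes it into KASAN, falling back to the software memset only when KASAN
cannot take over the initialization (this mirrors the post_alloc_hook()
hunk below):

    init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
    kasan_alloc_pages(page, order, init);
    if (init && !kasan_has_integrated_init())
            kernel_init_free_pages(page, 1 << order);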

To avoid future changes introducing discrepancies in which memory gets
initialized, the KASAN and kernel_init_free_pages() hooks are kept next
to each other, and a warning comment is added.

This patch changes the order in which the memory initialization and page
poisoning hooks are called.  This has no side effects: whenever page
poisoning is enabled, memory initialization gets disabled.
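
As a reminder, this mutual exclusion is established at boot.  A
simplified sketch of the init_on_alloc half of the logic in
init_mem_debugging_and_hardening() in mm/page_alloc.c (the real function
also handles init_on_free and debug_pagealloc):

    if (_init_on_alloc_enabled_early) {
            if (page_poisoning_enabled())
                    pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
                            "will take precedence over init_on_alloc\n");
            else
                    static_branch_enable(&init_on_alloc);
    }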

Setting allocation tags and initializing memory in a single pass improves
HW_TAGS KASAN performance when init_on_alloc/free is enabled.
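
A rough sketch of where the win comes from on arm64 MTE (whose tag-store
instructions have zeroing variants, e.g. STZG, that set tags and
initialize data in a single pass over the memory):

    /* Before: two passes when init_on_alloc/free is enabled. */
    kasan_unpoison(addr, size, false);  /* set memory tags */
    memset(addr, 0, size);              /* then zero the data */

    /* After: one combined pass. */
    kasan_unpoison(addr, size, true);   /* set tags and zero together */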

Link: https://lkml.kernel.org/r/e77f0d5b1b20658ef0b8288625c74c2b3690e725.1615296150.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>

(cherry picked from commit 26a7ee1a170e0bc17505d04120e595cba0b9cc1b
 https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git akpm)
Bug: 182930667
Signed-off-by: Alexander Potapenko <glider@google.com>
Change-Id: Iac6cf801657c260b15ec9ef49bd1b02dc83660bc
---
 4 files changed, 54 insertions(+), 25 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h

@@ -96,6 +96,11 @@ static __always_inline bool kasan_enabled(void)
 	return static_branch_likely(&kasan_flag_enabled);
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return kasan_enabled();
+}
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_enabled(void)
@@ -103,6 +108,11 @@ static inline bool kasan_enabled(void)
 	return true;
 }
 
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 slab_flags_t __kasan_never_merge(void);
@@ -120,20 +130,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
 	__kasan_unpoison_range(addr, size);
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order);
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_alloc_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_alloc_pages(page, order);
+		__kasan_alloc_pages(page, order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order);
+void __kasan_free_pages(struct page *page, unsigned int order, bool init);
 static __always_inline void kasan_free_pages(struct page *page,
-						unsigned int order)
+						unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		__kasan_free_pages(page, order);
+		__kasan_free_pages(page, order, init);
 }
 
 void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -277,13 +287,17 @@ static inline bool kasan_enabled(void)
 {
 	return false;
 }
+static inline bool kasan_has_integrated_init(void)
+{
+	return false;
+}
 static inline slab_flags_t kasan_never_merge(void)
 {
 	return 0;
 }
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
 static inline void kasan_cache_create(struct kmem_cache *cache,
 				      unsigned int *size,
 				      slab_flags_t *flags) {}

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c

@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
 	return 0;
 }
 
-void __kasan_alloc_pages(struct page *page, unsigned int order)
+void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
@@ -108,14 +108,14 @@ void __kasan_alloc_pages(struct page *page, unsigned int order)
 	tag = kasan_random_tag();
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison(page_address(page), PAGE_SIZE << order, false);
+	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
-void __kasan_free_pages(struct page *page, unsigned int order)
+void __kasan_free_pages(struct page *page, unsigned int order, bool init)
 {
 	if (likely(!PageHighMem(page)))
 		kasan_poison(page_address(page), PAGE_SIZE << order,
-			     KASAN_FREE_PAGE, false);
+			     KASAN_FREE_PAGE, init);
 }
 
 /*

diff --git a/mm/mempool.c b/mm/mempool.c
--- a/mm/mempool.c
+++ b/mm/mempool.c

@@ -106,7 +106,7 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_slab_free_mempool(element);
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_free_pages(element, (unsigned long)pool->pool_data);
+		kasan_free_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +114,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
 		kasan_unpoison_range(element, __ksize(element));
 	else if (pool->alloc == mempool_alloc_pages)
-		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+		kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
 }
 
 static __always_inline void add_element(mempool_t *pool, void *element)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -407,14 +407,14 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
  * initialization is done, but this is not likely to happen.
  */
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (static_branch_unlikely(&deferred_pages))
 		return;
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 /* Returns true if the struct page for the pfn is uninitialised */
@@ -466,12 +466,12 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 }
 #else
 static inline void kasan_free_nondeferred_pages(struct page *page, int order,
-						fpi_t fpi_flags)
+						bool init, fpi_t fpi_flags)
 {
 	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 			(fpi_flags & FPI_SKIP_KASAN_POISON))
 		return;
-	kasan_free_pages(page, order);
+	kasan_free_pages(page, order, init);
 }
 
 static inline bool early_page_uninitialised(unsigned long pfn)
@@ -1253,6 +1253,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			unsigned int order, bool check_free, fpi_t fpi_flags)
 {
 	int bad = 0;
+	bool init;
 
 	VM_BUG_ON_PAGE(PageTail(page), page);
 
@@ -1310,16 +1311,21 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		debug_check_no_obj_freed(page_address(page),
 					 PAGE_SIZE << order);
 	}
-	if (want_init_on_free())
-		kernel_init_free_pages(page, 1 << order);
 
 	kernel_poison_pages(page, 1 << order);
 
 	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_free_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
 	 * With hardware tag-based KASAN, memory tags must be set before the
 	 * page becomes unavailable via debug_pagealloc or arch_free_page.
 	 */
-	kasan_free_nondeferred_pages(page, order, fpi_flags);
+	init = want_init_on_free();
+	if (init && !kasan_has_integrated_init())
+		kernel_init_free_pages(page, 1 << order);
+	kasan_free_nondeferred_pages(page, order, init, fpi_flags);
 
 	/*
 	 * arch_free_page() can make the page's contents inaccessible. s390
@@ -2314,17 +2320,26 @@ static bool check_new_pages(struct page *page, unsigned int order)
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
+	bool init;
+
 	set_page_private(page, 0);
 	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
-	kasan_alloc_pages(page, order);
+
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_alloc_pages and kernel_init_free_pages must be
+	 * kept together to avoid discrepancies in behavior.
+	 */
+	init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+	kasan_alloc_pages(page, order, init);
+	if (init && !kasan_has_integrated_init())
+		kernel_init_free_pages(page, 1 << order);
+
 	kernel_unpoison_pages(page, 1 << order);
 	set_page_owner(page, order, gfp_flags);
-
-	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
-		kernel_init_free_pages(page, 1 << order);
 }
 
 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,