kasan: add tag related helper functions
This commit adds a few helper functions that are meant to be used to work with tags embedded in the top byte of kernel pointers: to set, to get, or to reset the top byte.

Link: http://lkml.kernel.org/r/f6c6437bb8e143bc44f42c3c259c62e734be7935.1544099024.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

committed by
Linus Torvalds

parent
9c23f84723
commit
3c9e3aa110
@@ -8,6 +8,10 @@
|
||||
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
|
||||
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
|
||||
|
||||
#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
|
||||
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
|
||||
#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
|
||||
|
||||
#define KASAN_FREE_PAGE 0xFF /* page was freed */
|
||||
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
|
||||
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
|
||||
@@ -126,6 +130,33 @@ static inline void quarantine_reduce(void) { }
|
||||
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_KASAN_SW_TAGS

/*
 * Returns a pseudo-random pointer tag; the implementation (mm/kasan/tags.c)
 * draws from a per-CPU PRNG and keeps the result in [0, KASAN_TAG_MAX].
 */
u8 random_tag(void);

#else

/* Without software tags there is nothing to randomize: always tag 0. */
static inline u8 random_tag(void)
{
	return 0;
}

#endif
|
||||
|
||||
/*
 * Arch hooks for manipulating the tag embedded in the top byte of a kernel
 * pointer. Architectures that do not override them get no-op fallbacks:
 * set/reset leave the address unchanged, get reports tag 0.
 */
#ifndef arch_kasan_set_tag
#define arch_kasan_set_tag(addr, tag) ((void *)(addr))
#endif
#ifndef arch_kasan_reset_tag
#define arch_kasan_reset_tag(addr) ((void *)(addr))
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr) 0
#endif

/* Convenience wrappers around the arch hooks, used throughout KASAN. */
#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
#define get_tag(addr) arch_kasan_get_tag(addr)
|
||||
|
||||
/*
|
||||
* Exported functions for interfaces called from assembly or from generated
|
||||
* code. Declarations here to avoid warning about missing declarations.
|
||||
|
@@ -38,6 +38,43 @@
|
||||
#include "kasan.h"
|
||||
#include "../slab.h"
|
||||
|
||||
static DEFINE_PER_CPU(u32, prng_state);
|
||||
|
||||
/*
 * Seed the per-CPU PRNG state used by random_tag() with a strong random
 * value (get_random_u32()) on every possible CPU.
 */
void kasan_init_tags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(prng_state, cpu) = get_random_u32();
}
|
||||
|
||||
/*
|
||||
* If a preemption happens between this_cpu_read and this_cpu_write, the only
|
||||
* side effect is that we'll give a few allocated in different contexts objects
|
||||
* the same tag. Since tag-based KASAN is meant to be used a probabilistic
|
||||
* bug-detection debug feature, this doesn't have significant negative impact.
|
||||
*
|
||||
* Ideally the tags use strong randomness to prevent any attempts to predict
|
||||
* them during explicit exploit attempts. But strong randomness is expensive,
|
||||
* and we did an intentional trade-off to use a PRNG. This non-atomic RMW
|
||||
* sequence has in fact positive effect, since interrupts that randomly skew
|
||||
* PRNG at unpredictable points do only good.
|
||||
*/
|
||||
u8 random_tag(void)
|
||||
{
|
||||
u32 state = this_cpu_read(prng_state);
|
||||
|
||||
state = 1664525 * state + 1013904223;
|
||||
this_cpu_write(prng_state, state);
|
||||
|
||||
return (u8)(state % (KASAN_TAG_MAX + 1));
|
||||
}
|
||||
|
||||
/* Exported wrapper: strip the embedded tag from @addr via the arch hook. */
void *kasan_reset_tag(const void *addr)
{
	void *untagged = reset_tag(addr);

	return untagged;
}
|
||||
|
||||
void check_memory_region(unsigned long addr, size_t size, bool write,
|
||||
unsigned long ret_ip)
|
||||
{
|
||||
|
Reference in New Issue
Block a user