mm, kasan: SLAB support

Add KASAN hooks to the SLAB allocator.

This patch is based on the "mm: kasan: unified support for SLUB and SLAB
allocators" patch originally prepared by Dmitry Chernenkov.
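In SLAB mode, kasan_cache_create() grows each instrumented cache so
that every object is followed by allocation metadata (struct
kasan_alloc_meta), optionally free metadata (struct kasan_free_meta),
and an adaptively sized redzone.  Allocation and free events record
the CPU, PID and a jiffies timestamp, which the error reports print
together with the object state (not yet allocated / allocated /
freed).  SLAB_KASAN is also added to SLAB_NEVER_MERGE, since
instrumented caches lay their objects out differently from
uninstrumented ones.

The redzone policy can be illustrated with a small userspace mirror
of optimal_redzone() (a sketch for illustration only; the function
itself lives in the kasan code below):

	#include <stdio.h>
	#include <stddef.h>

	/* Same ternary chain as the kernel-side optimal_redzone(). */
	static size_t optimal_redzone(size_t object_size)
	{
		return object_size <= 64 - 16 ? 16 :
		       object_size <= 128 - 32 ? 32 :
		       object_size <= 512 - 64 ? 64 :
		       object_size <= 4096 - 128 ? 128 :
		       object_size <= (1 << 14) - 256 ? 256 :
		       object_size <= (1 << 15) - 512 ? 512 :
		       object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	}

	int main(void)
	{
		size_t sizes[] = { 16, 192, 1024, 8192, 1 << 16 };
		size_t i;

		/* Prints: 16->16, 192->64, 1024->128, 8192->256, 65536->2048 */
		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("object %6zu -> redzone %4zu\n",
			       sizes[i], optimal_redzone(sizes[i]));
		return 0;
	}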

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Alexander Potapenko
Date:      2016-03-25 14:21:59 -07:00
Committer: Linus Torvalds
Parent:    e6e8379c87
Commit:    7ed2f9e663
12 changed files, 266 insertions(+), 22 deletions(-)

diff --git a/mm/Makefile b/mm/Makefile

@@ -3,6 +3,7 @@
#
KASAN_SANITIZE_slab_common.o := n
KASAN_SANITIZE_slab.o := n
KASAN_SANITIZE_slub.o := n
# These files are disabled because they produce non-interesting and/or

diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c

@@ -334,6 +334,59 @@ void kasan_free_pages(struct page *page, unsigned int order)
KASAN_FREE_PAGE);
}
#ifdef CONFIG_SLAB
/*
* Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
* For larger allocations larger redzones are used.
*/
static size_t optimal_redzone(size_t object_size)
{
int rz =
object_size <= 64 - 16 ? 16 :
object_size <= 128 - 32 ? 32 :
object_size <= 512 - 64 ? 64 :
object_size <= 4096 - 128 ? 128 :
object_size <= (1 << 14) - 256 ? 256 :
object_size <= (1 << 15) - 512 ? 512 :
object_size <= (1 << 16) - 1024 ? 1024 : 2048;
return rz;
}
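/*
 * Reserve in-object metadata for each object in the cache:
 * [object | alloc meta | (optional free meta) | redzone], with the
 * total size clamped to KMALLOC_MAX_CACHE_SIZE.  Free metadata is
 * placed out of line only when the freed object itself cannot hold
 * it: RCU-destroyed caches, caches with constructors, and objects
 * smaller than struct kasan_free_meta.
 */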
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
unsigned long *flags)
{
int redzone_adjust;
/* Make sure the adjusted size is still less than
* KMALLOC_MAX_CACHE_SIZE.
* TODO: this check is only useful for SLAB, but not SLUB. We'll need
* to skip it for SLUB when it starts using kasan_cache_create().
*/
if (*size > KMALLOC_MAX_CACHE_SIZE -
sizeof(struct kasan_alloc_meta) -
sizeof(struct kasan_free_meta))
return;
*flags |= SLAB_KASAN;
/* Add alloc meta. */
cache->kasan_info.alloc_meta_offset = *size;
*size += sizeof(struct kasan_alloc_meta);
/* Add free meta. */
if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
cache->object_size < sizeof(struct kasan_free_meta)) {
cache->kasan_info.free_meta_offset = *size;
*size += sizeof(struct kasan_free_meta);
}
redzone_adjust = optimal_redzone(cache->object_size) -
(*size - cache->object_size);
if (redzone_adjust > 0)
*size += redzone_adjust;
*size = min(KMALLOC_MAX_CACHE_SIZE,
max(*size,
cache->object_size +
optimal_redzone(cache->object_size)));
}
#endif
void kasan_poison_slab(struct page *page)
{
kasan_poison_shadow(page_address(page),
@@ -351,8 +404,36 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
kasan_poison_shadow(object,
round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
if (cache->flags & SLAB_KASAN) {
struct kasan_alloc_meta *alloc_info =
get_alloc_info(cache, object);
alloc_info->state = KASAN_STATE_INIT;
}
#endif
}
static inline void set_track(struct kasan_track *track)
{
track->cpu = raw_smp_processor_id();
track->pid = current->pid;
track->when = jiffies;
}
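/*
 * The accessors below return pointers into the slab object itself,
 * at the metadata offsets reserved by kasan_cache_create().
 */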
#ifdef CONFIG_SLAB
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object)
{
return (void *)object + cache->kasan_info.alloc_meta_offset;
}
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
const void *object)
{
return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif
void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
kasan_kmalloc(cache, object, cache->object_size);
@@ -367,6 +448,17 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
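/* Objects in SLAB_DESTROY_BY_RCU caches may legitimately be accessed
 * until the grace period expires, so do not poison or track them here.
 */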
if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
return;
#ifdef CONFIG_SLAB
if (cache->flags & SLAB_KASAN) {
struct kasan_free_meta *free_info =
get_free_info(cache, object);
struct kasan_alloc_meta *alloc_info =
get_alloc_info(cache, object);
alloc_info->state = KASAN_STATE_FREE;
set_track(&free_info->track);
}
#endif
kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}
@@ -386,6 +478,16 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
kasan_unpoison_shadow(object, size);
kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
if (cache->flags & SLAB_KASAN) {
struct kasan_alloc_meta *alloc_info =
get_alloc_info(cache, object);
alloc_info->state = KASAN_STATE_ALLOC;
alloc_info->alloc_size = size;
set_track(&alloc_info->track);
}
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);

diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h

@@ -54,6 +54,40 @@ struct kasan_global {
#endif
};
/*
 * Structures to keep alloc and free tracks.
 */
enum kasan_state {
KASAN_STATE_INIT,
KASAN_STATE_ALLOC,
KASAN_STATE_FREE
};
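/*
 * The states above move INIT -> ALLOC -> FREE: set in
 * kasan_poison_object_data(), kasan_kmalloc() and kasan_slab_free()
 * respectively.
 */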
struct kasan_track {
u64 cpu : 6; /* for NR_CPUS = 64 */
u64 pid : 16; /* 65536 processes */
u64 when : 42; /* ~140 years */
};
struct kasan_alloc_meta {
u32 state : 2; /* enum kasan_state */
u32 alloc_size : 30;
struct kasan_track track;
};
struct kasan_free_meta {
/* Allocator freelist pointer, unused by KASAN. */
void **freelist;
struct kasan_track track;
};
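/*
 * With natural alignment on a 64-bit kernel, struct kasan_track
 * should pack into a single u64, and each of the two metadata
 * structs above should occupy 16 bytes.
 */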
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
const void *object);
struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
const void *object);
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)

diff --git a/mm/kasan/report.c b/mm/kasan/report.c

@@ -115,6 +115,46 @@ static inline bool init_task_stack_addr(const void *addr)
sizeof(init_thread_union.stack));
}
#ifdef CONFIG_SLAB
static void print_track(struct kasan_track *track)
{
pr_err("PID = %u, CPU = %u, timestamp = %lu\n", track->pid,
track->cpu, (unsigned long)track->when);
}
static void object_err(struct kmem_cache *cache, struct page *page,
void *object, char *unused_reason)
{
struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
struct kasan_free_meta *free_info;
dump_stack();
pr_err("Object at %p, in cache %s\n", object, cache->name);
if (!(cache->flags & SLAB_KASAN))
return;
switch (alloc_info->state) {
case KASAN_STATE_INIT:
pr_err("Object not allocated yet\n");
break;
case KASAN_STATE_ALLOC:
pr_err("Object allocated with size %u bytes.\n",
alloc_info->alloc_size);
pr_err("Allocation:\n");
print_track(&alloc_info->track);
break;
case KASAN_STATE_FREE:
pr_err("Object freed, allocated with size %u bytes\n",
alloc_info->alloc_size);
free_info = get_free_info(cache, object);
pr_err("Allocation:\n");
print_track(&alloc_info->track);
pr_err("Deallocation:\n");
print_track(&free_info->track);
break;
}
}
#endif
static void print_address_description(struct kasan_access_info *info)
{
const void *addr = info->access_addr;
@@ -126,17 +166,10 @@ static void print_address_description(struct kasan_access_info *info)
if (PageSlab(page)) {
void *object;
struct kmem_cache *cache = page->slab_cache;
- void *last_object;
- object = virt_to_obj(cache, page_address(page), addr);
- last_object = page_address(page) +
- page->objects * cache->size;
- if (unlikely(object > last_object))
- object = last_object; /* we hit into padding */
+ object = nearest_obj(cache, page,
+ (void *)info->access_addr);
object_err(cache, page, object,
- "kasan: bad access detected");
+ "kasan: bad access detected");
return;
}
dump_page(page, "kasan: bad access detected");
@@ -146,7 +179,6 @@ static void print_address_description(struct kasan_access_info *info)
if (!init_task_stack_addr(addr))
pr_err("Address belongs to variable %pS\n", addr);
}
- dump_stack();
}

diff --git a/mm/slab.c b/mm/slab.c

@@ -2086,6 +2086,8 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
}
#endif
kasan_cache_create(cachep, &size, &flags);
size = ALIGN(size, cachep->align);
/*
* We should restrict the number of objects in a slab to implement
@@ -2387,8 +2389,13 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
* cache which they are a constructor for. Otherwise, deadlock.
* They must also be threaded.
*/
- if (cachep->ctor && !(cachep->flags & SLAB_POISON))
+ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
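/* The object is poisoned at this point: open an unpoisoned window so
 * the constructor can initialize it, then re-poison it until it is
 * actually allocated. */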
kasan_unpoison_object_data(cachep,
objp + obj_offset(cachep));
cachep->ctor(objp + obj_offset(cachep));
kasan_poison_object_data(
cachep, objp + obj_offset(cachep));
}
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2409,6 +2416,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
struct page *page)
{
int i;
void *objp;
cache_init_objs_debug(cachep, page);
@@ -2419,8 +2427,12 @@ static void cache_init_objs(struct kmem_cache *cachep,
for (i = 0; i < cachep->num; i++) {
/* constructor could break poison info */
- if (DEBUG == 0 && cachep->ctor)
- cachep->ctor(index_to_obj(cachep, page, i));
+ if (DEBUG == 0 && cachep->ctor) {
objp = index_to_obj(cachep, page, i);
kasan_unpoison_object_data(cachep, objp);
cachep->ctor(objp);
kasan_poison_object_data(cachep, objp);
}
set_free_obj(page, i, i);
}
@@ -2550,6 +2562,7 @@ static int cache_grow(struct kmem_cache *cachep,
slab_map_pages(cachep, page, freelist);
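/* Poison the whole slab up front; objects are unpoisoned one by one
 * by the allocation hooks as they are handed out. */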
kasan_poison_slab(page);
cache_init_objs(cachep, page);
if (gfpflags_allow_blocking(local_flags))
@@ -3316,6 +3329,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
{
struct array_cache *ac = cpu_cache_get(cachep);
kasan_slab_free(cachep, objp);
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
@@ -3363,6 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
kasan_slab_alloc(cachep, ret);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3428,6 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, flags, _RET_IP_);
kasan_kmalloc(cachep, ret, size);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@@ -3451,6 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
kasan_slab_alloc(cachep, ret);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -3468,7 +3486,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
void *ret;
ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
kasan_kmalloc(cachep, ret, size);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
flags, nodeid);
@@ -3481,11 +3499,15 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
kasan_kmalloc(cachep, ret, size);
return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3519,6 +3541,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
return cachep;
ret = slab_alloc(cachep, flags, caller);
kasan_kmalloc(cachep, ret, size);
trace_kmalloc(caller, ret,
size, cachep->size, flags);
@@ -4290,10 +4313,18 @@ module_init(slab_proc_init);
*/
size_t ksize(const void *objp)
{
size_t size;
BUG_ON(!objp);
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
- return virt_to_cache(objp)->object_size;
+ size = virt_to_cache(objp)->object_size;
/* We assume that ksize callers could use the whole allocated area,
* so we need to unpoison this area.
*/
kasan_krealloc(objp, size);
return size;
}
EXPORT_SYMBOL(ksize);

diff --git a/mm/slab_common.c b/mm/slab_common.c

@@ -35,7 +35,7 @@ struct kmem_cache *kmem_cache;
*/
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
- SLAB_FAILSLAB)
+ SLAB_FAILSLAB | SLAB_KASAN)
#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
SLAB_NOTRACK | SLAB_ACCOUNT)