kasan: make kasan_cache_create() work with 32-bit slab cache sizes
If SLAB doesn't support 4GB+ kmem caches (it never did), KASAN should not either.

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit be4a7988b3
parent 0293d1fdd6
committed by Linus Torvalds
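The premise of the change: SLAB's maximum allocation size, KMALLOC_MAX_SIZE, is a compile-time constant far below 4GB, so every slab cache size fits in an unsigned int. A minimal sketch of that invariant as a build-time check follows; BUILD_BUG_ON(), KMALLOC_MAX_SIZE and early_initcall() are real kernel facilities, but this particular check is illustrative and not part of the patch.

#include <linux/bug.h>		/* BUILD_BUG_ON() */
#include <linux/init.h>		/* early_initcall() */
#include <linux/kernel.h>	/* UINT_MAX */
#include <linux/slab.h>		/* KMALLOC_MAX_SIZE */

/* Illustrative only: fail the build if the largest kmalloc'able size
 * ever stopped fitting in 32 bits, which is the assumption that lets
 * slab cache sizes narrow from size_t to unsigned int. */
static int __init slab_size_premise_check(void)
{
	BUILD_BUG_ON(KMALLOC_MAX_SIZE > UINT_MAX);
	return 0;
}
early_initcall(slab_size_premise_check);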
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-	int rz =
+	return
 		object_size <= 64        - 16   ? 16 :
 		object_size <= 128       - 32   ? 32 :
 		object_size <= 512       - 64   ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t object_size)
 		object_size <= (1 << 14) - 256  ? 256 :
 		object_size <= (1 << 15) - 512  ? 512 :
 		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-	return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags)
 {
+	unsigned int orig_size = *size;
 	int redzone_adjust;
-	int orig_size = *size;
 
 	/* Add alloc meta. */
 	cache->kasan_info.alloc_meta_offset = *size;
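To see the adaptive redzone policy concretely, here is a standalone userspace transcription of the chain above (my own harness, not kernel code; it uses only the thresholds visible in these hunks, and the in-tree function may carry additional steps between the 512 and 1 << 14 cases):

#include <stdio.h>

/* Userspace copy of the ternary chain from optimal_redzone() above. */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	unsigned int sizes[] = { 32, 96, 448, 16000, 32000, 100000 };
	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("object %-6u -> redzone %u\n",
		       sizes[i], optimal_redzone(sizes[i]));
	return 0;
}

Expected output: 32 -> 16, 96 -> 32, 448 -> 64, 16000 -> 256, 32000 -> 512, 100000 -> 2048. Each threshold is a size class minus its redzone, so object plus redzone still fits within the class.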
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
 	if (redzone_adjust > 0)
 		*size += redzone_adjust;
 
-	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
-					optimal_redzone(cache->object_size)));
+	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+		      max(*size, cache->object_size +
+				 optimal_redzone(cache->object_size)));
 
 	/*
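Why min() had to become min_t(): KMALLOC_MAX_SIZE expands to an unsigned long constant, while *size is now unsigned int, and the kernel's min() macro deliberately rejects mixed types at compile time. A simplified sketch of the mechanism, from memory of the include/linux/kernel.h of that era (the in-tree macros differ in detail):

/* min() compares the addresses of two same-typed temporaries; mixing
 * unsigned long (KMALLOC_MAX_SIZE) with the now-unsigned-int *size
 * would trip -Wcompare-distinct-pointer-types. min_t() sidesteps the
 * check by casting both sides to one explicit type first. */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y)			\
	min((type)(x), (type)(y))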
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 	size_t ralign = BYTES_PER_WORD;
 	gfp_t gfp;
 	int err;
-	size_t size = cachep->size;
+	unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_cache *s)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	slab_flags_t flags = s->flags;
-	size_t size = s->object_size;
+	unsigned int size = s->object_size;
 	int order;
 
 	/*
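The mm/slab.c and mm/slub.c hunks follow mechanically from the new prototype: both allocators pass their local size by address into kasan_cache_create(), so the locals must become unsigned int to match unsigned int *. A rough sketch of the SLUB call shape under that assumption (heavily elided, not the verbatim 4.16 code):

/* Sketch only: calculate_sizes() hands &size to KASAN, so the local's
 * type is pinned by kasan_cache_create()'s new signature. */
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
	unsigned int size = s->object_size;	/* was size_t */

	kasan_cache_create(s, &size, &s->flags);
	/* ... alignment, order and layout calculations using size ... */
	s->size = size;
	return 0;
}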