mm, kasan: add GFP flags to KASAN API
Add GFP flags to KASAN hooks for future patches to use.

This patch is based on the "mm: kasan: unified support for SLUB and
SLAB allocators" patch originally prepared by Dmitry Chernenkov.

Signed-off-by: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 505f5dcb1c
parent 7ed2f9e663
committed by: Linus Torvalds
mm/slab.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
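The call sites changed below imply new prototypes for the KASAN hooks, each
gaining a gfp_t parameter. As a sketch, inferred from the calls in this diff
(the corresponding include/linux/kasan.h change belongs to the same commit but
is not shown in this excerpt), the updated declarations would look roughly
like:

	struct kmem_cache;

	void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
	void kasan_kmalloc(struct kmem_cache *s, const void *object,
			   size_t size, gfp_t flags);
	void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);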
@@ -3378,7 +3378,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc(_RET_IP_, ret,
 			       cachep->object_size, cachep->size, flags);
 
@@ -3444,7 +3444,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
 
 	ret = slab_alloc(cachep, flags, _RET_IP_);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(_RET_IP_, ret,
 		      size, cachep->size, flags);
 	return ret;
@@ -3468,7 +3468,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
 
-	kasan_slab_alloc(cachep, ret);
+	kasan_slab_alloc(cachep, ret, flags);
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    cachep->object_size, cachep->size,
 				    flags, nodeid);
@@ -3486,7 +3486,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	void *ret;
 
 	ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
-	kasan_kmalloc(cachep, ret, size);
+
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
@@ -3505,7 +3506,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 
 	return ret;
 }
@@ -3541,7 +3542,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
 
-	kasan_kmalloc(cachep, ret, size);
+	kasan_kmalloc(cachep, ret, size, flags);
 	trace_kmalloc(caller, ret,
 		      size, cachep->size, flags);
 
@@ -4323,7 +4324,7 @@ size_t ksize(const void *objp)
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
-	kasan_krealloc(objp, size);
+	kasan_krealloc(objp, size, GFP_NOWAIT);
 
 	return size;
 }
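Note on the final hunk: ksize() takes no gfp argument and may be called from
atomic context, so kasan_krealloc() is handed GFP_NOWAIT rather than a
caller-supplied mask. A minimal sketch of how a hook can honour such a mask
(kasan_hook_sketch is a hypothetical name for illustration, not part of this
commit):

	#include <linux/gfp.h>

	/* Illustrative only: work that needs no allocation always runs;
	 * work that might allocate defers to the supplied gfp mask.
	 */
	static void kasan_hook_sketch(const void *object, size_t size,
				      gfp_t flags)
	{
		if (!object)
			return;

		/* Shadow updates need no allocation, so they are safe
		 * even under GFP_NOWAIT.
		 */

		if (gfpflags_allow_blocking(flags)) {
			/* Bookkeeping that may allocate (e.g. saving a
			 * stack trace) runs only when the caller may block.
			 */
		}
	}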