Merge branch 'tracing/core-v2' into tracing-for-linus
Conflicts:
	include/linux/slub_def.h
	lib/Kconfig.debug
	mm/slob.c
	mm/slub.c

mm/slab.c | 71

@@ -102,6 +102,7 @@
 #include	<linux/cpu.h>
 #include	<linux/sysctl.h>
 #include	<linux/module.h>
+#include	<trace/kmemtrace.h>
 #include	<linux/rcupdate.h>
 #include	<linux/string.h>
 #include	<linux/uaccess.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3554,10 +3563,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3602,23 +3624,47 @@ out:
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-				  __builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3651,6 +3697,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3660,11 +3707,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3703,6 +3756,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3729,6 +3784,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
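
The pattern repeated across the hunks above is uniform: capture the allocator's return value in ret, emit a kmemtrace event carrying both the size the caller asked for and the size the cache actually hands out, then return ret. The new _notrace variants exist so that internal callers such as __do_kmalloc_node() can reuse the cache allocator without logging a second, wrongly-typed CACHE event on top of their own KMALLOC one. A minimal userspace sketch of that shape (all names below are illustrative stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's trace hooks (illustrative only). */
static void trace_alloc(const char *type, void *ptr, size_t req, size_t actual)
{
	printf("alloc %-7s ptr=%p requested=%zu actual=%zu\n", type, ptr, req, actual);
}

/* The untraced core allocator, analogous to kmem_cache_alloc_notrace(). */
static void *cache_alloc_notrace(size_t object_size)
{
	return malloc(object_size);
}

/* Public entry point: allocate, record the event, return the pointer. */
static void *cache_alloc(size_t object_size)
{
	void *ret = cache_alloc_notrace(object_size);

	trace_alloc("CACHE", ret, object_size, object_size);
	return ret;
}

/* A kmalloc-style wrapper calls the *notrace* variant, so only one
 * event -- tagged KMALLOC, with the caller's requested size -- is logged. */
static void *my_kmalloc(size_t requested, size_t rounded_up)
{
	void *ret = cache_alloc_notrace(rounded_up);

	trace_alloc("KMALLOC", ret, requested, rounded_up);
	return ret;
}

int main(void)
{
	void *a = cache_alloc(64);
	void *b = my_kmalloc(100, 128);

	free(a);
	free(b);
	return 0;
}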

mm/slob.c | 35

@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	lockdep_trace_alloc(gfp);
 
@@ -482,12 +484,17 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
 		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
@@ -495,8 +502,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -514,6 +526,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -583,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -622,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
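
In the SLOB hunks the trace records distinguish the requested size from the memory actually consumed: a small object carries a size prefix, so it costs size + align bytes, while a larger request falls back to whole pages and costs PAGE_SIZE << get_order(size). A standalone sketch of that bookkeeping (the sub-page threshold, 8-byte alignment, and order_for() helper are assumptions for illustration, not the exact SLOB constants or macros):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Rough equivalent of get_order(): smallest order such that
 * (PAGE_SIZE << order) >= size. Illustrative, not the kernel macro. */
static unsigned int order_for(size_t size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	const size_t align = 8;	/* assumed minimum alignment */
	size_t sizes[] = { 32, 500, 4000, 9000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];
		size_t actual;

		if (size < PAGE_SIZE)
			actual = size + align;			/* size-prefixed small object */
		else
			actual = PAGE_SIZE << order_for(size);	/* whole pages */

		printf("requested=%zu reported actual=%zu\n", size, actual);
	}
	return 0;
}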

mm/slub.c | 83

@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow patch handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
 
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we recieved. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
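
The _track_caller variants forward the caller argument into the trace record instead of _RET_IP_, so the event is attributed to the original call site rather than to the wrapper that performed the allocation. A small sketch of that idea using GCC's __builtin_return_address (hypothetical names, not the kernel interface):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for kmemtrace_mark_alloc(): just print the call-site address. */
static void *alloc_track_caller(size_t size, void *caller)
{
	void *ret = malloc(size);

	printf("alloc size=%zu at call site %p -> %p\n", size, caller, ret);
	return ret;
}

/* Wrapper layer: forwards its own return address, the way
 * __kmalloc_track_caller() honors the caller it was handed. */
static void *my_alloc(size_t size)
{
	return alloc_track_caller(size, __builtin_return_address(0));
}

int main(void)
{
	void *p = my_alloc(64);	/* event is attributed to this line, not to my_alloc() */

	free(p);
	return 0;
}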