Merge branch 'tracing/core-v2' into tracing-for-linus

Conflicts:
	include/linux/slub_def.h
	lib/Kconfig.debug
	mm/slob.c
	mm/slub.c
Author: Ingo Molnar
Date:   2009-04-01 21:54:19 +02:00
149 changed files with 14659 additions and 3726 deletions

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
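
The hunks that follow call three markers from the newly included <trace/kmemtrace.h>. As a reading aid, here is the shape those calls imply; this is reconstructed from the call sites in this diff, not quoted from the header:

void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,	/* KMEMTRACE_TYPE_KMALLOC or _CACHE */
			  unsigned long call_site,		/* usually _RET_IP_ */
			  const void *ptr,			/* object handed back to the caller */
			  size_t bytes_req,			/* size the caller asked for */
			  size_t bytes_alloc,			/* size actually reserved, e.g. s->size */
			  gfp_t gfp_flags);

void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
			       unsigned long call_site, const void *ptr,
			       size_t bytes_req, size_t bytes_alloc,
			       gfp_t gfp_flags, int node);	/* NUMA-aware variant */

void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
			 unsigned long call_site, const void *ptr);
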
@@ -1618,18 +1619,46 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     s->objsize, s->size, gfpflags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+{
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  s->objsize, s->size, gfpflags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
+				    gfp_t gfpflags,
+				    int node)
+{
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.
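
The _notrace variants added above exist so that inline fast paths can attribute an allocation to their real call site: the inline allocates through the notrace entry point and emits the trace event itself. A minimal sketch of that pattern, modeled on the inline kmalloc() in include/linux/slub_def.h (one of the conflicted files); kmalloc_slab() and the elided constant-size/DMA checks are assumptions here, while kmem_cache_alloc_notrace(), kmemtrace_mark_alloc() and _THIS_IP_ come from this merge:

/*
 * Sketch only: trace from an inline wrapper so the recorded call
 * site is the wrapper's caller, not a line inside mm/slub.c.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *s;
	void *ret;

	if (size > SLUB_MAX_SIZE)
		return kmalloc_large(size, flags);

	s = kmalloc_slab(size);		/* header-internal cache lookup */
	if (!s)
		return ZERO_SIZE_PTR;

	/* No hook fires inside the allocator on this path... */
	ret = kmem_cache_alloc_notrace(s, flags);

	/* ...so report the event with the inline caller's address. */
	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     size, s->size, flags);

	return ret;
}
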
@@ -1737,6 +1766,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	page = virt_to_head_page(x);
 
 	slab_free(s, page, x, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -2659,6 +2690,7 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
@@ -2668,7 +2700,12 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, -1, _RET_IP_);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+			     size, s->size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2687,16 +2724,30 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
+	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
-		return kmalloc_large_node(size, flags, node);
+	if (unlikely(size > SLUB_MAX_SIZE)) {
+		ret = kmalloc_large_node(size, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << get_order(size),
+					  flags, node);
+
+		return ret;
+	}
 
 	s = get_slab(size, flags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc(s, flags, node, _RET_IP_);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, ret,
+				  size, s->size, flags, node);
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
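
Note that the oversize branch above reports PAGE_SIZE << get_order(size) as the allocated size, since kmalloc_large_node() hands out whole pages rather than a slab object. A worked example, assuming 4 KiB pages:

/*
 * bytes_req = 9000, PAGE_SIZE = 4096:
 *   get_order(9000) == 2 (four pages needed),
 *   so bytes_alloc = 4096 << 2 == 16384.
 * The event records 9000 requested vs. 16384 actually allocated.
 */
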
@@ -2755,6 +2806,8 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
@@ -3224,6 +3277,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
@@ -3233,13 +3287,20 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, -1, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, caller, ret, size,
+			     s->size, gfpflags);
+
+	return ret;
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 				  int node, unsigned long caller)
 {
 	struct kmem_cache *s;
+	void *ret;
 
 	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
@@ -3249,7 +3310,13 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc(s, gfpflags, node, caller);
+
+	/* Honor the call site pointer we received. */
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, caller, ret,
+				  size, s->size, gfpflags, node);
+
+	return ret;
 }
 
 #ifdef CONFIG_SLUB_DEBUG