Merge branch 'tracing/kmemtrace' into tracing/kmemtrace2
 mm/slub.c | 50
@@ -25,6 +25,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -154,6 +155,10 @@
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+#define OO_SHIFT	16
+#define OO_MASK		((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
+
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
@@ -291,7 +296,7 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 						unsigned long size)
 {
 	struct kmem_cache_order_objects x = {
-		(order << 16) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
 	};
 
 	return x;
@@ -299,12 +304,12 @@ static inline struct kmem_cache_order_objects oo_make(int order,
 
 static inline int oo_order(struct kmem_cache_order_objects x)
 {
-	return x.x >> 16;
+	return x.x >> OO_SHIFT;
 }
 
 static inline int oo_objects(struct kmem_cache_order_objects x)
 {
-	return x.x & ((1 << 16) - 1);
+	return x.x & OO_MASK;
 }
 
 #ifdef CONFIG_SLUB_DEBUG
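
Note: the two hunks above switch oo_make()/oo_order()/oo_objects() from hard-coded 16-bit constants to the new OO_SHIFT/OO_MASK macros. The kmem_cache_order_objects word packs the slab's page order into the high bits and its object count into the low 16 bits. A minimal userspace sketch of the same encoding (PAGE_SZ, the struct name, and main() are illustrative stand-ins, not kernel code):

	#include <stdio.h>

	#define OO_SHIFT	16
	#define OO_MASK		((1 << OO_SHIFT) - 1)
	#define PAGE_SZ		4096UL	/* stand-in for the kernel's PAGE_SIZE */

	struct order_objects { unsigned long x; };

	/* Pack: high bits hold the page order, low 16 bits hold the object count. */
	static struct order_objects oo_make(int order, unsigned long size)
	{
		struct order_objects x = {
			(order << OO_SHIFT) + (PAGE_SZ << order) / size
		};
		return x;
	}

	static int oo_order(struct order_objects x)	{ return x.x >> OO_SHIFT; }
	static int oo_objects(struct order_objects x)	{ return x.x & OO_MASK; }

	int main(void)
	{
		/* An order-1 slab (two pages) of 256-byte objects holds 32 objects. */
		struct order_objects oo = oo_make(1, 256);
		printf("order=%d objects=%d\n", oo_order(oo), oo_objects(oo));
		return 0;
	}
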
@@ -693,7 +698,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 		object_err(s, page, p, "Freepointer corrupt");
 		/*
-		 * No choice but to zap it and thus loose the remainder
+		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
@@ -765,8 +770,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	}
 
 	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
-	if (max_objects > 65535)
-		max_objects = 65535;
+	if (max_objects > MAX_OBJS_PER_PAGE)
+		max_objects = MAX_OBJS_PER_PAGE;
 
 	if (page->objects != max_objects) {
 		slab_err(s, page, "Wrong number of objects. Found %d but "
@@ -1592,6 +1597,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
+	if (should_failslab(s->objsize, gfpflags))
+		return NULL;
+
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
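
Note: the slab_alloc() hunk above adds a might_sleep_if() check for blocking allocation contexts and an early return when should_failslab() requests an injected failure, before any per-CPU slab state is touched. A rough userspace analogue of that bail-out-first pattern, with a hypothetical should_fail_alloc() standing in for the kernel's fault-injection hook:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for should_failslab(): pretend fault
	 * injection asks us to fail every 4th allocation. */
	static int should_fail_alloc(void)
	{
		static int calls;
		return (++calls % 4) == 0;
	}

	static void *checked_alloc(size_t size)
	{
		if (should_fail_alloc())	/* give up before doing any real work */
			return NULL;
		return malloc(size);
	}

	int main(void)
	{
		for (int i = 0; i < 8; i++) {
			void *p = checked_alloc(32);
			printf("alloc %d -> %s\n", i, p ? "ok" : "injected failure");
			free(p);
		}
		return 0;
	}
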
@@ -1766,7 +1776,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1838,8 +1848,8 @@ static inline int slab_order(int size, int min_objects,
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > 65535)
-		return get_order(size * 65535) - 1;
+	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
 				fls(min_objects * size - 1) - PAGE_SHIFT);
@@ -2104,8 +2114,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  * memory on a fresh node that has no slab structures yet.
  */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							   int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2143,7 +2152,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2175,8 +2183,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				n = early_kmem_cache_node_alloc(gfpflags,
-								node);
+				early_kmem_cache_node_alloc(gfpflags, node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
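
Note: across the three hunks above, early_kmem_cache_node_alloc() becomes a void function and its only caller shown here, init_kmem_cache_nodes(), simply calls it and continues instead of capturing the returned node. (Presumably the function registers the bootstrapped node itself, making the return value redundant; that rationale is not visible in this diff.)
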
@@ -3176,8 +3183,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name))
+		if (sysfs_slab_alias(s, name)) {
+			down_write(&slub_lock);
+			s->refcount--;
+			up_write(&slub_lock);
 			goto err;
+		}
 		return s;
 	}
 
@@ -3187,8 +3198,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
 			up_write(&slub_lock);
-			if (sysfs_slab_add(s))
+			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
+				list_del(&s->list);
+				up_write(&slub_lock);
+				kfree(s);
 				goto err;
+			}
 			return s;
 		}
 		kfree(s);
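
Note: both kmem_cache_create() hunks above apply the same error-handling pattern. Because sysfs registration runs after slub_lock has been released, a failing sysfs_slab_alias() or sysfs_slab_add() now retakes the lock, undoes the state that was just published (the refcount bump in the alias case; the slab_caches list entry plus the cache allocation in the add case), drops the lock again, and only then takes the common err exit.
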
@@ -4412,7 +4428,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
  */
 struct saved_alias {
 	struct kmem_cache *s;