mm/slub.c: wrap cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL
cpu_slab's field partial is used when CONFIG_SLUB_CPU_PARTIAL is set, which means we can save a pointer's space on each cpu for every slub item. This patch wraps cpu_slab->partial in CONFIG_SLUB_CPU_PARTIAL and wraps its sysfs use too. [akpm@linux-foundation.org: avoid strange 80-col tricks] Link: http://lkml.kernel.org/r/20170502144533.10729-3-richard.weiyang@gmail.com Signed-off-by: Wei Yang <richard.weiyang@gmail.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
@@ -41,12 +41,31 @@ struct kmem_cache_cpu {
|
||||
void **freelist; /* Pointer to next available object */
|
||||
unsigned long tid; /* Globally unique transaction id */
|
||||
struct page *page; /* The slab from which we are allocating */
|
||||
#ifdef CONFIG_SLUB_CPU_PARTIAL
|
||||
struct page *partial; /* Partially allocated frozen slabs */
|
||||
#endif
|
||||
#ifdef CONFIG_SLUB_STATS
|
||||
unsigned stat[NR_SLUB_STAT_ITEMS];
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
 * Accessors for the per-cpu partial slab list.
 *
 * With CONFIG_SLUB_CPU_PARTIAL enabled these operate on the ->partial
 * field of a kmem_cache_cpu; with it disabled the field does not exist,
 * so the accessors collapse to NULL / no-ops and callers need no
 * #ifdefs of their own.
 */
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

/* Pop the list head: point ->partial at the next partial slab. */
#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

/* Read ->partial exactly once, for use outside the cpu-slab lock. */
#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)		NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
Reference in New Issue
Block a user