mm/sl[aou]b: Common alignment code
Extract the code to do object alignment from the allocators. Do the alignment calculations in slab_common so that the __kmem_cache_create functions of the allocators do not have to deal with alignment. Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:

committed by
Pekka Enberg

Parent:
2f9baa9fcf
Current commit:
4590685546
38
mm/slub.c
38
mm/slub.c
@@ -2760,32 +2760,6 @@ static inline int calculate_order(int size, int reserved)
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Figure out what the alignment of the objects will be.
|
||||
*/
|
||||
static unsigned long calculate_alignment(unsigned long flags,
|
||||
unsigned long align, unsigned long size)
|
||||
{
|
||||
/*
|
||||
* If the user wants hardware cache aligned objects then follow that
|
||||
* suggestion if the object is sufficiently large.
|
||||
*
|
||||
* The hardware cache alignment cannot override the specified
|
||||
* alignment though. If that is greater then use it.
|
||||
*/
|
||||
if (flags & SLAB_HWCACHE_ALIGN) {
|
||||
unsigned long ralign = cache_line_size();
|
||||
while (size <= ralign / 2)
|
||||
ralign /= 2;
|
||||
align = max(align, ralign);
|
||||
}
|
||||
|
||||
if (align < ARCH_SLAB_MINALIGN)
|
||||
align = ARCH_SLAB_MINALIGN;
|
||||
|
||||
return ALIGN(align, sizeof(void *));
|
||||
}
|
||||
|
||||
static void
|
||||
init_kmem_cache_node(struct kmem_cache_node *n)
|
||||
{
|
||||
@@ -2919,7 +2893,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
|
||||
{
|
||||
unsigned long flags = s->flags;
|
||||
unsigned long size = s->object_size;
|
||||
unsigned long align = s->align;
|
||||
int order;
|
||||
|
||||
/*
|
||||
@@ -2990,20 +2963,12 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
|
||||
size += sizeof(void *);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Determine the alignment based on various parameters that the
|
||||
* user specified and the dynamic determination of cache line size
|
||||
* on bootup.
|
||||
*/
|
||||
align = calculate_alignment(flags, align, s->object_size);
|
||||
s->align = align;
|
||||
|
||||
/*
|
||||
* SLUB stores one object immediately after another beginning from
|
||||
* offset 0. In order to align the objects we have to simply size
|
||||
* each object to conform to the alignment.
|
||||
*/
|
||||
size = ALIGN(size, align);
|
||||
size = ALIGN(size, s->align);
|
||||
s->size = size;
|
||||
if (forced_order >= 0)
|
||||
order = forced_order;
|
||||
@@ -3032,7 +2997,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
|
||||
s->max = s->oo;
|
||||
|
||||
return !!oo_objects(s->oo);
|
||||
|
||||
}
|
||||
|
||||
static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
|
||||
|
Reference in New Issue
Block a user