/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */
struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	struct reciprocal_value reciprocal_buffer_size;

/* 2) touched by every alloc & free from the backend */
	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int freelist_size;
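	/*
	 * Editor's note (not in the original header): cache colouring
	 * staggers the starting offset of objects in successive slabs.
	 * Each new slab places its objects at the next multiple of
	 * 'colour_off', wrapping after 'colour' steps, so objects with
	 * the same index in different slabs do not all land on the same
	 * hardware cache lines.
	 */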
	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
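	/*
	 * Editor's example (hypothetical numbers): a cache created with
	 * object_size = 100 and an 8-byte left red zone would have
	 * obj_offset = 8, with 'size' grown to cover the red zones and
	 * padding while 'object_size' stays 100.
	 */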
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
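/*
 * Editor's sketch, not part of the original header: the fields above are
 * filled in by the cache setup code in mm/slab.c.  The simplified,
 * hypothetical helper below shows how 'size', 'num' and
 * 'reciprocal_buffer_size' relate once 'gfporder' has been chosen; the
 * real setup logic also accounts for the freelist, colouring and debug
 * overhead.
 */
static inline void kmem_cache_geometry_sketch(struct kmem_cache *cachep,
					      unsigned int object_size,
					      unsigned int align)
{
	/* per-object stride: user-visible size rounded up to the alignment */
	cachep->size = ALIGN(object_size, align);
	/* one divide at setup time buys divide-free obj_to_index() later */
	cachep->reciprocal_buffer_size = reciprocal_value(cachep->size);
	/* how many objects fit into 2^gfporder pages (freelist ignored) */
	cachep->num = (PAGE_SIZE << cachep->gfporder) / cachep->size;
}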
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
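/*
 * Worked example (editor's addition, hypothetical numbers): with
 * cache->size == 256 and x == slab->s_mem + 700, the remainder
 * (700 % 256) == 188, so nearest_obj() returns s_mem + 512 -- the
 * start of the object (index 2) that contains x.  The last_object
 * clamp catches pointers past the final object, e.g. into a slab's
 * unused tail.
 */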
/*
 * We want to avoid an expensive divide: (offset / cache->size).
 * Using the fact that size is a constant for a particular cache,
 * we can replace (offset / cache->size) by
 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
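/*
 * Worked example (editor's addition): reciprocal_value(256), cached in
 * 'reciprocal_buffer_size' at cache setup, yields a multiplier/shift
 * pair such that reciprocal_divide(700, rv) == 2 == 700 / 256, but
 * computed with a multiply and shifts instead of a hardware divide.
 * That matters because obj_to_index() sits on hot free paths.
 */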
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}
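/*
 * Editor's sketch, not part of the original header: a hypothetical
 * walker showing how the accessors above compose.  For a KFENCE-backed
 * slab objs_per_slab() is 1, so only the single guarded object is
 * visited.
 */
static inline void slab_for_each_obj_sketch(const struct kmem_cache *cache,
					    const struct slab *slab,
					    void (*fn)(void *obj))
{
	int i;

	for (i = 0; i < objs_per_slab(cache, slab); i++)
		fn(slab->s_mem + i * cache->size);
}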
#endif /* _LINUX_SLAB_DEF_H */