/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
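
/*
 * Illustrative sketch (not part of this header): with CONFIG_SLUB_STATS
 * enabled, mm/slub.c bumps these counters with a tiny per-CPU helper of
 * roughly this shape. The stat_inc_example() name is made up for
 * illustration.
 */
#ifdef CONFIG_SLUB_STATS
static inline void stat_inc_example(struct kmem_cache_cpu *c, enum stat_item si)
{
	/* A racy increment is tolerated; the counters are only statistics. */
	c->stat[si]++;
}
#endif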

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
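
/*
 * Usage sketch (hypothetical helper, named here for illustration): detach
 * the first slab from the per-CPU partial list. This only compiles where
 * struct slab is fully defined (i.e. in mm/slub.c, not in this header),
 * since slub_set_percpu_partial() follows p->next; the real code also
 * runs this under c->lock rather than lockless as shown.
 */
static inline struct slab *percpu_partial_pop_example(struct kmem_cache_cpu *c)
{
	struct slab *p = slub_percpu_partial_read_once(c);

	if (p)
		slub_set_percpu_partial(c, p);	/* advance head to p->next */
	return p;
}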

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
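
/*
 * Decoding sketch: helpers of this shape live in mm/slub.c; the *_example
 * names and the 16-bit split are restated here for illustration. The page
 * order sits in the high bits and the object count in the low bits, so
 * both travel together in one word-sized read or write.
 */
#define OO_EXAMPLE_SHIFT	16
#define OO_EXAMPLE_MASK		((1 << OO_EXAMPLE_SHIFT) - 1)

static inline unsigned int oo_order_example(struct kmem_cache_order_objects x)
{
	return x.x >> OO_EXAMPLE_SHIFT;
}

static inline unsigned int oo_objects_example(struct kmem_cache_order_objects x)
{
	return x.x & OO_EXAMPLE_MASK;
}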

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;	/* Offset to metadata */
	unsigned int align;	/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
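
/*
 * Layout sketch (assumed shape, loosely mirroring the mm/slub.c allocation
 * fastpath; the helper name is made up and the real code adds a locked
 * fallback): freelist and tid are swapped together in a single
 * this_cpu_cmpxchg_double(), which is why the comment above
 * struct kmem_cache_cpu pins their order and alignment.
 */
#ifdef CONFIG_HAVE_CMPXCHG_DOUBLE
static inline bool fastpath_cmpxchg_example(struct kmem_cache *s,
		void *object, unsigned long tid,
		void *next_object, unsigned long next_tid)
{
	return this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
				       object, tid,
				       next_object, next_tid);
}
#endif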

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

/*
 * Round an arbitrary pointer within a slab down to the start of the
 * enclosing object, clamping to the last object and skipping any left
 * redzone padding.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x) {
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
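
/*
 * Equivalence sketch (hypothetical helper, for illustration only; the
 * kfence special case in obj_to_index() is omitted): obj_to_index()
 * avoids a hardware divide. reciprocal_size is precomputed from size,
 * so the plain division below becomes a multiply and shift inside
 * reciprocal_divide().
 */
static inline unsigned int obj_to_index_slow_example(const struct kmem_cache *cache,
		const struct slab *slab, void *obj)
{
	return (kasan_reset_tag(obj) - slab_address(slab)) / cache->size;
}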

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */