/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
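
/*
 * Illustrative sketch (not part of this header, compiled out): mm/slub.c
 * bumps these counters with a small per-cpu helper along these lines. A
 * plain per-cpu increment is used; the read-modify-write is racy on a
 * preemptible kernel, but an occasional lost count is acceptable for
 * statistics and avoids the cost of disabling interrupts.
 */
#if 0
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_inc(s->cpu_slab->stat[si]);	/* racy but tolerable */
#endif
}
#endif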
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
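
/*
 * Illustrative sketch (not part of this header, compiled out): the layout
 * constraint above exists because the allocation fast path swaps freelist
 * and tid as one unit with this_cpu_cmpxchg_double(), roughly as below.
 * object, tid and next_object are locals of the hypothetical caller and
 * next_tid() is a helper from mm/slub.c.
 */
#if 0
	if (unlikely(!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
					      s->cpu_slab->tid,
					      object, tid,
					      next_object, next_tid(tid)))) {
		/* Another context touched this cpu slab; retry the fast path. */
	}
#endif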
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
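
/*
 * Illustrative sketch (not part of this header, compiled out): note that
 * slub_set_percpu_partial(c, p) stores p->next, i.e. it advances the
 * per-cpu partial list past p. A hypothetical helper popping the list
 * head would therefore look like this:
 */
#if 0
static struct slab *pop_percpu_partial(struct kmem_cache_cpu *c)
{
	struct slab *slab = slub_percpu_partial(c);

	if (slab)
		slub_set_percpu_partial(c, slab);	/* c->partial = slab->next */
	return slab;
}
#endif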
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
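
/*
 * Illustrative sketch (not part of this header, compiled out): mm/slub.c
 * packs the page order into the high bits of x and the object count into
 * the low bits, along these lines (constants shown for illustration):
 */
#if 0
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
#endif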
/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};
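
/*
 * Illustrative sketch (not part of this header, compiled out):
 * reciprocal_size caches a reciprocal_value() of size so that the
 * object-index math further down can use a multiply-and-shift instead of
 * a division; it is set up when the cache geometry is computed, roughly:
 */
#if 0
	s->size = size;
	s->reciprocal_size = reciprocal_value(size);
#endif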
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif
void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x) {
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
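
/*
 * Illustrative sketch (not part of this header, compiled out): nearest_obj()
 * rounds an arbitrary pointer into a slab down to the start of the
 * containing object, clamps it to the last object, then skips any left
 * redzone. A hypothetical caller resolving a stray interior pointer
 * (virt_to_slab() lives in mm/slab.h):
 */
#if 0
	void *base = nearest_obj(s, virt_to_slab(ptr), ptr);
#endif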
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
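
/*
 * Illustrative note (not part of this header, compiled out): the
 * reciprocal divide above is just a fast equivalent of dividing the
 * object's offset within the slab by the object size. KFENCE objects
 * sit outside the slab layout, so they always map to index 0.
 */
#if 0
	unsigned int idx = obj_to_index(s, slab, obj);
	/* idx == (kasan_reset_tag(obj) - slab_address(slab)) / s->size */
#endif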
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

#endif /* _LINUX_SLUB_DEF_H */