// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>

#include <urcu/uatomic.h>
int nr_allocated;
int preempt_count;
int test_verbose;

/*
 * Userspace stand-in for a kernel slab cache: a mutex-protected list of
 * recently freed objects plus allocation counters for the test harness.
 */
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
	unsigned int non_kernel;
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	/* Non-sleeping allocations succeed only while non_kernel credit lasts. */
	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Reuse an object from the per-cache free list. */
		struct radix_tree_node *node = cachep->objs;

		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	/*
	 * Keep only a handful of freed objects on the per-cache list; aligned
	 * caches and any overflow are poisoned and handed back to the system
	 * allocator.
	 */
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;

		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}

void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}

void kmem_cache_shrink(struct kmem_cache *cachep)
{
	/* Nothing to shrink in this userspace shim. */
}
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
		void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		/* Enough cached objects to satisfy the whole request. */
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size * size);
			} else {
				p[i] = malloc(cachep->size * size);
				if (!p[i])
					break;
			}
			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		/* Partial failure: give everything back and report no progress. */
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}
/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk
 * counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Test the bulk allocators on an unaligned kmem_cache so the bulk
	 * alloc/free paths reuse objects from the per-cache free list.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
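
/*
 * Illustrative sketch, not part of the upstream harness: one way a standalone
 * caller might drive the slab shim above, showing the non_kernel budget for
 * non-sleeping allocations. Compiled out by default behind a hypothetical
 * SLAB_SHIM_EXAMPLE macro; the function name example_slab_shim_usage() is an
 * assumption, not an upstream symbol.
 */
#ifdef SLAB_SHIM_EXAMPLE
static void example_slab_shim_usage(void)
{
	struct kmem_cache *cache;
	void *obj;

	/* 64-byte objects, no alignment, no constructor. */
	cache = kmem_cache_create("example", 64, 0, SLAB_PANIC, NULL);

	/* Sleeping allocation: always allowed; __GFP_ZERO zeroes fresh memory. */
	obj = kmem_cache_alloc_lru(cache, NULL, __GFP_DIRECT_RECLAIM | __GFP_ZERO);
	assert(obj);
	kmem_cache_free(cache, obj);

	/* Non-sleeping allocations fail once the non_kernel budget is spent. */
	kmem_cache_set_non_kernel(cache, 1);
	obj = kmem_cache_alloc_lru(cache, NULL, 0);
	assert(obj);
	assert(kmem_cache_alloc_lru(cache, NULL, 0) == NULL);
	kmem_cache_free(cache, obj);

	/* Every successful allocation was freed, so the counter is back to zero. */
	assert(kmem_cache_nr_allocated(cache) == 0);
}
#endif /* SLAB_SHIM_EXAMPLE */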