// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with a per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with an extra 8 bytes that hold a
 * struct llist_node.
 */
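
/* Illustrative sketch of how a caller typically drives the fixed-size API
 * defined below: create one bucket of 64-byte elements, allocate and free an
 * element (safe in any context, including NMI), then tear the cache down.
 * Assumes migration is disabled at the alloc/free call sites, as the exported
 * helpers require.
 *
 *	struct bpf_mem_alloc ma;
 *	void *obj;
 *
 *	if (bpf_mem_alloc_init(&ma, 64, false))
 *		return -ENOMEM;
 *	obj = bpf_mem_cache_alloc(&ma);
 *	if (obj)
 *		bpf_mem_cache_free(&ma, obj);
 *	bpf_mem_alloc_destroy(&ma);
 */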

#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
        3,      /* 8 */
        3,      /* 16 */
        4,      /* 24 */
        4,      /* 32 */
        5,      /* 40 */
        5,      /* 48 */
        5,      /* 56 */
        5,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        6,      /* 104 */
        6,      /* 112 */
        6,      /* 120 */
        6,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
        if (!size || size > 4096)
                return -1;

        if (size <= 192)
                return size_index[(size - 1) / 8] - 1;

        return fls(size - 1) - 2;
}

#define NUM_CACHES 11

struct bpf_mem_cache {
        /* per-cpu list of free objects of size 'unit_size'.
         * All accesses are done with interrupts disabled and 'active' counter
         * protection with __llist_add() and __llist_del_first().
         */
        struct llist_head free_llist;
        local_t active;

        /* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
         * are sequenced by per-cpu 'active' counter. But unit_free() cannot
         * fail. When 'active' is busy the unit_free() will add an object to
         * free_llist_extra.
         */
        struct llist_head free_llist_extra;

        struct irq_work refill_work;
        struct obj_cgroup *objcg;
        int unit_size;
        /* count of objects in free_llist */
        int free_cnt;
        int low_watermark, high_watermark, batch;
        int percpu_size;

        struct rcu_head rcu;
        struct llist_head free_by_rcu;
        struct llist_head waiting_for_gp;
        atomic_t call_rcu_in_progress;
};

struct bpf_mem_caches {
        struct bpf_mem_cache cache[NUM_CACHES];
};
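
/* Non-atomic counterpart of llist_del_first(). Safe only because callers
 * provide exclusion via disabled irqs and the per-cpu 'active' counter,
 * as described in struct bpf_mem_cache above.
 */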
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
        struct llist_node *entry, *next;

        entry = head->first;
        if (!entry)
                return NULL;
        next = entry->next;
        head->first = next;
        return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node)
{
        /* Allocate, but don't deplete atomic reserves that typical
         * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
         * will allocate from the current numa node which is what we
         * want here.
         */
        gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;

        if (c->percpu_size) {
                void **obj = kmalloc_node(c->percpu_size, flags, node);
                void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

                if (!obj || !pptr) {
                        free_percpu(pptr);
                        kfree(obj);
                        return NULL;
                }
                obj[1] = pptr;
                return obj;
        }

        return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
        if (c->objcg)
                return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
        return root_mem_cgroup;
#else
        return NULL;
#endif
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
        struct mem_cgroup *memcg = NULL, *old_memcg;
        unsigned long flags;
        void *obj;
        int i;

        memcg = get_memcg(c);
        old_memcg = set_active_memcg(memcg);
        for (i = 0; i < cnt; i++) {
                obj = __alloc(c, node);
                if (!obj)
                        break;
                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        /* In RT irq_work runs in per-cpu kthread, so disable
                         * interrupts to avoid preemption and interrupts and
                         * reduce the chance of bpf prog executing on this cpu
                         * when active counter is busy.
                         */
                        local_irq_save(flags);
                /* alloc_bulk runs from irq_work which will not preempt a bpf
                 * program that does unit_alloc/unit_free since IRQs are
                 * disabled there. There is no race to increment 'active'
                 * counter. It protects free_llist from corruption in case NMI
                 * bpf prog preempted this loop.
                 */
                WARN_ON_ONCE(local_inc_return(&c->active) != 1);
                __llist_add(obj, &c->free_llist);
                c->free_cnt++;
                local_dec(&c->active);
                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        local_irq_restore(flags);
        }
        set_active_memcg(old_memcg);
        mem_cgroup_put(memcg);
}
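
/* Release one object back to the kernel allocator. For per-cpu caches the
 * object is the {llist_node, per-cpu pointer} pair built in __alloc().
 */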
static void free_one(struct bpf_mem_cache *c, void *obj)
{
        if (c->percpu_size) {
                free_percpu(((void **)obj)[1]);
                kfree(obj);
                return;
        }

        kfree(obj);
}
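
/* Regular RCU callback: everything parked on waiting_for_gp has now survived
 * both an RCU tasks trace and a regular RCU grace period, so free it and
 * allow the next do_call_rcu() to start.
 */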
static void __free_rcu(struct rcu_head *head)
{
        struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
        struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
        struct llist_node *pos, *t;

        llist_for_each_safe(pos, t, llnode)
                free_one(c, pos);
        atomic_set(&c->call_rcu_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
        struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);

        call_rcu(&c->rcu, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
        struct llist_node *llnode = obj;

        /* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
         * Nothing races to add to free_by_rcu list.
         */
        __llist_add(llnode, &c->free_by_rcu);
}

static void do_call_rcu(struct bpf_mem_cache *c)
{
        struct llist_node *llnode, *t;

        if (atomic_xchg(&c->call_rcu_in_progress, 1))
                return;

        WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
        llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
                /* There is no concurrent __llist_add(waiting_for_gp) access.
                 * It doesn't race with llist_del_all either.
                 * But there could be two concurrent llist_del_all(waiting_for_gp):
                 * from __free_rcu() and from drain_mem_cache().
                 */
                __llist_add(llnode, &c->waiting_for_gp);
        /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
         * Then use call_rcu() to wait for normal progs to finish
         * and finally do free_one() on each element.
         */
        call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
}
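
/* Trim the per-cpu freelist back to roughly the midpoint between the
 * watermarks and hand the surplus, plus anything on free_llist_extra,
 * to the RCU-deferred free path.
 */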
static void free_bulk(struct bpf_mem_cache *c)
{
        struct llist_node *llnode, *t;
        unsigned long flags;
        int cnt;

        do {
                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        local_irq_save(flags);
                WARN_ON_ONCE(local_inc_return(&c->active) != 1);
                llnode = __llist_del_first(&c->free_llist);
                if (llnode)
                        cnt = --c->free_cnt;
                else
                        cnt = 0;
                local_dec(&c->active);
                if (IS_ENABLED(CONFIG_PREEMPT_RT))
                        local_irq_restore(flags);
                if (llnode)
                        enque_to_free(c, llnode);
        } while (cnt > (c->high_watermark + c->low_watermark) / 2);

        /* and drain free_llist_extra */
        llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
                enque_to_free(c, llnode);
        do_call_rcu(c);
}
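
/* irq_work callback: keep free_cnt between the watermarks by either bulk
 * allocating or bulk freeing on the cpu that raised the work.
 */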
static void bpf_mem_refill(struct irq_work *work)
{
        struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
        int cnt;

        /* Racy access to free_cnt. It doesn't need to be 100% accurate */
        cnt = c->free_cnt;
        if (cnt < c->low_watermark)
                /* irq_work runs on this cpu and kmalloc will allocate
                 * from the current numa node which is what we want here.
                 */
                alloc_bulk(c, c->batch, NUMA_NO_NODE);
        else if (cnt > c->high_watermark)
                free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
        irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
 * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes,
 * assuming (low_mark + high_mark) / 2 as the average number of elements per
 * bucket and all buckets in use, the total amount of memory in the freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using the heuristic below.
 * An initialized but unused bpf allocator (not a bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * The typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
        init_irq_work(&c->refill_work, bpf_mem_refill);
        if (c->unit_size <= 256) {
                c->low_watermark = 32;
                c->high_watermark = 96;
        } else {
                /* When page_size == 4k, order-0 cache will have low_mark == 2
                 * and high_mark == 6 with batch alloc of 3 individual pages at
                 * a time.
                 * 8k allocs and above low == 1, high == 3, batch == 1.
                 */
                c->low_watermark = max(32 * 256 / c->unit_size, 1);
                c->high_watermark = max(96 * 256 / c->unit_size, 3);
        }
        c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

        /* To avoid consuming memory assume that 1st run of bpf
         * prog won't be doing more than 4 map_update_elem from
         * irq disabled region
         */
        alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}

/* When size != 0 allocate a single bpf_mem_cache for each cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
        static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
        struct bpf_mem_caches *cc, __percpu *pcc;
        struct bpf_mem_cache *c, __percpu *pc;
        struct obj_cgroup *objcg = NULL;
        int cpu, i, unit_size, percpu_size = 0;

        if (size) {
                pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
                if (!pc)
                        return -ENOMEM;

                if (percpu)
                        /* room for llist_node and per-cpu pointer */
                        percpu_size = LLIST_NODE_SZ + sizeof(void *);
                else
                        size += LLIST_NODE_SZ; /* room for llist_node */
                unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
                objcg = get_obj_cgroup_from_current();
#endif
                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(pc, cpu);
                        c->unit_size = unit_size;
                        c->objcg = objcg;
                        c->percpu_size = percpu_size;
                        prefill_mem_cache(c, cpu);
                }
                ma->cache = pc;
                return 0;
        }

        /* size == 0 && percpu is an invalid combination */
        if (WARN_ON_ONCE(percpu))
                return -EINVAL;

        pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
        if (!pcc)
                return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
        objcg = get_obj_cgroup_from_current();
#endif
        for_each_possible_cpu(cpu) {
                cc = per_cpu_ptr(pcc, cpu);
                for (i = 0; i < NUM_CACHES; i++) {
                        c = &cc->cache[i];
                        c->unit_size = sizes[i];
                        c->objcg = objcg;
                        prefill_mem_cache(c, cpu);
                }
        }
        ma->caches = pcc;
        return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
{
        struct llist_node *llnode, *t;

        /* No progs are using this bpf_mem_cache, but htab_map_free() called
         * bpf_mem_cache_free() for all remaining elements and they can be in
         * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
         *
         * Except for waiting_for_gp list, there are no concurrent operations
         * on these lists, so it is safe to use __llist_del_all().
         */
        llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
                free_one(c, llnode);
        llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
                free_one(c, llnode);
        llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
                free_one(c, llnode);
        llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
                free_one(c, llnode);
}
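
/* Free the per-cpu caches themselves. Callers must guarantee that no RCU
 * callback can still reference them; see free_mem_alloc() below.
 */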
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
        free_percpu(ma->cache);
        free_percpu(ma->caches);
        ma->cache = NULL;
        ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
        /* The waiting_for_gp lists were drained, but __free_rcu might still be
         * executing. Wait for it now before freeing the percpu caches.
         */
        rcu_barrier_tasks_trace();
        rcu_barrier();
        free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
        struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

        free_mem_alloc(ma);
        kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
        struct bpf_mem_alloc *copy;

        if (!rcu_in_progress) {
                /* Fast path. No callbacks are pending, hence no need to do
                 * rcu_barrier-s.
                 */
                free_mem_alloc_no_barrier(ma);
                return;
        }

        copy = kmalloc(sizeof(*ma), GFP_KERNEL);
        if (!copy) {
                /* Slow path with inline barrier-s */
                free_mem_alloc(ma);
                return;
        }

        /* Defer the barriers into a worker to let the rest of the map memory
         * be freed.
         */
        copy->cache = ma->cache;
        ma->cache = NULL;
        copy->caches = ma->caches;
        ma->caches = NULL;
        INIT_WORK(&copy->work, free_mem_alloc_deferred);
        queue_work(system_unbound_wq, &copy->work);
}
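
/* Tear down the allocator: sync any pending refill irq_work, drain every
 * per-cpu list, then release the per-cpu storage, deferring to a workqueue
 * when RCU callbacks are still in flight.
 */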
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
        struct bpf_mem_caches *cc;
        struct bpf_mem_cache *c;
        int cpu, i, rcu_in_progress;

        if (ma->cache) {
                rcu_in_progress = 0;
                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(ma->cache, cpu);
                        /*
                         * refill_work may be unfinished for PREEMPT_RT kernel
                         * in which irq work is invoked in a per-CPU RT thread.
                         * It is also possible for kernel with
                         * arch_irq_work_has_interrupt() being false and irq
                         * work is invoked in timer interrupt. So wait for the
                         * completion of irq work to ease the handling of
                         * concurrency.
                         */
                        irq_work_sync(&c->refill_work);
                        drain_mem_cache(c);
                        rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                }
                /* objcg is the same across cpus */
                if (c->objcg)
                        obj_cgroup_put(c->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
        if (ma->caches) {
                rcu_in_progress = 0;
                for_each_possible_cpu(cpu) {
                        cc = per_cpu_ptr(ma->caches, cpu);
                        for (i = 0; i < NUM_CACHES; i++) {
                                c = &cc->cache[i];
                                irq_work_sync(&c->refill_work);
                                drain_mem_cache(c);
                                rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                        }
                }
                if (c->objcg)
                        obj_cgroup_put(c->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
        struct llist_node *llnode = NULL;
        unsigned long flags;
        int cnt = 0;

        /* Disable irqs to prevent the following race for majority of prog types:
         * prog_A
         *   bpf_mem_alloc
         *      preemption or irq -> prog_B
         *        bpf_mem_alloc
         *
         * but prog_B could be a perf_event NMI prog.
         * Use per-cpu 'active' counter to order free_list access between
         * unit_alloc/unit_free/bpf_mem_refill.
         */
        local_irq_save(flags);
        if (local_inc_return(&c->active) == 1) {
                llnode = __llist_del_first(&c->free_llist);
                if (llnode)
                        cnt = --c->free_cnt;
        }
        local_dec(&c->active);
        local_irq_restore(flags);

        WARN_ON(cnt < 0);

        if (cnt < c->low_watermark)
                irq_work_raise(c);
        return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
        struct llist_node *llnode = ptr - LLIST_NODE_SZ;
        unsigned long flags;
        int cnt = 0;

        BUILD_BUG_ON(LLIST_NODE_SZ > 8);

        local_irq_save(flags);
        if (local_inc_return(&c->active) == 1) {
                __llist_add(llnode, &c->free_llist);
                cnt = ++c->free_cnt;
        } else {
                /* unit_free() cannot fail. Therefore add an object to atomic
                 * llist. free_bulk() will drain it. Though free_llist_extra is
                 * a per-cpu list we have to use atomic llist_add here, since
                 * it also can be interrupted by bpf nmi prog that does another
                 * unit_free() into the same free_llist_extra.
                 */
                llist_add(llnode, &c->free_llist_extra);
        }
        local_dec(&c->active);
        local_irq_restore(flags);

        if (cnt > c->high_watermark)
                /* free few objects from current cpu into global kmalloc pool */
                irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
        int idx;
        void *ret;

        if (!size)
                return ZERO_SIZE_PTR;

        idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
        if (idx < 0)
                return NULL;

        ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
        return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
        int idx;

        if (!ptr)
                return;

        idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
        if (idx < 0)
                return;

        unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}
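
/* Fixed-size variants used when bpf_mem_alloc_init() was called with
 * size != 0, i.e. a single bucket per cpu.
 */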
void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
        void *ret;

        ret = unit_alloc(this_cpu_ptr(ma->cache));
        return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
        if (!ptr)
                return;

        unit_free(this_cpu_ptr(ma->cache), ptr);
}