queueing.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <[email protected]>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>
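
/*
 * Allocates one multicore_worker per possible CPU, each wrapping a
 * work_struct bound to @function with private context @ptr. Returns NULL on
 * allocation failure.
 */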
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
	int cpu;
	struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

	if (!worker)
		return NULL;

	for_each_possible_cpu(cpu) {
		per_cpu_ptr(worker, cpu)->ptr = ptr;
		INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
	}
	return worker;
}
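
/*
 * Initializes a crypt_queue: a ptr_ring holding at most @len entries plus the
 * per-CPU workers that drain it. If worker allocation fails, the ring is
 * cleaned up and -ENOMEM is returned.
 */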
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len)
{
	int ret;

	memset(queue, 0, sizeof(*queue));
	queue->last_cpu = -1;
	ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
	if (ret)
		return ret;
	queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
	if (!queue->worker) {
		ptr_ring_cleanup(&queue->ring, NULL);
		return -ENOMEM;
	}
	return 0;
}
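
/*
 * Tears down a crypt_queue. The ring is expected to be empty unless @purge is
 * set, in which case any remaining skbs are freed via __skb_array_destroy_skb.
 */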
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
	free_percpu(queue->worker);
	WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
	ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
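
/*
 * The prev_queue below is an intrusive multi-producer, single-consumer queue.
 * Rather than allocating nodes, it links queued skbs through their ->prev
 * pointer (aliased as NEXT()) and uses the embedded ->empty member of
 * struct prev_queue as a stub node (aliased as STUB()).
 */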
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)
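
/*
 * Initializes the queue to contain only the stub node. The BUILD_BUG_ON
 * verifies that the ->next/->prev offsets of the embedded ->empty stub match
 * those of struct sk_buff, so that STUB() may safely be treated as an sk_buff.
 */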
void wg_prev_queue_init(struct prev_queue *queue)
{
	NEXT(STUB(queue)) = NULL;
	queue->head = queue->tail = STUB(queue);
	queue->peeked = NULL;
	atomic_set(&queue->count, 0);
	BUILD_BUG_ON(
		offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
						  offsetof(struct prev_queue, empty) ||
		offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
						  offsetof(struct prev_queue, empty));
}
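
/*
 * Producer-side enqueue: the new skb becomes the head via an atomic exchange
 * with release semantics, and the previous head is then linked to it. Safe to
 * call from multiple producers concurrently.
 */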
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	WRITE_ONCE(NEXT(skb), NULL);
	WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}
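
/* Bounded enqueue: refuses the skb once MAX_QUEUED_PACKETS are in flight. */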
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
		return false;
	__wg_prev_queue_enqueue(queue, skb);
	return true;
}
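
/*
 * Single-consumer dequeue. The stub node is skipped if it sits at the tail
 * and is re-inserted when removing the final real element. Returns NULL if
 * the queue is empty or if a racing producer has exchanged the head but not
 * yet linked its skb.
 */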
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
	struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

	if (tail == STUB(queue)) {
		if (!next)
			return NULL;
		queue->tail = next;
		tail = next;
		next = smp_load_acquire(&NEXT(next));
	}
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	if (tail != READ_ONCE(queue->head))
		return NULL;
	__wg_prev_queue_enqueue(queue, STUB(queue));
	next = smp_load_acquire(&NEXT(tail));
	if (next) {
		queue->tail = next;
		atomic_dec(&queue->count);
		return tail;
	}
	return NULL;
}

#undef NEXT
#undef STUB
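
/*
 * Usage sketch (illustrative only, not part of this file): producers enqueue
 * with bounded backpressure while a single consumer drains. The names q,
 * process() and the drop policy here are hypothetical:
 *
 *	if (!wg_prev_queue_enqueue(&q, skb))
 *		kfree_skb(skb);                // queue full, drop the packet
 *	...
 *	while ((skb = wg_prev_queue_dequeue(&q)) != NULL)
 *		process(skb);                  // single consumer context only
 */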