rmnet_ll.c

/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/version.h>

#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

#define RMNET_LL_MAX_RECYCLE_ITER 16

static struct rmnet_ll_stats rmnet_ll_stats;
/* For TX sync with DMA operations */
DEFINE_SPINLOCK(rmnet_ll_tx_lock);

/* Client operations for respective underlying HW */
extern struct rmnet_ll_client_ops rmnet_ll_client;
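
/* Hand every not-yet-submitted buffer in buf_list to the HW client. On a
 * queueing failure the RX queue error counter is bumped and, for buffers
 * that were only temporarily allocated, the backing page is released so it
 * does not leak; successfully queued buffers are marked submitted.
 */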
static void rmnet_ll_buffers_submit(struct rmnet_ll_endpoint *ll_ep,
                                    struct list_head *buf_list)
{
        struct rmnet_ll_buffer *ll_buf;

        list_for_each_entry(ll_buf, buf_list, list) {
                if (ll_buf->submitted)
                        continue;

                if (!rmnet_ll_client.buffer_queue ||
                    rmnet_ll_client.buffer_queue(ll_ep, ll_buf)) {
                        rmnet_ll_stats.rx_queue_err++;
                        /* Don't leak the page if we're not storing it */
                        if (ll_buf->temp_alloc)
                                put_page(ll_buf->page);
                } else {
                        ll_buf->submitted = true;
                        rmnet_ll_stats.rx_queue++;
                }
        }
}
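
/* Allocate one RX buffer of 2^page_order pages. The struct rmnet_ll_buffer
 * metadata is stored inside the allocation itself, immediately after the
 * data area, so no separate bookkeeping allocation is needed. Assuming the
 * endpoint sizes buf_len so that the footer fits inside the allocation,
 * the layout is:
 *
 *   [0, buf_len)                                    data area given to HW
 *   [buf_len, buf_len + sizeof(struct rmnet_ll_buffer))  ll_buf footer
 */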
static struct rmnet_ll_buffer *
rmnet_ll_buffer_alloc(struct rmnet_ll_endpoint *ll_ep, gfp_t gfp)
{
        struct rmnet_ll_buffer *ll_buf;
        struct page *page;
        void *addr;

        page = __dev_alloc_pages(gfp, ll_ep->page_order);
        if (!page)
                return NULL;

        /* Store the buffer information at the end */
        addr = page_address(page);
        ll_buf = addr + ll_ep->buf_len;
        ll_buf->page = page;
        ll_buf->submitted = false;
        INIT_LIST_HEAD(&ll_buf->list);
        return ll_buf;
}
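
/* Initialize an endpoint's (initially empty) RX buffer pool. The 'last'
 * cursor starts at the list head and is advanced by the recycle path so
 * each recycle pass resumes where the previous one stopped.
 */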
int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
{
        spin_lock_init(&ll_ep->buf_pool.pool_lock);
        INIT_LIST_HEAD(&ll_ep->buf_pool.buf_list);
        ll_ep->buf_pool.last = ll_ep->buf_pool.buf_list.next;
        ll_ep->buf_pool.pool_size = 0;
        return 0;
}
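
/* Drain the pool, dropping the page reference that backs each remaining
 * buffer, and clear the resume cursor.
 */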
void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
{
        struct rmnet_ll_buffer *ll_buf, *tmp;

        list_for_each_entry_safe(ll_buf, tmp, &ll_ep->buf_pool.buf_list, list) {
                list_del(&ll_buf->list);
                put_page(ll_buf->page);
        }

        ll_ep->buf_pool.last = NULL;
}
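
/* Top up the HW with RX buffers. First walk the pool from the saved 'last'
 * cursor (bounded by RMNET_LL_MAX_RECYCLE_ITER) and resubmit idle buffers,
 * then fall back to temporary GFP_ATOMIC allocations for any free
 * descriptors that remain.
 */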
void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
{
        struct rmnet_ll_buffer *ll_buf, *tmp;
        LIST_HEAD(buf_list);
        int num_tre, count = 0, iter = 0;

        if (!rmnet_ll_client.query_free_descriptors)
                goto out;

        num_tre = rmnet_ll_client.query_free_descriptors(ll_ep);
        if (!num_tre)
                goto out;

        list_for_each_entry_safe(ll_buf, tmp, ll_ep->buf_pool.last, list) {
                if (++iter > RMNET_LL_MAX_RECYCLE_ITER || count == num_tre)
                        break;

                if (ll_buf->submitted)
                        continue;

                count++;
                list_move_tail(&ll_buf->list, &buf_list);
        }

        /* Mark where we left off */
        ll_ep->buf_pool.last = &ll_buf->list;
        /* Submit any pool buffers to the HW if we found some */
        if (count) {
                rmnet_ll_buffers_submit(ll_ep, &buf_list);
                /* requeue immediately BEFORE the last checked element */
                list_splice_tail_init(&buf_list, ll_ep->buf_pool.last);
        }

        /* Do any temporary allocations needed to fill the rest */
        for (; count < num_tre; count++) {
                ll_buf = rmnet_ll_buffer_alloc(ll_ep, GFP_ATOMIC);
                if (!ll_buf)
                        break;

                list_add_tail(&ll_buf->list, &buf_list);
                ll_buf->temp_alloc = true;
                rmnet_ll_stats.rx_tmp_allocs++;
        }

        if (!list_empty(&buf_list))
                rmnet_ll_buffers_submit(ll_ep, &buf_list);

out:
        return;
}
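
/* Transmit an skb over the low latency channel via the HW client. The
 * rmnet_ll_tx_lock spinlock keeps TX submissions synchronized with the
 * client's DMA operations; the result is counted in the TX stats and
 * returned to the caller.
 */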
int rmnet_ll_send_skb(struct sk_buff *skb)
{
        int rc;

        spin_lock_bh(&rmnet_ll_tx_lock);
        rc = rmnet_ll_client.tx(skb);
        spin_unlock_bh(&rmnet_ll_tx_lock);
        if (rc)
                rmnet_ll_stats.tx_queue_err++;
        else
                rmnet_ll_stats.tx_queue++;

        return rc;
}

struct rmnet_ll_stats *rmnet_ll_get_stats(void)
{
        return &rmnet_ll_stats;
}

int rmnet_ll_init(void)
{
        return rmnet_ll_client.init();
}

void rmnet_ll_exit(void)
{
        rmnet_ll_client.exit();
}
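
/*
 * Note: the rmnet_ll_client_ops interface this file depends on, as inferred
 * purely from the call sites above (the authoritative definition lives in
 * rmnet_ll_core.h and may differ), looks roughly like:
 *
 *   struct rmnet_ll_client_ops {
 *           int (*buffer_queue)(struct rmnet_ll_endpoint *ll_ep,
 *                               struct rmnet_ll_buffer *ll_buf);
 *           int (*query_free_descriptors)(struct rmnet_ll_endpoint *ll_ep);
 *           int (*tx)(struct sk_buff *skb);
 *           int (*init)(void);
 *           void (*exit)(void);
 *   };
 *
 * buffer_queue() and tx() are treated as returning 0 on success, and
 * query_free_descriptors() as returning the number of RX descriptors the
 * HW can currently accept. The return type of exit(), if any, is not used
 * here.
 */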