rmnet_ll_ipa.c

/* Copyright (c) 2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet IPA Low Latency channel handlers
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#if !defined(__arch_um__)
#include <linux/ipa.h>
#endif /* !defined(__arch_um__) */
#include <linux/if_ether.h>
#include <linux/interrupt.h>
#include <linux/version.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

#define IPA_RMNET_LL_RECEIVE 1
#define IPA_RMNET_LL_FLOW_EVT 2

#define MAX_Q_LEN 1000

#if !defined(__arch_um__)
static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
static struct sk_buff_head tx_pending_list;
extern spinlock_t rmnet_ll_tx_lock;

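/* Tasklet that retries packets queued while IPA had TX flow controlled.
 * Scheduled from rmnet_ll_ipa_rx() when a flow-enable event arrives.
 */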
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
static void rmnet_ll_ipa_tx_pending(unsigned long data);
DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending, 0);
static void rmnet_ll_ipa_tx_pending(unsigned long data)
#else
static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t);
DECLARE_TASKLET(tx_pending_task, rmnet_ll_ipa_tx_pending);
static void rmnet_ll_ipa_tx_pending(struct tasklet_struct *t)
#endif
{
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct sk_buff *skb;
	int rc;

	spin_lock_bh(&rmnet_ll_tx_lock);

	while ((skb = __skb_dequeue(&tx_pending_list))) {
		rc = ipa_rmnet_ll_xmit(skb);
		if (rc == -EAGAIN) {
			stats->tx_disabled++;
			__skb_queue_head(&tx_pending_list, skb);
			break;
		}

		if (rc >= 0)
			stats->tx_fc_sent++;
		else
			stats->tx_fc_err++;
	}

	spin_unlock_bh(&rmnet_ll_tx_lock);
}

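/* IPA callback for the LL channel. Handles flow-enable events by kicking
 * the pending-TX tasklet, and hands received MAP packets (marked as low
 * latency via skb->priority) to the network stack.
 */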
static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
{
	struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ipa_ep;
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	struct sk_buff *skb, *tmp;

	if (arg == (void *)(uintptr_t)(IPA_RMNET_LL_FLOW_EVT)) {
		stats->tx_enabled++;
		tasklet_schedule(&tx_pending_task);
		return;
	}

	if (unlikely(arg != (void *)(uintptr_t)(IPA_RMNET_LL_RECEIVE))) {
		pr_err("%s: invalid arg %lu\n", __func__, (unsigned long)arg);
		return;
	}

	skb = rx_data;
	/* Odds are IPA does this, but just to be safe */
	skb->dev = ll_ep->phys_dev;
	skb->protocol = htons(ETH_P_MAP);
	skb_record_rx_queue(skb, 1);

	tmp = skb;
	while (tmp) {
		/* Mark the SKB as low latency */
		tmp->priority = 0xda1a;
		tmp = skb_shinfo(tmp)->frag_list;
	}

	stats->rx_pkts++;
	netif_rx(skb);
}

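/* Called by IPA when the LL channel comes up. Allocates the endpoint
 * context and takes a reference on the rmnet_ipa0 physical device.
 */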
static void rmnet_ll_ipa_probe(void *arg)
{
	struct rmnet_ll_endpoint *ll_ep;

	ll_ep = kzalloc(sizeof(*ll_ep), GFP_KERNEL);
	if (!ll_ep) {
		pr_err("%s(): allocating LL CTX failed\n", __func__);
		return;
	}

	ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_ipa0");
	if (!ll_ep->phys_dev) {
		pr_err("%s(): Invalid physical device\n", __func__);
		kfree(ll_ep);
		return;
	}

	*((struct rmnet_ll_endpoint **)arg) = ll_ep;
}

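/* Called by IPA when the LL channel goes down. Drops the device reference,
 * frees the endpoint context, and flushes any packets still queued for TX.
 */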
static void rmnet_ll_ipa_remove(void *arg)
{
	struct rmnet_ll_endpoint **ll_ep = arg;
	struct sk_buff *skb;

	dev_put((*ll_ep)->phys_dev);
	kfree(*ll_ep);
	*ll_ep = NULL;

	spin_lock_bh(&rmnet_ll_tx_lock);
	while ((skb = __skb_dequeue(&tx_pending_list)))
		kfree_skb(skb);
	spin_unlock_bh(&rmnet_ll_tx_lock);
}

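/* IPA readiness callback. Registers the probe/remove/RX handlers for the
 * LL channel.
 */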
static void rmnet_ll_ipa_ready(void * __unused)
{
	int rc;

	rc = ipa_register_rmnet_ll_cb(rmnet_ll_ipa_probe,
				      (void *)&rmnet_ll_ipa_ep,
				      rmnet_ll_ipa_remove,
				      (void *)&rmnet_ll_ipa_ep,
				      rmnet_ll_ipa_rx,
				      (void *)&rmnet_ll_ipa_ep);
	if (rc)
		pr_err("%s(): Registering IPA LL callback failed with rc %d\n",
		       __func__, rc);
}

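/* Transmit a packet on the LL channel. If IPA is flow controlled, or packets
 * are already waiting, queue the SKB (up to MAX_Q_LEN entries) for the
 * pending-TX tasklet to retry; the pending list is assumed to be protected
 * by rmnet_ll_tx_lock held by the caller.
 */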
static int rmnet_ll_ipa_tx(struct sk_buff *skb)
{
	struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
	int rc;

	if (!rmnet_ll_ipa_ep)
		return -ENODEV;

	if (!skb_queue_empty(&tx_pending_list))
		goto queue_skb;

	rc = ipa_rmnet_ll_xmit(skb);

	/* rc >= 0: success, return number of free descriptors left */
	if (rc >= 0)
		return 0;

	/* IPA handles freeing the SKB on failure */
	if (rc != -EAGAIN)
		return rc;

	stats->tx_disabled++;

queue_skb:
	/* Flow controlled */
	if (skb_queue_len(&tx_pending_list) >= MAX_Q_LEN) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_queue_tail(&tx_pending_list, skb);
	stats->tx_fc_queued++;
	return 0;
}

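/* Register for IPA readiness notification; if IPA is already up, call the
 * ready handler directly since the notification will not fire again.
 */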
static int rmnet_ll_ipa_init(void)
{
	int rc;

	__skb_queue_head_init(&tx_pending_list);
	rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
	if (rc == -EEXIST) {
		/* IPA is already up. Call it ourselves, since they don't */
		rmnet_ll_ipa_ready(NULL);
		rc = 0;
	}

	return rc;
}

static int rmnet_ll_ipa_exit(void)
{
	if (rmnet_ll_ipa_ep) {
		ipa_unregister_rmnet_ll_cb();
		/* Teardown? */
		rmnet_ll_ipa_ep = NULL;
	}

	return 0;
}

#else
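/* Stub implementations for UML (ARCH=um) builds, where the IPA driver is
 * not available.
 */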
static int rmnet_ll_ipa_tx(struct sk_buff *skb) { return 0; }
static int rmnet_ll_ipa_init(void) { return 0; }
static int rmnet_ll_ipa_exit(void) { return 0; }
#endif /* !defined(__arch_um__) */

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
	.tx = rmnet_ll_ipa_tx,
	.init = rmnet_ll_ipa_init,
	.exit = rmnet_ll_ipa_exit,
};