rmnet_ll_ipa.c 3.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
  1. /* Copyright (c) 2021 The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * RmNet IPA Low Latency channel handlers
  13. */
  14. #include <linux/netdevice.h>
  15. #include <linux/skbuff.h>
  16. #include <linux/ipa.h>
  17. #include <linux/if_ether.h>
  18. #include "rmnet_ll.h"
  19. #include "rmnet_ll_core.h"
/* Singleton LL endpoint context; allocated by rmnet_ll_ipa_probe(),
 * released by rmnet_ll_ipa_remove(). NULL until IPA probes us.
 */
static struct rmnet_ll_endpoint *rmnet_ll_ipa_ep;
  21. static void rmnet_ll_ipa_rx(void *arg, void *rx_data)
  22. {
  23. struct rmnet_ll_endpoint *ll_ep = *((struct rmnet_ll_endpoint **)arg);
  24. struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
  25. struct sk_buff *skb, *tmp;
  26. skb = rx_data;
  27. /* Odds are IPA does this, but just to be safe */
  28. skb->dev = ll_ep->phys_dev;
  29. skb->protocol = htons(ETH_P_MAP);
  30. skb_record_rx_queue(skb, 1);
  31. tmp = skb;
  32. while (tmp) {
  33. /* Mark the SKB as low latency */
  34. tmp->priority = 0xda1a;
  35. tmp = skb_shinfo(tmp)->frag_list;
  36. }
  37. stats->rx_pkts++;
  38. netif_rx(skb);
  39. }
  40. static void rmnet_ll_ipa_probe(void *arg)
  41. {
  42. struct rmnet_ll_endpoint *ll_ep;
  43. ll_ep = kzalloc(sizeof(*ll_ep), GFP_KERNEL);
  44. if (!ll_ep) {
  45. pr_err("%s(): allocating LL CTX failed\n", __func__);
  46. return;
  47. }
  48. ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_ipa0");
  49. if (!ll_ep->phys_dev) {
  50. pr_err("%s(): Invalid physical device\n", __func__);
  51. kfree(ll_ep);
  52. return;
  53. }
  54. *((struct rmnet_ll_endpoint **)arg) = ll_ep;
  55. }
  56. static void rmnet_ll_ipa_remove(void *arg)
  57. {
  58. struct rmnet_ll_endpoint **ll_ep = arg;
  59. dev_put((*ll_ep)->phys_dev);
  60. kfree(*ll_ep);
  61. *ll_ep = NULL;
  62. }
  63. static void rmnet_ll_ipa_ready(void * __unused)
  64. {
  65. int rc;
  66. rc = ipa_register_rmnet_ll_cb(rmnet_ll_ipa_probe,
  67. (void *)&rmnet_ll_ipa_ep,
  68. rmnet_ll_ipa_remove,
  69. (void *)&rmnet_ll_ipa_ep,
  70. rmnet_ll_ipa_rx,
  71. (void *)&rmnet_ll_ipa_ep);
  72. if (rc)
  73. pr_err("%s(): Registering IPA LL callback failed with rc %d\n",
  74. __func__, rc);
  75. }
  76. static int rmnet_ll_ipa_tx(struct sk_buff *skb)
  77. {
  78. if (!rmnet_ll_ipa_ep)
  79. return -ENODEV;
  80. /* IPA handles freeing the SKB on failure */
  81. return ipa_rmnet_ll_xmit(skb);
  82. }
  83. static int rmnet_ll_ipa_init(void)
  84. {
  85. int rc;
  86. rc = ipa_register_ipa_ready_cb(rmnet_ll_ipa_ready, NULL);
  87. if (rc == -EEXIST) {
  88. /* IPA is already up. Call it ourselves, since they don't */
  89. rmnet_ll_ipa_ready(NULL);
  90. rc = 0;
  91. }
  92. return rc;
  93. }
  94. static int rmnet_ll_ipa_exit(void)
  95. {
  96. if (rmnet_ll_ipa_ep) {
  97. ipa_unregister_rmnet_ll_cb();
  98. /* Teardown? */
  99. rmnet_ll_ipa_ep = NULL;
  100. }
  101. return 0;
  102. }
/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
	.tx = rmnet_ll_ipa_tx,     /* transmit one SKB on the LL channel */
	.init = rmnet_ll_ipa_init, /* hook IPA-ready notification at module init */
	.exit = rmnet_ll_ipa_exit, /* unregister from IPA at module exit */
};