rmnet_ll_mhi.c

/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet MHI Low Latency channel handlers
 */

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include <linux/if_ether.h>
#include <linux/mm.h>
#include "rmnet_ll.h"
#include "rmnet_ll_core.h"

static struct rmnet_ll_endpoint *rmnet_ll_mhi_ep;
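
/* DL (receive) completion callback. MHI hands back the buffer we queued in
 * rmnet_ll_mhi_queue(). The rmnet_ll_buffer bookkeeping struct sits in the
 * tail of that buffer, just past the buf_len bytes offered to the hardware.
 */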
static void rmnet_ll_mhi_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
{
        struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
        struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
        struct rmnet_ll_buffer *ll_buf;
        struct sk_buff *skb;

        /* Get the buffer struct back for our page information */
        ll_buf = res->buf_addr + ll_ep->buf_len;
        ll_buf->submitted = false;
        if (res->transaction_status) {
                stats->rx_status_err++;
                goto err;
        } else if (!res->bytes_xferd) {
                stats->rx_null++;
                goto err;
        }

        /* Store this away so we don't have to look it up every time */
        if (!ll_ep->phys_dev) {
                ll_ep->phys_dev = dev_get_by_name(&init_net, "rmnet_mhi0");
                if (!ll_ep->phys_dev)
                        goto err;
        }

        skb = alloc_skb(0, GFP_ATOMIC);
        if (!skb) {
                stats->rx_oom++;
                goto err;
        }

        /* Build the SKB and pass it off to the stack */
        skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
                        ll_ep->buf_len);
        if (!ll_buf->temp_alloc)
                get_page(ll_buf->page);

        skb->dev = ll_ep->phys_dev;
        skb->protocol = htons(ETH_P_MAP);
        /* Mark this as arriving on the LL channel. Allows rmnet to skip
         * module handling as needed.
         */
        skb->priority = 0xda1a;
        stats->rx_pkts++;
        netif_rx(skb);
        rmnet_ll_buffers_recycle(ll_ep);
        return;

err:
        /* Go, and never darken my towels again! */
        if (ll_buf->temp_alloc)
                put_page(ll_buf->page);
}
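
/* UL (transmit) completion callback. MHI returns the skb queued in
 * rmnet_ll_mhi_tx() once the transfer finishes, so we can free it here.
 */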
static void rmnet_ll_mhi_tx_complete(struct mhi_device *mhi_dev,
                                     struct mhi_result *res)
{
        struct rmnet_ll_stats *stats = rmnet_ll_get_stats();
        struct sk_buff *skb = res->buf_addr;

        /* Check the result and free the SKB */
        if (res->transaction_status)
                stats->tx_complete_err++;
        else
                stats->tx_complete++;

        dev_consume_skb_any(skb);
}
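
/* Driver probe: set up endpoint state, size the receive buffers from the
 * device MRU, prepare the MHI channels, and preallocate the buffer pool.
 */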
static int rmnet_ll_mhi_probe(struct mhi_device *mhi_dev,
                              const struct mhi_device_id *id)
{
        struct rmnet_ll_endpoint *ll_ep;
        int rc;

        /* Allocate space for our state from the managed pool tied to the
         * life of the mhi device.
         */
        ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
        if (!ll_ep)
                return -ENOMEM;

        /* Hold on to the mhi_dev so we can send data to it later */
        ll_ep->priv = (void *)mhi_dev;

        /* Grab the MRU of the device so we know the size of the pages we
         * need to allocate for the pool.
         */
        rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
                                  &ll_ep->dev_mru);
        if (rc || !ll_ep->dev_mru)
                /* Use our default MRU */
                ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;

        ll_ep->page_order = get_order(ll_ep->dev_mru);
        /* We store some bookkeeping at the end of the page, so don't let
         * the HW use that part of it.
         */
        ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);

        /* Tell MHI to initialize the UL/DL channels for transfer */
        rc = mhi_prepare_for_transfer(mhi_dev);
        if (rc) {
                pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
                       __func__, rc);
                return rc;
        }

        rc = rmnet_ll_buffer_pool_alloc(ll_ep);
        if (rc) {
                pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
                       rc);
                mhi_unprepare_from_transfer(mhi_dev);
                return rc;
        }

        rmnet_ll_buffers_recycle(ll_ep);

        /* Not a fan of storing this pointer in two locations, but I've yet
         * to come up with any other good way of accessing it on the TX path
         * from rmnet, since we won't have any references to the mhi_dev.
         */
        dev_set_drvdata(&mhi_dev->dev, ll_ep);
        rmnet_ll_mhi_ep = ll_ep;
        return 0;
}
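
/* Driver remove: tear down the buffer pool and drop the global endpoint
 * reference so the TX path stops using this device.
 */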
static void rmnet_ll_mhi_remove(struct mhi_device *mhi_dev)
{
        struct rmnet_ll_endpoint *ll_ep;

        ll_ep = dev_get_drvdata(&mhi_dev->dev);
        /* Remove our private data from the device. No need to free it,
         * though: it will be freed once the mhi_dev is released, since it
         * was allocated from a managed pool.
         */
        dev_set_drvdata(&mhi_dev->dev, NULL);
        rmnet_ll_mhi_ep = NULL;
        rmnet_ll_buffer_pool_free(ll_ep);
}
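
/* Match only the dedicated low latency data channel */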
static const struct mhi_device_id rmnet_ll_mhi_channel_table[] = {
        {
                .chan = "RMNET_DATA_LL",
        },
        {},
};
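
/* dl_xfer_cb fires for each completed DL (receive) transfer on the matched
 * channel, and ul_xfer_cb for each completed UL (transmit) transfer.
 */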
static struct mhi_driver rmnet_ll_driver = {
        .probe = rmnet_ll_mhi_probe,
        .remove = rmnet_ll_mhi_remove,
        .dl_xfer_cb = rmnet_ll_mhi_rx,
        .ul_xfer_cb = rmnet_ll_mhi_tx_complete,
        .id_table = rmnet_ll_mhi_channel_table,
        .driver = {
                .name = "rmnet_ll",
                .owner = THIS_MODULE,
        },
};
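
/* Queue a receive buffer on the DL channel. Only buf_len bytes are offered
 * to the hardware; the tail of the page holds the rmnet_ll_buffer struct
 * that rmnet_ll_mhi_rx() recovers on completion.
 */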
static int rmnet_ll_mhi_queue(struct rmnet_ll_endpoint *ll_ep,
                              struct rmnet_ll_buffer *ll_buf)
{
        struct mhi_device *mhi_dev = ll_ep->priv;

        return mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE,
                             page_address(ll_buf->page),
                             ll_ep->buf_len, MHI_EOT);
}
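
/* Report how many free DL descriptors remain so the core knows how many
 * buffers it can recycle back to the hardware.
 */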
static int rmnet_ll_mhi_query_free_descriptors(struct rmnet_ll_endpoint *ll_ep)
{
        struct mhi_device *mhi_dev = ll_ep->priv;

        return mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
}
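
/* Transmit an skb on the UL channel. On success, MHI owns the skb until
 * rmnet_ll_mhi_tx_complete() runs; on failure, free it here and return the
 * error to the caller.
 */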
static int rmnet_ll_mhi_tx(struct sk_buff *skb)
{
        struct mhi_device *mhi_dev;
        int rc;

        if (!rmnet_ll_mhi_ep)
                return -ENODEV;

        mhi_dev = rmnet_ll_mhi_ep->priv;
        rc = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
        if (rc)
                kfree_skb(skb);

        return rc;
}
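
/* Register/unregister with the MHI bus on framework init/exit */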
static int rmnet_ll_mhi_init(void)
{
        return mhi_driver_register(&rmnet_ll_driver);
}

static int rmnet_ll_mhi_exit(void)
{
        mhi_driver_unregister(&rmnet_ll_driver);
        return 0;
}

/* Export operations struct to the main framework */
struct rmnet_ll_client_ops rmnet_ll_client = {
        .buffer_queue = rmnet_ll_mhi_queue,
        .query_free_descriptors = rmnet_ll_mhi_query_free_descriptors,
        .tx = rmnet_ll_mhi_tx,
        .init = rmnet_ll_mhi_init,
        .exit = rmnet_ll_mhi_exit,
};