rmnet_ll.c

/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RmNet Low Latency channel handlers
 */

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/mhi.h>
#include <linux/if_ether.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/version.h>

#include "rmnet_ll.h"

#define RMNET_LL_DEFAULT_MRU 0x8000
#define RMNET_LL_MAX_RECYCLE_ITER 16
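
/* Per-RX-buffer bookkeeping. Each buffer is a page-order allocation whose
 * first buf_len bytes are handed to the MHI hardware; this struct lives in
 * the remaining bytes at the tail of the same allocation (see
 * rmnet_ll_buffer_alloc()).
 */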
struct rmnet_ll_buffer {
	struct list_head list;
	struct page *page;
	bool temp_alloc;
	bool submitted;
};

struct rmnet_ll_buffer_pool {
	struct list_head buf_list;
	/* Protect access to the recycle buffer pool */
	spinlock_t pool_lock;
	struct list_head *last;
	u32 pool_size;
};

struct rmnet_ll_endpoint {
	struct rmnet_ll_buffer_pool buf_pool;
	struct mhi_device *mhi_dev;
	struct net_device *mhi_netdev;
	u32 dev_mru;
	u32 page_order;
	u32 buf_len;
};

static struct rmnet_ll_endpoint *rmnet_ll_ep;
static struct rmnet_ll_stats rmnet_ll_stats;

/* For TX synchronization with MHI via mhi_queue_transfer()/mhi_queue_skb() */
static DEFINE_SPINLOCK(rmnet_ll_tx_lock);
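
/* Queue any not-yet-submitted buffers from buf_list on the MHI DL (RX)
 * channel. On a queueing failure, a temporary buffer is freed immediately
 * since it is not tracked in the recycle pool.
 */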
static void rmnet_ll_buffers_submit(struct rmnet_ll_endpoint *ll_ep,
				    struct list_head *buf_list)
{
	struct rmnet_ll_buffer *ll_buf;
	int rc;

	list_for_each_entry(ll_buf, buf_list, list) {
		if (ll_buf->submitted)
			continue;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
		rc = mhi_queue_transfer(ll_ep->mhi_dev, DMA_FROM_DEVICE,
					page_address(ll_buf->page),
					ll_ep->buf_len, MHI_EOT);
#else
		rc = mhi_queue_buf(ll_ep->mhi_dev, DMA_FROM_DEVICE,
				   page_address(ll_buf->page),
				   ll_ep->buf_len, MHI_EOT);
#endif
		if (rc) {
			rmnet_ll_stats.rx_queue_err++;
			/* Don't leak the page if we're not storing it */
			if (ll_buf->temp_alloc)
				put_page(ll_buf->page);
		} else {
			ll_buf->submitted = true;
			rmnet_ll_stats.rx_queue++;
		}
	}
}
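
/* Allocate a single RX buffer: a page-order allocation whose tail bytes
 * (past buf_len) hold the struct rmnet_ll_buffer bookkeeping.
 */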
static struct rmnet_ll_buffer *
rmnet_ll_buffer_alloc(struct rmnet_ll_endpoint *ll_ep, gfp_t gfp)
{
	struct rmnet_ll_buffer *ll_buf;
	struct page *page;
	void *addr;

	page = __dev_alloc_pages(gfp, ll_ep->page_order);
	if (!page)
		return NULL;

	/* Store the buffer information at the end */
	addr = page_address(page);
	ll_buf = addr + ll_ep->buf_len;
	ll_buf->page = page;
	ll_buf->submitted = false;
	INIT_LIST_HEAD(&ll_buf->list);
	return ll_buf;
}
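
/* Set up the (initially empty) recycle pool bookkeeping */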
static int rmnet_ll_buffer_pool_alloc(struct rmnet_ll_endpoint *ll_ep)
{
	spin_lock_init(&ll_ep->buf_pool.pool_lock);
	INIT_LIST_HEAD(&ll_ep->buf_pool.buf_list);
	ll_ep->buf_pool.last = ll_ep->buf_pool.buf_list.next;
	ll_ep->buf_pool.pool_size = 0;
	return 0;
}

static void rmnet_ll_buffer_pool_free(struct rmnet_ll_endpoint *ll_ep)
{
	struct rmnet_ll_buffer *ll_buf, *tmp;

	list_for_each_entry_safe(ll_buf, tmp, &ll_ep->buf_pool.buf_list, list) {
		list_del(&ll_buf->list);
		put_page(ll_buf->page);
	}
	ll_ep->buf_pool.last = NULL;
}
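
/* Refill the MHI DL channel. Resubmit idle buffers from the recycle pool
 * (bounded by RMNET_LL_MAX_RECYCLE_ITER per call), then top off any remaining
 * free descriptors with temporary GFP_ATOMIC allocations.
 */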
static void rmnet_ll_buffers_recycle(struct rmnet_ll_endpoint *ll_ep)
{
	struct rmnet_ll_buffer *ll_buf, *tmp;
	LIST_HEAD(buf_list);
	int num_tre, count = 0, iter = 0;

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
	num_tre = mhi_get_no_free_descriptors(ll_ep->mhi_dev, DMA_FROM_DEVICE);
#else
	num_tre = mhi_get_free_desc_count(ll_ep->mhi_dev, DMA_FROM_DEVICE);
#endif
	if (!num_tre)
		goto out;

	list_for_each_entry_safe(ll_buf, tmp, ll_ep->buf_pool.last, list) {
		if (++iter > RMNET_LL_MAX_RECYCLE_ITER || count == num_tre)
			break;

		if (ll_buf->submitted)
			continue;

		count++;
		list_move_tail(&ll_buf->list, &buf_list);
	}

	/* Mark where we left off */
	ll_ep->buf_pool.last = &ll_buf->list;
	/* Submit any pool buffers to the HW if we found some */
	if (count) {
		rmnet_ll_buffers_submit(ll_ep, &buf_list);
		/* requeue immediately BEFORE the last checked element */
		list_splice_tail_init(&buf_list, ll_ep->buf_pool.last);
	}

	/* Do any temporary allocations needed to fill the rest */
	for (; count < num_tre; count++) {
		ll_buf = rmnet_ll_buffer_alloc(ll_ep, GFP_ATOMIC);
		if (!ll_buf)
			break;

		list_add_tail(&ll_buf->list, &buf_list);
		ll_buf->temp_alloc = true;
		rmnet_ll_stats.rx_tmp_allocs++;
	}

	if (!list_empty(&buf_list))
		rmnet_ll_buffers_submit(ll_ep, &buf_list);

out:
	return;
}
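
/* MHI DL transfer-complete callback. Recover the rmnet_ll_buffer from the
 * completed buffer address, attach the page to a fresh skb as a fragment,
 * tag it for the rmnet LL path, hand it to the stack, and refill the channel.
 */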
static void rmnet_ll_rx(struct mhi_device *mhi_dev, struct mhi_result *res)
{
	struct rmnet_ll_endpoint *ll_ep = dev_get_drvdata(&mhi_dev->dev);
	struct rmnet_ll_buffer *ll_buf;
	struct sk_buff *skb;

	/* Get the buffer struct back for our page information */
	ll_buf = res->buf_addr + ll_ep->buf_len;
	ll_buf->submitted = false;
	if (res->transaction_status) {
		rmnet_ll_stats.rx_status_err++;
		goto err;
	} else if (!res->bytes_xferd) {
		rmnet_ll_stats.rx_null++;
		goto err;
	}

	/* Store this away so we don't have to look it up every time */
	if (!ll_ep->mhi_netdev) {
		ll_ep->mhi_netdev = dev_get_by_name(&init_net, "rmnet_mhi0");
		if (!ll_ep->mhi_netdev)
			goto err;
	}

	skb = alloc_skb(0, GFP_ATOMIC);
	if (!skb) {
		rmnet_ll_stats.rx_oom++;
		goto err;
	}

	/* Build the SKB and pass it off to the stack */
	skb_add_rx_frag(skb, 0, ll_buf->page, 0, res->bytes_xferd,
			ll_ep->buf_len);
	if (!ll_buf->temp_alloc)
		get_page(ll_buf->page);

	skb->dev = ll_ep->mhi_netdev;
	skb->protocol = htons(ETH_P_MAP);
	/* Mark this as arriving on the LL channel. Allows rmnet to skip
	 * module handling as needed.
	 */
	skb->priority = 0xda1a;
	rmnet_ll_stats.rx_pkts++;
	netif_rx(skb);
	rmnet_ll_buffers_recycle(ll_ep);
	return;

err:
	/* Go, and never darken my towels again! */
	if (ll_buf->temp_alloc)
		put_page(ll_buf->page);
}
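
/* MHI UL transfer-complete callback: free the skb queued by
 * rmnet_ll_send_skb() and record the result.
 */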
static void rmnet_ll_tx_complete(struct mhi_device *mhi_dev,
				 struct mhi_result *res)
{
	struct sk_buff *skb = res->buf_addr;

	/* Check the result and free the SKB */
	if (res->transaction_status)
		rmnet_ll_stats.tx_complete_err++;
	else
		rmnet_ll_stats.tx_complete++;

	dev_kfree_skb_any(skb);
}
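
/* Called when MHI brings up the RMNET_DATA_LL channel: allocate the endpoint
 * state, prepare the UL/DL channels for transfer, and prime the RX ring.
 */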
static int rmnet_ll_probe(struct mhi_device *mhi_dev,
			  const struct mhi_device_id *id)
{
	struct rmnet_ll_endpoint *ll_ep;
	int rc;

	/* Allocate space for our state from the managed pool tied to the life
	 * of the mhi device.
	 */
	ll_ep = devm_kzalloc(&mhi_dev->dev, sizeof(*ll_ep), GFP_KERNEL);
	if (!ll_ep)
		return -ENOMEM;

	/* Hold on to the mhi_dev so we can send data to it later */
	ll_ep->mhi_dev = mhi_dev;

	/* Grab the MRU of the device so we know the size of the pages we need
	 * to allocate for the pool.
	 */
	rc = of_property_read_u32(mhi_dev->dev.of_node, "mhi,mru",
				  &ll_ep->dev_mru);
	if (rc || !ll_ep->dev_mru)
		/* Use our default mru */
		ll_ep->dev_mru = RMNET_LL_DEFAULT_MRU;

	ll_ep->page_order = get_order(ll_ep->dev_mru);
	/* We store some stuff at the end of the page, so don't let the HW
	 * use that part of it.
	 */
	ll_ep->buf_len = ll_ep->dev_mru - sizeof(struct rmnet_ll_buffer);

	/* Tell MHI to initialize the UL/DL channels for transfer */
	rc = mhi_prepare_for_transfer(mhi_dev);
	if (rc) {
		pr_err("%s(): Failed to prepare device for transfer: 0x%x\n",
		       __func__, rc);
		return rc;
	}

	rc = rmnet_ll_buffer_pool_alloc(ll_ep);
	if (rc) {
		pr_err("%s(): Failed to allocate buffer pool: %d\n", __func__,
		       rc);
		mhi_unprepare_from_transfer(mhi_dev);
		return rc;
	}

	rmnet_ll_buffers_recycle(ll_ep);

	/* Not a fan of storing this pointer in two locations, but I've yet to
	 * come up with any other good way of accessing it on the TX path from
	 * rmnet otherwise, since we won't have any references to the mhi_dev.
	 */
	dev_set_drvdata(&mhi_dev->dev, ll_ep);
	rmnet_ll_ep = ll_ep;
	return 0;
}
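
/* Tear down the endpoint state when the MHI channel goes away */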
static void rmnet_ll_remove(struct mhi_device *mhi_dev)
{
	struct rmnet_ll_endpoint *ll_ep;

	ll_ep = dev_get_drvdata(&mhi_dev->dev);
	/* Remove our private data from the device. No need to free it though.
	 * It will be freed once the mhi_dev is released since it was allocated
	 * from a managed pool.
	 */
	dev_set_drvdata(&mhi_dev->dev, NULL);
	rmnet_ll_ep = NULL;
	rmnet_ll_buffer_pool_free(ll_ep);
}

static const struct mhi_device_id rmnet_ll_channel_table[] = {
	{
		.chan = "RMNET_DATA_LL",
	},
	{},
};

static struct mhi_driver rmnet_ll_driver = {
	.probe = rmnet_ll_probe,
	.remove = rmnet_ll_remove,
	.dl_xfer_cb = rmnet_ll_rx,
	.ul_xfer_cb = rmnet_ll_tx_complete,
	.id_table = rmnet_ll_channel_table,
	.driver = {
		.name = "rmnet_ll",
		.owner = THIS_MODULE,
	},
};
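
/* TX entry point (declared in rmnet_ll.h). On success the skb is owned by
 * MHI and freed later in rmnet_ll_tx_complete(); on error it is not consumed
 * here, so a caller would typically do something like (hypothetical sketch,
 * not from this file):
 *
 *	if (rmnet_ll_send_skb(skb))
 *		kfree_skb(skb);
 */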
int rmnet_ll_send_skb(struct sk_buff *skb)
{
	struct rmnet_ll_endpoint *ll_ep = rmnet_ll_ep;
	int rc = -ENODEV;

	/* Lock to prevent multiple sends at the same time. mhi_queue_transfer()
	 * cannot be called in parallel for the same DMA direction.
	 */
	spin_lock_bh(&rmnet_ll_tx_lock);
	if (ll_ep)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
		rc = mhi_queue_transfer(ll_ep->mhi_dev, DMA_TO_DEVICE, skb,
					skb->len, MHI_EOT);
#else
		rc = mhi_queue_skb(ll_ep->mhi_dev, DMA_TO_DEVICE, skb,
				   skb->len, MHI_EOT);
#endif
	spin_unlock_bh(&rmnet_ll_tx_lock);

	if (rc)
		rmnet_ll_stats.tx_queue_err++;
	else
		rmnet_ll_stats.tx_queue++;

	return rc;
}

struct rmnet_ll_stats *rmnet_ll_get_stats(void)
{
	return &rmnet_ll_stats;
}

int rmnet_ll_init(void)
{
	return mhi_driver_register(&rmnet_ll_driver);
}

void rmnet_ll_exit(void)
{
	mhi_driver_unregister(&rmnet_ll_driver);
}