/* rmnet_ctl_mhi.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
  3. *
  4. * RMNET_CTL mhi handler
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/mod_devicetable.h>
  9. #include <linux/of.h>
  10. #include <linux/skbuff.h>
  11. #include <linux/mhi.h>
  12. #include "rmnet_ctl.h"
  13. #include "rmnet_ctl_client.h"
  14. #define RMNET_CTL_DEFAULT_MRU 256
/* Per-channel state for an RMNET_CTL device bound to an MHI transfer device. */
struct rmnet_ctl_mhi_dev {
	struct mhi_device *mhi_dev;	/* underlying MHI transfer device */
	struct rmnet_ctl_dev dev;	/* embedded generic rmnet_ctl device */
	u32 mru;			/* DL receive buffer size in bytes */
	spinlock_t rx_lock;		/* rx lock: serializes DL buffer queueing */
	spinlock_t tx_lock;		/* tx lock: serializes UL submissions */
	atomic_t in_reset;		/* incremented on MHI_CB_FATAL_ERROR */
};
  23. static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
  24. {
  25. struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
  26. dev, struct rmnet_ctl_mhi_dev, dev);
  27. int rc;
  28. spin_lock_bh(&ctl_dev->tx_lock);
  29. rc = mhi_queue_transfer(ctl_dev->mhi_dev,
  30. DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
  31. if (rc)
  32. dev->stats.tx_err++;
  33. else
  34. dev->stats.tx_pkts++;
  35. spin_unlock_bh(&ctl_dev->tx_lock);
  36. return rc;
  37. }
  38. static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
  39. gfp_t gfp, void *free_buf)
  40. {
  41. struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
  42. void *buf;
  43. int no_tre, i, rc;
  44. no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
  45. if (!no_tre && free_buf) {
  46. kfree(free_buf);
  47. return;
  48. }
  49. for (i = 0; i < no_tre; i++) {
  50. if (free_buf) {
  51. buf = free_buf;
  52. free_buf = NULL;
  53. } else {
  54. buf = kmalloc(ctl_dev->mru, gfp);
  55. }
  56. if (!buf)
  57. return;
  58. spin_lock_bh(&ctl_dev->rx_lock);
  59. rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
  60. buf, ctl_dev->mru, MHI_EOT);
  61. spin_unlock_bh(&ctl_dev->rx_lock);
  62. if (rc) {
  63. kfree(buf);
  64. return;
  65. }
  66. }
  67. }
  68. static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
  69. struct mhi_result *mhi_res)
  70. {
  71. struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
  72. if (mhi_res->transaction_status == -ENOTCONN) {
  73. kfree(mhi_res->buf_addr);
  74. return;
  75. } else if (mhi_res->transaction_status ||
  76. !mhi_res->buf_addr || !mhi_res->bytes_xferd) {
  77. rmnet_ctl_log_err("RXE", mhi_res->transaction_status, NULL, 0);
  78. ctl_dev->dev.stats.rx_err++;
  79. } else {
  80. ctl_dev->dev.stats.rx_pkts++;
  81. rmnet_ctl_endpoint_post(mhi_res->buf_addr,
  82. mhi_res->bytes_xferd);
  83. }
  84. /* Re-supply receive buffers */
  85. rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
  86. }
  87. static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
  88. struct mhi_result *mhi_res)
  89. {
  90. struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
  91. struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
  92. if (skb) {
  93. if (mhi_res->transaction_status) {
  94. rmnet_ctl_log_err("TXE", mhi_res->transaction_status,
  95. skb->data, skb->len);
  96. ctl_dev->dev.stats.tx_err++;
  97. } else {
  98. rmnet_ctl_log_debug("TXC", skb->data, skb->len);
  99. ctl_dev->dev.stats.tx_complete++;
  100. }
  101. kfree_skb(skb);
  102. }
  103. }
  104. static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
  105. enum MHI_CB mhi_cb)
  106. {
  107. struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
  108. if (mhi_cb != MHI_CB_FATAL_ERROR)
  109. return;
  110. atomic_inc(&ctl_dev->in_reset);
  111. }
  112. static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
  113. const struct mhi_device_id *id)
  114. {
  115. struct rmnet_ctl_mhi_dev *ctl_dev;
  116. struct device_node *of_node = mhi_dev->dev.of_node;
  117. int rc;
  118. ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
  119. if (!ctl_dev)
  120. return -ENOMEM;
  121. ctl_dev->mhi_dev = mhi_dev;
  122. ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
  123. spin_lock_init(&ctl_dev->rx_lock);
  124. spin_lock_init(&ctl_dev->tx_lock);
  125. atomic_set(&ctl_dev->in_reset, 0);
  126. dev_set_drvdata(&mhi_dev->dev, ctl_dev);
  127. rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
  128. if (rc || !ctl_dev->mru)
  129. ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
  130. rc = mhi_prepare_for_transfer(mhi_dev);
  131. if (rc) {
  132. pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
  133. return -EINVAL;
  134. }
  135. /* Post receive buffers */
  136. rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
  137. rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
  138. pr_info("rmnet_ctl driver probed\n");
  139. return 0;
  140. }
/*
 * Remove callback: detach this device from the rmnet_ctl endpoint and
 * clear driver data.  Statement order matters here.
 */
static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
{
	/* Unpublish the device so no new users can reach it. */
	rmnet_ctl_endpoint_setdev(NULL);
	/* Wait out readers that may still hold the old device pointer
	 * under RCU before tearing down state behind them.
	 */
	synchronize_rcu();
	dev_set_drvdata(&mhi_dev->dev, NULL);
	pr_info("rmnet_ctl driver removed\n");
}
/* Match the dedicated RMNET_CTL MHI channel by name. */
static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
	{ .chan = "RMNET_CTL" },
	{}
};

/* MHI driver glue: transfer and status callbacks defined above. */
static struct mhi_driver rmnet_ctl_driver = {
	.probe = rmnet_ctl_probe,
	.remove = rmnet_ctl_remove,
	.dl_xfer_cb = rmnet_ctl_dl_callback,
	.ul_xfer_cb = rmnet_ctl_ul_callback,
	.status_cb = rmnet_ctl_status_callback,
	.id_table = rmnet_ctl_mhi_match,
	.driver = {
		.name = "rmnet_ctl",
		.owner = THIS_MODULE,
	},
};
  164. static int __init rmnet_ctl_init(void)
  165. {
  166. int rc;
  167. rc = mhi_driver_register(&rmnet_ctl_driver);
  168. rmnet_ctl_set_dbgfs(true);
  169. return rc;
  170. }
/* Module exit: unregister the driver, then tear down debugfs. */
static void __exit rmnet_ctl_exit(void)
{
	mhi_driver_unregister(&rmnet_ctl_driver);
	rmnet_ctl_set_dbgfs(false);
}
/* Module entry/exit hooks and metadata. */
module_init(rmnet_ctl_init)
module_exit(rmnet_ctl_exit)
MODULE_DESCRIPTION("RmNet Control MHI Driver");
MODULE_LICENSE("GPL v2");