// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI MBIM Network driver - Network/MBIM over MHI bus
 *
 * Copyright (C) 2021 Linaro Ltd <[email protected]>
 *
 * This driver copies some code from cdc_ncm, which is:
 * Copyright (C) ST-Ericsson 2010-2012
 * and cdc_mbim, which is:
 * Copyright (c) 2012 Smith Micro Software, Inc.
 * Copyright (c) 2012 Bjørn Mork <[email protected]>
 *
 */

#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/mhi.h>
#include <linux/mii.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc_ncm.h>
#include <linux/wwan.h>

/* An MRU of 3500 allows optimizing skb allocation: the skbs will basically
 * fit in one 4K page. Large MBIM packets will simply be split over several
 * MHI transfers and chained by the MHI net layer (zerocopy).
 */
#define MHI_DEFAULT_MRU 3500

#define MHI_MBIM_DEFAULT_MTU 1500
#define MHI_MAX_BUF_SZ 0xffff

#define MBIM_NDP16_SIGN_MASK 0x00ffffff

#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
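
/* Per-session WWAN link context, stored as netdev private data for each link
 * created through the WWAN framework (see mhi_mbim_wwan_ops.priv_size).
 * Stats are updated under the rx/tx u64_stats_sync, and the node is hashed
 * by session id into the context's link_list (see LINK_HASH()).
 */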
struct mhi_mbim_link {
	struct mhi_mbim_context *mbim;
	struct net_device *ndev;
	unsigned int session;

	/* stats */
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;

	struct hlist_node hlnode;
};

struct mhi_mbim_context {
	struct mhi_device *mdev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	unsigned int mru;
	u32 rx_queue_sz;
	u16 rx_seq;
	u16 tx_seq;
	struct delayed_work rx_refill;
	spinlock_t tx_lock;
	struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE];
};

struct mbim_tx_hdr {
	struct usb_cdc_ncm_nth16 nth16;
	struct usb_cdc_ncm_ndp16 ndp16;
	struct usb_cdc_ncm_dpe16 dpe16[2];
} __packed;
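
/* Look up the link bound to @session. Must be called under rcu_read_lock(). */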
static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim,
						   unsigned int session)
{
	struct mhi_mbim_link *link;

	hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) {
		if (link->session == session)
			return link;
	}

	return NULL;
}
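
/* Prepend a minimal single-datagram NTB to the IP packet: an NTH16 header
 * followed by one NDP16 carrying a single DPE entry plus its null terminator.
 * The session id is encoded in the most significant byte of the NDP signature.
 */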
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
				     u16 tx_seq)
{
	unsigned int dgram_size = skb->len;
	struct usb_cdc_ncm_nth16 *nth16;
	struct usb_cdc_ncm_ndp16 *ndp16;
	struct mbim_tx_hdr *mbim_hdr;

	/* Only one NDP is sent, containing the IP packet (no aggregation) */

	/* Ensure we have enough headroom for crafting MBIM header */
	if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr));

	/* Fill NTB header */
	nth16 = &mbim_hdr->nth16;
	nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
	nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
	nth16->wSequence = cpu_to_le16(tx_seq);
	nth16->wBlockLength = cpu_to_le16(skb->len);
	nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));

	/* Fill the unique NDP */
	ndp16 = &mbim_hdr->ndp16;
	ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24));
	ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16)
				     + sizeof(struct usb_cdc_ncm_dpe16) * 2);
	ndp16->wNextNdpIndex = 0;

	/* Datagram follows the mbim header */
	ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr));
	ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size);

	/* null termination */
	ndp16->dpe16[1].wDatagramIndex = 0;
	ndp16->dpe16[1].wDatagramLength = 0;

	return skb;
}
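
/* Transmit handler: tx_lock serializes the header fixup and MHI queuing so
 * that the NTB sequence number is only incremented for buffers actually
 * queued on the UL channel.
 */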
static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	struct mhi_mbim_context *mbim = link->mbim;
	unsigned long flags;
	int err = -ENOMEM;

	/* Serialize MHI channel queuing and MBIM seq */
	spin_lock_irqsave(&mbim->tx_lock, flags);

	skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq);
	if (unlikely(!skb))
		goto exit_unlock;

	err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);

	if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	if (!err)
		mbim->tx_seq++;

exit_unlock:
	spin_unlock_irqrestore(&mbim->tx_lock, flags);

	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&link->tx_syncp);
	u64_stats_inc(&link->tx_dropped);
	u64_stats_update_end(&link->tx_syncp);

	return NETDEV_TX_OK;
}
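
/* Validate the NTH16 header of a received NTB and track the RX sequence
 * number. Returns the offset of the first NDP, or a negative errno.
 */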
static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
	struct usb_cdc_ncm_nth16 *nth16;
	int len;

	if (skb->len < sizeof(struct usb_cdc_ncm_nth16) +
			sizeof(struct usb_cdc_ncm_ndp16)) {
		net_err_ratelimited("frame too short\n");
		return -EINVAL;
	}

	nth16 = (struct usb_cdc_ncm_nth16 *)skb->data;

	if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) {
		net_err_ratelimited("invalid NTH16 signature <%#010x>\n",
				    le32_to_cpu(nth16->dwSignature));
		return -EINVAL;
	}

	/* No limit on the block length, except the size of the data pkt */
	len = le16_to_cpu(nth16->wBlockLength);
	if (len > skb->len) {
		net_err_ratelimited("NTB does not fit into the skb %u/%u\n",
				    len, skb->len);
		return -EINVAL;
	}

	if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
	    (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
	    !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
		net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
				    mbim->rx_seq, le16_to_cpu(nth16->wSequence));
	}
	mbim->rx_seq = le16_to_cpu(nth16->wSequence);

	return le16_to_cpu(nth16->wNdpIndex);
}
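
/* Validate an NDP16 header. Returns the number of datagram entries it
 * describes (excluding the null terminator), or a negative errno.
 */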
static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16)
{
	int ret;

	if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
		net_err_ratelimited("invalid DPT16 length <%u>\n",
				    le16_to_cpu(ndp16->wLength));
		return -EINVAL;
	}

	ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16))
			/ sizeof(struct usb_cdc_ncm_dpe16));
	ret--; /* Last entry is always a NULL terminator */

	if (sizeof(struct usb_cdc_ncm_ndp16) +
			ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) {
		net_err_ratelimited("Invalid nframes = %d\n", ret);
		return -EINVAL;
	}

	return ret;
}
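
/* De-aggregate a received NTB: walk the NDP chain, resolve the destination
 * link from each NDP signature and deliver every datagram as its own skb.
 */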
static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb)
{
	int ndpoffset;

	/* Check NTB header and retrieve first NDP offset */
	ndpoffset = mbim_rx_verify_nth16(mbim, skb);
	if (ndpoffset < 0) {
		net_err_ratelimited("mbim: Incorrect NTB header\n");
		goto error;
	}

	/* Process each NDP */
	while (1) {
		struct usb_cdc_ncm_ndp16 ndp16;
		struct usb_cdc_ncm_dpe16 dpe16;
		struct mhi_mbim_link *link;
		int nframes, n, dpeoffset;
		unsigned int session;

		if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) {
			net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n",
					    ndpoffset);
			goto error;
		}

		/* Check NDP header and retrieve number of datagrams */
		nframes = mbim_rx_verify_ndp16(skb, &ndp16);
		if (nframes < 0) {
			net_err_ratelimited("mbim: Incorrect NDP16\n");
			goto error;
		}

		/* Only IP data type supported, no DSS in MHI context */
		if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK))
				!= cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) {
			net_err_ratelimited("mbim: Unsupported NDP type\n");
			goto next_ndp;
		}

		session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24;

		rcu_read_lock();

		link = mhi_mbim_get_link_rcu(mbim, session);
		if (!link) {
			net_err_ratelimited("mbim: bad packet session (%u)\n", session);
			goto unlock;
		}

		/* de-aggregate and deliver IP packets */
		dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16);
		for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) {
			u16 dgram_offset, dgram_len;
			struct sk_buff *skbn;

			if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16)))
				break;

			dgram_offset = le16_to_cpu(dpe16.wDatagramIndex);
			dgram_len = le16_to_cpu(dpe16.wDatagramLength);

			if (!dgram_offset || !dgram_len)
				break; /* null terminator */

			skbn = netdev_alloc_skb(link->ndev, dgram_len);
			if (!skbn)
				continue;

			skb_put(skbn, dgram_len);
			skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len);

			switch (skbn->data[0] & 0xf0) {
			case 0x40:
				skbn->protocol = htons(ETH_P_IP);
				break;
			case 0x60:
				skbn->protocol = htons(ETH_P_IPV6);
				break;
			default:
				net_err_ratelimited("%s: unknown protocol\n",
						    link->ndev->name);
				dev_kfree_skb_any(skbn);
				u64_stats_update_begin(&link->rx_syncp);
				u64_stats_inc(&link->rx_errors);
				u64_stats_update_end(&link->rx_syncp);
				continue;
			}

			u64_stats_update_begin(&link->rx_syncp);
			u64_stats_inc(&link->rx_packets);
			u64_stats_add(&link->rx_bytes, skbn->len);
			u64_stats_update_end(&link->rx_syncp);

			netif_rx(skbn);
		}
unlock:
		rcu_read_unlock();
next_ndp:
		/* Other NDP to process? */
		ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex);
		if (!ndpoffset)
			break;
	}

	/* free skb */
	dev_consume_skb_any(skb);
	return;
error:
	dev_kfree_skb_any(skb);
}
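
/* Chain an MHI transfer fragment onto the aggregation skb via frag_list and
 * return the aggregation head (or the fragment itself if it is the first one).
 */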
static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mbim->skbagg_head;
	struct sk_buff *tail = mbim->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mbim->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mbim->skbagg_tail = skb;

	return mbim->skbagg_head;
}
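
/* Keep the DL channel fed with MRU-sized receive buffers. If no buffer could
 * be queued at all (queue still fully starved), the work reschedules itself.
 */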
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context,
						     rx_refill.work);
	struct mhi_device *mdev = mbim->mdev;
	int err;

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);

		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
				    mbim->mru, MHI_EOT);
		if (unlikely(err)) {
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz)
		schedule_delayed_work(&mbim->rx_refill, HZ / 2);
}
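
/* DL (modem to host) transfer completion. An -EOVERFLOW status means the MBIM
 * packet was larger than the posted buffer and continues in the next transfer,
 * so fragments are chained until the final one completes the NTB.
 */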
static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev,
				 struct mhi_result *mhi_res)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet has been split over multiple transfers */
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mbim, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mbim->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mbim, skb);
			mbim->skbagg_head = NULL;
		}

		mhi_mbim_rx(mbim, skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mbim->rx_queue_sz / 2)
		schedule_delayed_work(&mbim->rx_refill, 0);
}
static void mhi_mbim_ndo_get_stats64(struct net_device *ndev,
				     struct rtnl_link_stats64 *stats)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&link->rx_syncp);
		stats->rx_packets = u64_stats_read(&link->rx_packets);
		stats->rx_bytes = u64_stats_read(&link->rx_bytes);
		stats->rx_errors = u64_stats_read(&link->rx_errors);
	} while (u64_stats_fetch_retry_irq(&link->rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&link->tx_syncp);
		stats->tx_packets = u64_stats_read(&link->tx_packets);
		stats->tx_bytes = u64_stats_read(&link->tx_bytes);
		stats->tx_errors = u64_stats_read(&link->tx_errors);
		stats->tx_dropped = u64_stats_read(&link->tx_dropped);
	} while (u64_stats_fetch_retry_irq(&link->tx_syncp, start));
}
static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev,
				 struct mhi_result *mhi_res)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	struct net_device *ndev = skb->dev;
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&link->tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&link->tx_syncp);
			return;
		}

		u64_stats_inc(&link->tx_errors);
	} else {
		u64_stats_inc(&link->tx_packets);
		u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&link->tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
static int mhi_mbim_ndo_open(struct net_device *ndev)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	/* Feed the MHI rx buffer pool */
	schedule_delayed_work(&link->mbim->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_mbim_ndo_stop(struct net_device *ndev)
{
	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	return 0;
}
static const struct net_device_ops mhi_mbim_ndo = {
	.ndo_open = mhi_mbim_ndo_open,
	.ndo_stop = mhi_mbim_ndo_stop,
	.ndo_start_xmit = mhi_mbim_ndo_xmit,
	.ndo_get_stats64 = mhi_mbim_ndo_get_stats64,
};
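
/* Called by the WWAN core, under the RTNL lock, to create the link for
 * @if_id. The session id must be unique across the link hash table.
 */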
static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
			    struct netlink_ext_ack *extack)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);
	struct mhi_mbim_context *mbim = ctxt;

	link->session = if_id;
	link->mbim = mbim;
	link->ndev = ndev;
	u64_stats_init(&link->rx_syncp);
	u64_stats_init(&link->tx_syncp);

	rcu_read_lock();
	if (mhi_mbim_get_link_rcu(mbim, if_id)) {
		rcu_read_unlock();
		return -EEXIST;
	}
	rcu_read_unlock();

	/* Already protected by RTNL lock */
	hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]);

	return register_netdevice(ndev);
}

static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev,
			     struct list_head *head)
{
	struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev);

	hlist_del_init_rcu(&link->hlnode);
	synchronize_rcu();

	unregister_netdevice_queue(ndev, head);
}
static void mhi_mbim_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->needed_headroom = sizeof(struct mbim_tx_hdr);
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_mbim_ndo;
	ndev->mtu = MHI_MBIM_DEFAULT_MTU;
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom;
	ndev->tx_queue_len = 1000;
	ndev->needs_free_netdev = true;
}

static const struct wwan_ops mhi_mbim_wwan_ops = {
	.priv_size = sizeof(struct mhi_mbim_link),
	.setup = mhi_mbim_setup,
	.newlink = mhi_mbim_newlink,
	.dellink = mhi_mbim_dellink,
};
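
/* Probed once for the MBIM data channel pair: allocate the shared context,
 * start the MHI channels and register the WWAN link ops under which
 * per-session network interfaces are created.
 */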
static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
	struct mhi_mbim_context *mbim;
	int err;

	mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
	if (!mbim)
		return -ENOMEM;

	spin_lock_init(&mbim->tx_lock);
	dev_set_drvdata(&mhi_dev->dev, mbim);
	mbim->mdev = mhi_dev;
	mbim->mru = mhi_dev->mhi_cntrl->mru ? mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU;

	INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	/* Register wwan link ops with MHI controller representing WWAN instance */
	return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
}

static void mhi_mbim_remove(struct mhi_device *mhi_dev)
{
	struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev);
	struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;

	mhi_unprepare_from_transfer(mhi_dev);
	cancel_delayed_work_sync(&mbim->rx_refill);
	wwan_unregister_ops(&cntrl->mhi_dev->dev);
	kfree_skb(mbim->skbagg_head);
	dev_set_drvdata(&mhi_dev->dev, NULL);
}
static const struct mhi_device_id mhi_mbim_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), MBIM protocol */
	{ .chan = "IP_HW0_MBIM", .driver_data = 0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table);

static struct mhi_driver mhi_mbim_driver = {
	.probe = mhi_mbim_probe,
	.remove = mhi_mbim_remove,
	.dl_xfer_cb = mhi_mbim_dl_callback,
	.ul_xfer_cb = mhi_mbim_ul_callback,
	.id_table = mhi_mbim_id_table,
	.driver = {
		.name = "mhi_wwan_mbim",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_mbim_driver);

MODULE_AUTHOR("Loic Poulain <[email protected]>");
MODULE_DESCRIPTION("Network/MBIM over MHI");
MODULE_LICENSE("GPL v2");