/* t7xx_netdev.c — MediaTek T7xx WWAN network-device glue */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021, MediaTek Inc.
  4. * Copyright (c) 2021-2022, Intel Corporation.
  5. *
  6. * Authors:
  7. * Chandrashekar Devegowda <[email protected]>
  8. * Haijun Liu <[email protected]>
  9. * Ricardo Martinez <[email protected]>
  10. *
  11. * Contributors:
  12. * Amir Hanania <[email protected]>
  13. * Andy Shevchenko <[email protected]>
  14. * Chiranjeevi Rapolu <[email protected]>
  15. * Eliot Lee <[email protected]>
  16. * Moises Veleta <[email protected]>
  17. * Sreehari Kancharla <[email protected]>
  18. */
  19. #include <linux/atomic.h>
  20. #include <linux/device.h>
  21. #include <linux/gfp.h>
  22. #include <linux/if_arp.h>
  23. #include <linux/if_ether.h>
  24. #include <linux/kernel.h>
  25. #include <linux/list.h>
  26. #include <linux/netdev_features.h>
  27. #include <linux/netdevice.h>
  28. #include <linux/skbuff.h>
  29. #include <linux/types.h>
  30. #include <linux/wwan.h>
  31. #include <net/pkt_sched.h>
  32. #include "t7xx_hif_dpmaif_rx.h"
  33. #include "t7xx_hif_dpmaif_tx.h"
  34. #include "t7xx_netdev.h"
  35. #include "t7xx_pci.h"
  36. #include "t7xx_port_proxy.h"
  37. #include "t7xx_state_monitor.h"
  38. #define IP_MUX_SESSION_DEFAULT 0
  39. static int t7xx_ccmni_open(struct net_device *dev)
  40. {
  41. struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
  42. netif_carrier_on(dev);
  43. netif_tx_start_all_queues(dev);
  44. atomic_inc(&ccmni->usage);
  45. return 0;
  46. }
  47. static int t7xx_ccmni_close(struct net_device *dev)
  48. {
  49. struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
  50. atomic_dec(&ccmni->usage);
  51. netif_carrier_off(dev);
  52. netif_tx_disable(dev);
  53. return 0;
  54. }
  55. static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
  56. unsigned int txq_number)
  57. {
  58. struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
  59. struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);
  60. skb_cb->netif_idx = ccmni->index;
  61. if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
  62. return NETDEV_TX_BUSY;
  63. return 0;
  64. }
  65. static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
  66. {
  67. struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
  68. int skb_len = skb->len;
  69. /* If MTU is changed or there is no headroom, drop the packet */
  70. if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
  71. dev_kfree_skb(skb);
  72. dev->stats.tx_dropped++;
  73. return NETDEV_TX_OK;
  74. }
  75. if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
  76. return NETDEV_TX_BUSY;
  77. dev->stats.tx_packets++;
  78. dev->stats.tx_bytes += skb_len;
  79. return NETDEV_TX_OK;
  80. }
  81. static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
  82. {
  83. struct t7xx_ccmni *ccmni = netdev_priv(dev);
  84. dev->stats.tx_errors++;
  85. if (atomic_read(&ccmni->usage) > 0)
  86. netif_tx_wake_all_queues(dev);
  87. }
/* net_device callbacks for a CCMNI (CCCI modem network interface) netdev. */
static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open = t7xx_ccmni_open,
	.ndo_stop = t7xx_ccmni_close,
	.ndo_start_xmit = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout = t7xx_ccmni_tx_timeout,
};
  94. static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
  95. {
  96. struct t7xx_ccmni *ccmni;
  97. int i;
  98. for (i = 0; i < ctlb->nic_dev_num; i++) {
  99. ccmni = ctlb->ccmni_inst[i];
  100. if (!ccmni)
  101. continue;
  102. if (atomic_read(&ccmni->usage) > 0) {
  103. netif_tx_start_all_queues(ccmni->dev);
  104. netif_carrier_on(ccmni->dev);
  105. }
  106. }
  107. }
  108. static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
  109. {
  110. struct t7xx_ccmni *ccmni;
  111. int i;
  112. for (i = 0; i < ctlb->nic_dev_num; i++) {
  113. ccmni = ctlb->ccmni_inst[i];
  114. if (!ccmni)
  115. continue;
  116. if (atomic_read(&ccmni->usage) > 0)
  117. netif_tx_disable(ccmni->dev);
  118. }
  119. }
  120. static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
  121. {
  122. struct t7xx_ccmni *ccmni;
  123. int i;
  124. for (i = 0; i < ctlb->nic_dev_num; i++) {
  125. ccmni = ctlb->ccmni_inst[i];
  126. if (!ccmni)
  127. continue;
  128. if (atomic_read(&ccmni->usage) > 0)
  129. netif_carrier_off(ccmni->dev);
  130. }
  131. }
  132. static void t7xx_ccmni_wwan_setup(struct net_device *dev)
  133. {
  134. dev->hard_header_len += sizeof(struct ccci_header);
  135. dev->mtu = ETH_DATA_LEN;
  136. dev->max_mtu = CCMNI_MTU_MAX;
  137. BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);
  138. dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
  139. dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;
  140. dev->flags = IFF_POINTOPOINT | IFF_NOARP;
  141. dev->features = NETIF_F_VLAN_CHALLENGED;
  142. dev->features |= NETIF_F_SG;
  143. dev->hw_features |= NETIF_F_SG;
  144. dev->features |= NETIF_F_HW_CSUM;
  145. dev->hw_features |= NETIF_F_HW_CSUM;
  146. dev->features |= NETIF_F_RXCSUM;
  147. dev->hw_features |= NETIF_F_RXCSUM;
  148. dev->needs_free_netdev = true;
  149. dev->type = ARPHRD_NONE;
  150. dev->netdev_ops = &ccmni_netdev_ops;
  151. }
  152. static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
  153. struct netlink_ext_ack *extack)
  154. {
  155. struct t7xx_ccmni_ctrl *ctlb = ctxt;
  156. struct t7xx_ccmni *ccmni;
  157. int ret;
  158. if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
  159. return -EINVAL;
  160. ccmni = wwan_netdev_drvpriv(dev);
  161. ccmni->index = if_id;
  162. ccmni->ctlb = ctlb;
  163. ccmni->dev = dev;
  164. atomic_set(&ccmni->usage, 0);
  165. ctlb->ccmni_inst[if_id] = ccmni;
  166. ret = register_netdevice(dev);
  167. if (ret)
  168. return ret;
  169. netif_device_attach(dev);
  170. return 0;
  171. }
  172. static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
  173. {
  174. struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
  175. struct t7xx_ccmni_ctrl *ctlb = ctxt;
  176. u8 if_id = ccmni->index;
  177. if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
  178. return;
  179. if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
  180. return;
  181. unregister_netdevice(dev);
  182. }
/* Operations handed to the WWAN core; it allocates priv_size bytes of
 * driver-private data (struct t7xx_ccmni) per link it creates.
 */
static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup = t7xx_ccmni_wwan_setup,
	.newlink = t7xx_ccmni_wwan_newlink,
	.dellink = t7xx_ccmni_wwan_dellink,
};
  189. static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
  190. {
  191. struct device *dev = ctlb->hif_ctrl->dev;
  192. int ret;
  193. if (ctlb->wwan_is_registered)
  194. return 0;
  195. /* WWAN core will create a netdev for the default IP MUX channel */
  196. ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
  197. if (ret < 0) {
  198. dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
  199. return ret;
  200. }
  201. ctlb->wwan_is_registered = true;
  202. return 0;
  203. }
  204. static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
  205. {
  206. struct t7xx_ccmni_ctrl *ctlb = para;
  207. struct device *dev;
  208. int ret = 0;
  209. dev = ctlb->hif_ctrl->dev;
  210. ctlb->md_sta = state;
  211. switch (state) {
  212. case MD_STATE_READY:
  213. ret = t7xx_ccmni_register_wwan(ctlb);
  214. if (!ret)
  215. t7xx_ccmni_start(ctlb);
  216. break;
  217. case MD_STATE_EXCEPTION:
  218. case MD_STATE_STOPPED:
  219. t7xx_ccmni_pre_stop(ctlb);
  220. ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
  221. if (ret < 0)
  222. dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
  223. t7xx_ccmni_post_stop(ctlb);
  224. break;
  225. case MD_STATE_WAITING_FOR_HS1:
  226. case MD_STATE_WAITING_TO_STOP:
  227. ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
  228. if (ret < 0)
  229. dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
  230. break;
  231. default:
  232. break;
  233. }
  234. return ret;
  235. }
  236. static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
  237. {
  238. struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
  239. struct t7xx_fsm_notifier *md_status_notifier;
  240. md_status_notifier = &ctlb->md_status_notify;
  241. INIT_LIST_HEAD(&md_status_notifier->entry);
  242. md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
  243. md_status_notifier->data = ctlb;
  244. t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
  245. }
  246. static void t7xx_ccmni_recv_skb(struct t7xx_pci_dev *t7xx_dev, struct sk_buff *skb)
  247. {
  248. struct t7xx_skb_cb *skb_cb;
  249. struct net_device *net_dev;
  250. struct t7xx_ccmni *ccmni;
  251. int pkt_type, skb_len;
  252. u8 netif_id;
  253. skb_cb = T7XX_SKB_CB(skb);
  254. netif_id = skb_cb->netif_idx;
  255. ccmni = t7xx_dev->ccmni_ctlb->ccmni_inst[netif_id];
  256. if (!ccmni) {
  257. dev_kfree_skb(skb);
  258. return;
  259. }
  260. net_dev = ccmni->dev;
  261. skb->dev = net_dev;
  262. pkt_type = skb_cb->rx_pkt_type;
  263. if (pkt_type == PKT_TYPE_IP6)
  264. skb->protocol = htons(ETH_P_IPV6);
  265. else
  266. skb->protocol = htons(ETH_P_IP);
  267. skb_len = skb->len;
  268. netif_rx(skb);
  269. net_dev->stats.rx_packets++;
  270. net_dev->stats.rx_bytes += skb_len;
  271. }
  272. static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
  273. {
  274. struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
  275. struct netdev_queue *net_queue;
  276. if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
  277. net_queue = netdev_get_tx_queue(ccmni->dev, qno);
  278. if (netif_tx_queue_stopped(net_queue))
  279. netif_tx_wake_queue(net_queue);
  280. }
  281. }
  282. static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
  283. {
  284. struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
  285. struct netdev_queue *net_queue;
  286. if (atomic_read(&ccmni->usage) > 0) {
  287. netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
  288. net_queue = netdev_get_tx_queue(ccmni->dev, qno);
  289. netif_tx_stop_queue(net_queue);
  290. }
  291. }
  292. static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
  293. enum dpmaif_txq_state state, int qno)
  294. {
  295. struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
  296. if (ctlb->md_sta != MD_STATE_READY)
  297. return;
  298. if (!ctlb->ccmni_inst[0]) {
  299. dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
  300. return;
  301. }
  302. if (state == DMPAIF_TXQ_STATE_IRQ)
  303. t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
  304. else if (state == DMPAIF_TXQ_STATE_FULL)
  305. t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
  306. }
  307. int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
  308. {
  309. struct device *dev = &t7xx_dev->pdev->dev;
  310. struct t7xx_ccmni_ctrl *ctlb;
  311. ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
  312. if (!ctlb)
  313. return -ENOMEM;
  314. t7xx_dev->ccmni_ctlb = ctlb;
  315. ctlb->t7xx_dev = t7xx_dev;
  316. ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
  317. ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
  318. ctlb->nic_dev_num = NIC_DEV_DEFAULT;
  319. ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
  320. if (!ctlb->hif_ctrl)
  321. return -ENOMEM;
  322. init_md_status_notifier(t7xx_dev);
  323. return 0;
  324. }
  325. void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
  326. {
  327. struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
  328. t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);
  329. if (ctlb->wwan_is_registered) {
  330. wwan_unregister_ops(&t7xx_dev->pdev->dev);
  331. ctlb->wwan_is_registered = false;
  332. }
  333. t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
  334. }