// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm BAM-DMUX WWAN network driver
 * Copyright (c) 2020, Stephan Gerhold <stephan@gerhold.net>
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/pkt_sched.h>

#define BAM_DMUX_BUFFER_SIZE		SZ_2K
#define BAM_DMUX_HDR_SIZE		sizeof(struct bam_dmux_hdr)
#define BAM_DMUX_MAX_DATA_SIZE		(BAM_DMUX_BUFFER_SIZE - BAM_DMUX_HDR_SIZE)
#define BAM_DMUX_NUM_SKB		32

#define BAM_DMUX_HDR_MAGIC		0x33fc

#define BAM_DMUX_AUTOSUSPEND_DELAY	1000
#define BAM_DMUX_REMOTE_TIMEOUT		msecs_to_jiffies(2000)

enum {
        BAM_DMUX_CMD_DATA,
        BAM_DMUX_CMD_OPEN,
        BAM_DMUX_CMD_CLOSE,
};

enum {
        BAM_DMUX_CH_DATA_0,
        BAM_DMUX_CH_DATA_1,
        BAM_DMUX_CH_DATA_2,
        BAM_DMUX_CH_DATA_3,
        BAM_DMUX_CH_DATA_4,
        BAM_DMUX_CH_DATA_5,
        BAM_DMUX_CH_DATA_6,
        BAM_DMUX_CH_DATA_7,
        BAM_DMUX_NUM_CH
};
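
/*
 * Header prepended to every frame exchanged over the BAM pipes:
 * "magic" must be BAM_DMUX_HDR_MAGIC, "cmd" selects data vs. channel
 * open/close, "ch" is the logical channel, "pad" is the number of bytes
 * appended after the payload to keep frames word-aligned, and "len" is
 * the payload length excluding header and padding. "signal" is always
 * written as zero by this driver.
 */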
struct bam_dmux_hdr {
        u16 magic;
        u8 signal;
        u8 cmd;
        u8 pad;
        u8 ch;
        u16 len;
};

struct bam_dmux_skb_dma {
        struct bam_dmux *dmux;
        struct sk_buff *skb;
        dma_addr_t addr;
};

struct bam_dmux {
        struct device *dev;

        int pc_irq;
        bool pc_state, pc_ack_state;
        struct qcom_smem_state *pc, *pc_ack;
        u32 pc_mask, pc_ack_mask;
        wait_queue_head_t pc_wait;
        struct completion pc_ack_completion;

        struct dma_chan *rx, *tx;
        struct bam_dmux_skb_dma rx_skbs[BAM_DMUX_NUM_SKB];
        struct bam_dmux_skb_dma tx_skbs[BAM_DMUX_NUM_SKB];
        spinlock_t tx_lock; /* Protect tx_skbs, tx_next_skb */
        unsigned int tx_next_skb;
        atomic_long_t tx_deferred_skb;
        struct work_struct tx_wakeup_work;

        DECLARE_BITMAP(remote_channels, BAM_DMUX_NUM_CH);
        struct work_struct register_netdev_work;
        struct net_device *netdevs[BAM_DMUX_NUM_CH];
};

struct bam_dmux_netdev {
        struct bam_dmux *dmux;
        u8 ch;
};
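
/*
 * Power control handshake: the local power vote is a single SMEM state
 * bit ("pc"), the acknowledgment is a second bit ("pc-ack") that is
 * toggled for every ack rather than set/cleared. pc_ack_completion is
 * re-armed before each vote change and completed when the remote side
 * acks it.
 */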
static void bam_dmux_pc_vote(struct bam_dmux *dmux, bool enable)
{
        reinit_completion(&dmux->pc_ack_completion);
        qcom_smem_state_update_bits(dmux->pc, dmux->pc_mask,
                                    enable ? dmux->pc_mask : 0);
}

static void bam_dmux_pc_ack(struct bam_dmux *dmux)
{
        qcom_smem_state_update_bits(dmux->pc_ack, dmux->pc_ack_mask,
                                    dmux->pc_ack_state ? 0 : dmux->pc_ack_mask);
        dmux->pc_ack_state = !dmux->pc_ack_state;
}

static bool bam_dmux_skb_dma_map(struct bam_dmux_skb_dma *skb_dma,
                                 enum dma_data_direction dir)
{
        struct device *dev = skb_dma->dmux->dev;

        skb_dma->addr = dma_map_single(dev, skb_dma->skb->data, skb_dma->skb->len, dir);
        if (dma_mapping_error(dev, skb_dma->addr)) {
                dev_err(dev, "Failed to DMA map buffer\n");
                skb_dma->addr = 0;
                return false;
        }

        return true;
}

static void bam_dmux_skb_dma_unmap(struct bam_dmux_skb_dma *skb_dma,
                                   enum dma_data_direction dir)
{
        dma_unmap_single(skb_dma->dmux->dev, skb_dma->addr, skb_dma->skb->len, dir);
        skb_dma->addr = 0;
}

static void bam_dmux_tx_wake_queues(struct bam_dmux *dmux)
{
        int i;

        dev_dbg(dmux->dev, "wake queues\n");

        for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
                struct net_device *netdev = dmux->netdevs[i];

                if (netdev && netif_running(netdev))
                        netif_wake_queue(netdev);
        }
}

static void bam_dmux_tx_stop_queues(struct bam_dmux *dmux)
{
        int i;

        dev_dbg(dmux->dev, "stop queues\n");

        for (i = 0; i < BAM_DMUX_NUM_CH; ++i) {
                struct net_device *netdev = dmux->netdevs[i];

                if (netdev)
                        netif_stop_queue(netdev);
        }
}

static void bam_dmux_tx_done(struct bam_dmux_skb_dma *skb_dma)
{
        struct bam_dmux *dmux = skb_dma->dmux;
        unsigned long flags;

        pm_runtime_mark_last_busy(dmux->dev);
        pm_runtime_put_autosuspend(dmux->dev);

        if (skb_dma->addr)
                bam_dmux_skb_dma_unmap(skb_dma, DMA_TO_DEVICE);

        spin_lock_irqsave(&dmux->tx_lock, flags);
        skb_dma->skb = NULL;
        if (skb_dma == &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB])
                bam_dmux_tx_wake_queues(dmux);
        spin_unlock_irqrestore(&dmux->tx_lock, flags);
}

static void bam_dmux_tx_callback(void *data)
{
        struct bam_dmux_skb_dma *skb_dma = data;
        struct sk_buff *skb = skb_dma->skb;

        bam_dmux_tx_done(skb_dma);
        dev_consume_skb_any(skb);
}

static bool bam_dmux_skb_dma_submit_tx(struct bam_dmux_skb_dma *skb_dma)
{
        struct bam_dmux *dmux = skb_dma->dmux;
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_single(dmux->tx, skb_dma->addr,
                                           skb_dma->skb->len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dmux->dev, "Failed to prepare TX DMA buffer\n");
                return false;
        }

        desc->callback = bam_dmux_tx_callback;
        desc->callback_param = skb_dma;
        desc->cookie = dmaengine_submit(desc);
        return true;
}
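
/*
 * Claim the next free slot in the fixed ring of BAM_DMUX_NUM_SKB TX
 * descriptors. If the slot is still in flight, all queues are stopped
 * and NULL is returned; bam_dmux_tx_done() wakes the queues again once
 * the slot at the head of the ring completes.
 */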
static struct bam_dmux_skb_dma *
bam_dmux_tx_queue(struct bam_dmux *dmux, struct sk_buff *skb)
{
        struct bam_dmux_skb_dma *skb_dma;
        unsigned long flags;

        spin_lock_irqsave(&dmux->tx_lock, flags);

        skb_dma = &dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB];
        if (skb_dma->skb) {
                bam_dmux_tx_stop_queues(dmux);
                spin_unlock_irqrestore(&dmux->tx_lock, flags);
                return NULL;
        }
        skb_dma->skb = skb;

        dmux->tx_next_skb++;
        if (dmux->tx_skbs[dmux->tx_next_skb % BAM_DMUX_NUM_SKB].skb)
                bam_dmux_tx_stop_queues(dmux);

        spin_unlock_irqrestore(&dmux->tx_lock, flags);
        return skb_dma;
}
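
/*
 * Send a header-only command frame (BAM_DMUX_CMD_OPEN/CLOSE) for the
 * channel behind @bndev. Unlike the data path this may sleep, so the
 * device is resumed synchronously before mapping and submitting.
 */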
static int bam_dmux_send_cmd(struct bam_dmux_netdev *bndev, u8 cmd)
{
        struct bam_dmux *dmux = bndev->dmux;
        struct bam_dmux_skb_dma *skb_dma;
        struct bam_dmux_hdr *hdr;
        struct sk_buff *skb;
        int ret;

        skb = alloc_skb(sizeof(*hdr), GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        hdr = skb_put_zero(skb, sizeof(*hdr));
        hdr->magic = BAM_DMUX_HDR_MAGIC;
        hdr->cmd = cmd;
        hdr->ch = bndev->ch;

        skb_dma = bam_dmux_tx_queue(dmux, skb);
        if (!skb_dma) {
                ret = -EAGAIN;
                goto free_skb;
        }

        ret = pm_runtime_get_sync(dmux->dev);
        if (ret < 0)
                goto tx_fail;

        if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE)) {
                ret = -ENOMEM;
                goto tx_fail;
        }

        if (!bam_dmux_skb_dma_submit_tx(skb_dma)) {
                ret = -EIO;
                goto tx_fail;
        }

        dma_async_issue_pending(dmux->tx);
        return 0;

tx_fail:
        bam_dmux_tx_done(skb_dma);
free_skb:
        dev_kfree_skb(skb);
        return ret;
}

static int bam_dmux_netdev_open(struct net_device *netdev)
{
        struct bam_dmux_netdev *bndev = netdev_priv(netdev);
        int ret;

        ret = bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_OPEN);
        if (ret)
                return ret;

        netif_start_queue(netdev);
        return 0;
}

static int bam_dmux_netdev_stop(struct net_device *netdev)
{
        struct bam_dmux_netdev *bndev = netdev_priv(netdev);

        netif_stop_queue(netdev);
        bam_dmux_send_cmd(bndev, BAM_DMUX_CMD_CLOSE);
        return 0;
}

static unsigned int needed_room(unsigned int avail, unsigned int needed)
{
        if (avail >= needed)
                return 0;
        return needed - avail;
}
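
/*
 * Prepend the BAM-DMUX header and pad the payload to a 32-bit boundary,
 * reallocating the head/tail of the skb only when there is not enough
 * room (or the skb is cloned).
 */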
static int bam_dmux_tx_prepare_skb(struct bam_dmux_netdev *bndev,
                                   struct sk_buff *skb)
{
        unsigned int head = needed_room(skb_headroom(skb), BAM_DMUX_HDR_SIZE);
        unsigned int pad = sizeof(u32) - skb->len % sizeof(u32);
        unsigned int tail = needed_room(skb_tailroom(skb), pad);
        struct bam_dmux_hdr *hdr;
        int ret;

        if (head || tail || skb_cloned(skb)) {
                ret = pskb_expand_head(skb, head, tail, GFP_ATOMIC);
                if (ret)
                        return ret;
        }

        hdr = skb_push(skb, sizeof(*hdr));
        hdr->magic = BAM_DMUX_HDR_MAGIC;
        hdr->signal = 0;
        hdr->cmd = BAM_DMUX_CMD_DATA;
        hdr->pad = pad;
        hdr->ch = bndev->ch;
        hdr->len = skb->len - sizeof(*hdr);
        if (pad)
                skb_put_zero(skb, pad);

        return 0;
}
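
/*
 * If the device is not fully resumed yet (pm_runtime_get() returned 0 or
 * -EINPROGRESS, i.e. a resume is queued or in progress), the skb cannot
 * be submitted here: the TX DMA channel is only requested in the runtime
 * resume handler and waiting for it would mean sleeping in the xmit
 * path. Instead the slot index is recorded in the tx_deferred_skb bitmap
 * and tx_wakeup_work submits the skb once the resume has completed.
 */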
static netdev_tx_t bam_dmux_netdev_start_xmit(struct sk_buff *skb,
                                              struct net_device *netdev)
{
        struct bam_dmux_netdev *bndev = netdev_priv(netdev);
        struct bam_dmux *dmux = bndev->dmux;
        struct bam_dmux_skb_dma *skb_dma;
        int active, ret;

        skb_dma = bam_dmux_tx_queue(dmux, skb);
        if (!skb_dma)
                return NETDEV_TX_BUSY;

        active = pm_runtime_get(dmux->dev);
        if (active < 0 && active != -EINPROGRESS)
                goto drop;

        ret = bam_dmux_tx_prepare_skb(bndev, skb);
        if (ret)
                goto drop;

        if (!bam_dmux_skb_dma_map(skb_dma, DMA_TO_DEVICE))
                goto drop;

        if (active <= 0) {
                /* Cannot sleep here so mark skb for wakeup handler and return */
                if (!atomic_long_fetch_or(BIT(skb_dma - dmux->tx_skbs),
                                          &dmux->tx_deferred_skb))
                        queue_pm_work(&dmux->tx_wakeup_work);
                return NETDEV_TX_OK;
        }

        if (!bam_dmux_skb_dma_submit_tx(skb_dma))
                goto drop;

        dma_async_issue_pending(dmux->tx);
        return NETDEV_TX_OK;

drop:
        bam_dmux_tx_done(skb_dma);
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

static void bam_dmux_tx_wakeup_work(struct work_struct *work)
{
        struct bam_dmux *dmux = container_of(work, struct bam_dmux, tx_wakeup_work);
        unsigned long pending;
        int ret, i;

        ret = pm_runtime_resume_and_get(dmux->dev);
        if (ret < 0) {
                dev_err(dmux->dev, "Failed to resume: %d\n", ret);
                return;
        }

        pending = atomic_long_xchg(&dmux->tx_deferred_skb, 0);
        if (!pending)
                goto out;

        dev_dbg(dmux->dev, "pending skbs after wakeup: %#lx\n", pending);
        for_each_set_bit(i, &pending, BAM_DMUX_NUM_SKB) {
                bam_dmux_skb_dma_submit_tx(&dmux->tx_skbs[i]);
        }
        dma_async_issue_pending(dmux->tx);

out:
        pm_runtime_mark_last_busy(dmux->dev);
        pm_runtime_put_autosuspend(dmux->dev);
}

static const struct net_device_ops bam_dmux_ops = {
        .ndo_open = bam_dmux_netdev_open,
        .ndo_stop = bam_dmux_netdev_stop,
        .ndo_start_xmit = bam_dmux_netdev_start_xmit,
};

static const struct device_type wwan_type = {
        .name = "wwan",
};
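
/*
 * The netdevs are raw-IP (ARPHRD_RAWIP) point-to-point interfaces:
 * no ARP, MTU limited by the DMUX payload size, and head/tailroom
 * reserved so the TX path can usually add the header and padding
 * without reallocating.
 */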
static void bam_dmux_netdev_setup(struct net_device *dev)
{
        dev->netdev_ops = &bam_dmux_ops;

        dev->type = ARPHRD_RAWIP;
        SET_NETDEV_DEVTYPE(dev, &wwan_type);
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;

        dev->mtu = ETH_DATA_LEN;
        dev->max_mtu = BAM_DMUX_MAX_DATA_SIZE;
        dev->needed_headroom = sizeof(struct bam_dmux_hdr);
        dev->needed_tailroom = sizeof(u32); /* word-aligned */
        dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

        /* This perm addr will be used as interface identifier by IPv6 */
        dev->addr_assign_type = NET_ADDR_RANDOM;
        eth_random_addr(dev->perm_addr);
}
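
/*
 * Netdev registration can sleep, so it is done from a worker scheduled
 * by bam_dmux_cmd_open() rather than from the DMA completion callback.
 * One wwan%d interface is created per channel the remote side has
 * opened.
 */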
static void bam_dmux_register_netdev_work(struct work_struct *work)
{
        struct bam_dmux *dmux = container_of(work, struct bam_dmux, register_netdev_work);
        struct bam_dmux_netdev *bndev;
        struct net_device *netdev;
        int ch, ret;

        for_each_set_bit(ch, dmux->remote_channels, BAM_DMUX_NUM_CH) {
                if (dmux->netdevs[ch])
                        continue;

                netdev = alloc_netdev(sizeof(*bndev), "wwan%d", NET_NAME_ENUM,
                                      bam_dmux_netdev_setup);
                if (!netdev)
                        return;

                SET_NETDEV_DEV(netdev, dmux->dev);
                netdev->dev_port = ch;

                bndev = netdev_priv(netdev);
                bndev->dmux = dmux;
                bndev->ch = ch;

                ret = register_netdev(netdev);
                if (ret) {
                        dev_err(dmux->dev, "Failed to register netdev for channel %u: %d\n",
                                ch, ret);
                        free_netdev(netdev);
                        return;
                }

                dmux->netdevs[ch] = netdev;
        }
}

static void bam_dmux_rx_callback(void *data);

static bool bam_dmux_skb_dma_submit_rx(struct bam_dmux_skb_dma *skb_dma)
{
        struct bam_dmux *dmux = skb_dma->dmux;
        struct dma_async_tx_descriptor *desc;

        desc = dmaengine_prep_slave_single(dmux->rx, skb_dma->addr,
                                           skb_dma->skb->len, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(dmux->dev, "Failed to prepare RX DMA buffer\n");
                return false;
        }

        desc->callback = bam_dmux_rx_callback;
        desc->callback_param = skb_dma;
        desc->cookie = dmaengine_submit(desc);
        return true;
}
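
/*
 * (Re)arm one RX slot: allocate a BAM_DMUX_BUFFER_SIZE skb if the slot
 * is empty, map it and submit it to the RX channel. RX buffers are
 * recycled this way after every completion.
 */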
static bool bam_dmux_skb_dma_queue_rx(struct bam_dmux_skb_dma *skb_dma, gfp_t gfp)
{
        if (!skb_dma->skb) {
                skb_dma->skb = __netdev_alloc_skb(NULL, BAM_DMUX_BUFFER_SIZE, gfp);
                if (!skb_dma->skb)
                        return false;
                skb_put(skb_dma->skb, BAM_DMUX_BUFFER_SIZE);
        }

        return bam_dmux_skb_dma_map(skb_dma, DMA_FROM_DEVICE) &&
               bam_dmux_skb_dma_submit_rx(skb_dma);
}
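
/*
 * Deliver a received data frame: strip the header, trim off the padding
 * using hdr->len, pick the protocol from the IP version nibble (anything
 * else is passed up as ETH_P_MAP) and hand the skb to the network stack.
 * The slot's skb pointer is cleared so the RX callback allocates a fresh
 * buffer for the slot.
 */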
static void bam_dmux_cmd_data(struct bam_dmux_skb_dma *skb_dma)
{
        struct bam_dmux *dmux = skb_dma->dmux;
        struct sk_buff *skb = skb_dma->skb;
        struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;
        struct net_device *netdev = dmux->netdevs[hdr->ch];

        if (!netdev || !netif_running(netdev)) {
                dev_warn(dmux->dev, "Data for inactive channel %u\n", hdr->ch);
                return;
        }

        if (hdr->len > BAM_DMUX_MAX_DATA_SIZE) {
                dev_err(dmux->dev, "Data larger than buffer? (%u > %u)\n",
                        hdr->len, (u16)BAM_DMUX_MAX_DATA_SIZE);
                return;
        }

        skb_dma->skb = NULL; /* Hand over to network stack */

        skb_pull(skb, sizeof(*hdr));
        skb_trim(skb, hdr->len);
        skb->dev = netdev;

        /* Only Raw-IP/QMAP is supported by this driver */
        switch (skb->data[0] & 0xf0) {
        case 0x40:
                skb->protocol = htons(ETH_P_IP);
                break;
        case 0x60:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
                skb->protocol = htons(ETH_P_MAP);
                break;
        }

        netif_receive_skb(skb);
}

static void bam_dmux_cmd_open(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
        struct net_device *netdev = dmux->netdevs[hdr->ch];

        dev_dbg(dmux->dev, "open channel: %u\n", hdr->ch);

        if (__test_and_set_bit(hdr->ch, dmux->remote_channels)) {
                dev_warn(dmux->dev, "Channel already open: %u\n", hdr->ch);
                return;
        }

        if (netdev) {
                netif_device_attach(netdev);
        } else {
                /* Cannot sleep here, schedule work to register the netdev */
                schedule_work(&dmux->register_netdev_work);
        }
}

static void bam_dmux_cmd_close(struct bam_dmux *dmux, struct bam_dmux_hdr *hdr)
{
        struct net_device *netdev = dmux->netdevs[hdr->ch];

        dev_dbg(dmux->dev, "close channel: %u\n", hdr->ch);

        if (!__test_and_clear_bit(hdr->ch, dmux->remote_channels)) {
                dev_err(dmux->dev, "Channel not open: %u\n", hdr->ch);
                return;
        }

        if (netdev)
                netif_device_detach(netdev);
}
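
/*
 * DMA completion handler for RX buffers: validate the header, dispatch
 * on the command type and then requeue the buffer on the RX channel.
 */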
static void bam_dmux_rx_callback(void *data)
{
        struct bam_dmux_skb_dma *skb_dma = data;
        struct bam_dmux *dmux = skb_dma->dmux;
        struct sk_buff *skb = skb_dma->skb;
        struct bam_dmux_hdr *hdr = (struct bam_dmux_hdr *)skb->data;

        bam_dmux_skb_dma_unmap(skb_dma, DMA_FROM_DEVICE);

        if (hdr->magic != BAM_DMUX_HDR_MAGIC) {
                dev_err(dmux->dev, "Invalid magic in header: %#x\n", hdr->magic);
                goto out;
        }

        if (hdr->ch >= BAM_DMUX_NUM_CH) {
                dev_dbg(dmux->dev, "Unsupported channel: %u\n", hdr->ch);
                goto out;
        }

        switch (hdr->cmd) {
        case BAM_DMUX_CMD_DATA:
                bam_dmux_cmd_data(skb_dma);
                break;
        case BAM_DMUX_CMD_OPEN:
                bam_dmux_cmd_open(dmux, hdr);
                break;
        case BAM_DMUX_CMD_CLOSE:
                bam_dmux_cmd_close(dmux, hdr);
                break;
        default:
                dev_err(dmux->dev, "Unsupported command %u on channel %u\n",
                        hdr->cmd, hdr->ch);
                break;
        }

out:
        if (bam_dmux_skb_dma_queue_rx(skb_dma, GFP_ATOMIC))
                dma_async_issue_pending(dmux->rx);
}
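
/*
 * Called when the remote side signals power up: request the RX DMA
 * channel and prime it with BAM_DMUX_NUM_SKB receive buffers.
 */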
static bool bam_dmux_power_on(struct bam_dmux *dmux)
{
        struct device *dev = dmux->dev;
        struct dma_slave_config dma_rx_conf = {
                .direction = DMA_DEV_TO_MEM,
                .src_maxburst = BAM_DMUX_BUFFER_SIZE,
        };
        int i;

        dmux->rx = dma_request_chan(dev, "rx");
        if (IS_ERR(dmux->rx)) {
                dev_err(dev, "Failed to request RX DMA channel: %pe\n", dmux->rx);
                dmux->rx = NULL;
                return false;
        }
        dmaengine_slave_config(dmux->rx, &dma_rx_conf);

        for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
                if (!bam_dmux_skb_dma_queue_rx(&dmux->rx_skbs[i], GFP_KERNEL))
                        return false;
        }
        dma_async_issue_pending(dmux->rx);

        return true;
}

static void bam_dmux_free_skbs(struct bam_dmux_skb_dma skbs[],
                               enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
                struct bam_dmux_skb_dma *skb_dma = &skbs[i];

                if (skb_dma->addr)
                        bam_dmux_skb_dma_unmap(skb_dma, dir);
                if (skb_dma->skb) {
                        dev_kfree_skb(skb_dma->skb);
                        skb_dma->skb = NULL;
                }
        }
}

static void bam_dmux_power_off(struct bam_dmux *dmux)
{
        if (dmux->tx) {
                dmaengine_terminate_sync(dmux->tx);
                dma_release_channel(dmux->tx);
                dmux->tx = NULL;
        }

        if (dmux->rx) {
                dmaengine_terminate_sync(dmux->rx);
                dma_release_channel(dmux->rx);
                dmux->rx = NULL;
        }

        bam_dmux_free_skbs(dmux->rx_skbs, DMA_FROM_DEVICE);
}
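
/*
 * The "pc" interrupt is raised for each power state change signalled by
 * the remote side, so the handler flips pc_state on every invocation.
 * Power the BAM pipes up or down accordingly and acknowledge via
 * "pc-ack".
 */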
static irqreturn_t bam_dmux_pc_irq(int irq, void *data)
{
        struct bam_dmux *dmux = data;
        bool new_state = !dmux->pc_state;

        dev_dbg(dmux->dev, "pc: %u\n", new_state);

        if (new_state) {
                if (bam_dmux_power_on(dmux))
                        bam_dmux_pc_ack(dmux);
                else
                        bam_dmux_power_off(dmux);
        } else {
                bam_dmux_power_off(dmux);
                bam_dmux_pc_ack(dmux);
        }

        dmux->pc_state = new_state;
        wake_up_all(&dmux->pc_wait);

        return IRQ_HANDLED;
}

static irqreturn_t bam_dmux_pc_ack_irq(int irq, void *data)
{
        struct bam_dmux *dmux = data;

        dev_dbg(dmux->dev, "pc ack\n");
        complete_all(&dmux->pc_ack_completion);

        return IRQ_HANDLED;
}

static int __maybe_unused bam_dmux_runtime_suspend(struct device *dev)
{
        struct bam_dmux *dmux = dev_get_drvdata(dev);

        dev_dbg(dev, "runtime suspend\n");
        bam_dmux_pc_vote(dmux, false);

        return 0;
}
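
/*
 * Runtime resume sequence: wait for the ack of the previous power-down
 * vote, vote for power up, wait for the new ack and for pc_state to go
 * high, then make sure the RX side actually came up and request the TX
 * DMA channel if this is the first resume.
 */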
static int __maybe_unused bam_dmux_runtime_resume(struct device *dev)
{
        struct bam_dmux *dmux = dev_get_drvdata(dev);

        dev_dbg(dev, "runtime resume\n");

        /* Wait until previous power down was acked */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
                                         BAM_DMUX_REMOTE_TIMEOUT))
                return -ETIMEDOUT;

        /* Vote for power state */
        bam_dmux_pc_vote(dmux, true);

        /* Wait for ack */
        if (!wait_for_completion_timeout(&dmux->pc_ack_completion,
                                         BAM_DMUX_REMOTE_TIMEOUT)) {
                bam_dmux_pc_vote(dmux, false);
                return -ETIMEDOUT;
        }

        /* Wait until we're up */
        if (!wait_event_timeout(dmux->pc_wait, dmux->pc_state,
                                BAM_DMUX_REMOTE_TIMEOUT)) {
                bam_dmux_pc_vote(dmux, false);
                return -ETIMEDOUT;
        }

        /* Ensure that we actually initialized successfully */
        if (!dmux->rx) {
                bam_dmux_pc_vote(dmux, false);
                return -ENXIO;
        }

        /* Request TX channel if necessary */
        if (dmux->tx)
                return 0;

        dmux->tx = dma_request_chan(dev, "tx");
        if (IS_ERR(dmux->tx)) {
                dev_err(dev, "Failed to request TX DMA channel: %pe\n", dmux->tx);
                dmux->tx = NULL;
                bam_dmux_runtime_suspend(dev);
                return -ENXIO;
        }

        return 0;
}

static int bam_dmux_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct bam_dmux *dmux;
        int ret, pc_ack_irq, i;
        unsigned int bit;

        dmux = devm_kzalloc(dev, sizeof(*dmux), GFP_KERNEL);
        if (!dmux)
                return -ENOMEM;

        dmux->dev = dev;
        platform_set_drvdata(pdev, dmux);

        dmux->pc_irq = platform_get_irq_byname(pdev, "pc");
        if (dmux->pc_irq < 0)
                return dmux->pc_irq;

        pc_ack_irq = platform_get_irq_byname(pdev, "pc-ack");
        if (pc_ack_irq < 0)
                return pc_ack_irq;

        dmux->pc = devm_qcom_smem_state_get(dev, "pc", &bit);
        if (IS_ERR(dmux->pc))
                return dev_err_probe(dev, PTR_ERR(dmux->pc),
                                     "Failed to get pc state\n");
        dmux->pc_mask = BIT(bit);

        dmux->pc_ack = devm_qcom_smem_state_get(dev, "pc-ack", &bit);
        if (IS_ERR(dmux->pc_ack))
                return dev_err_probe(dev, PTR_ERR(dmux->pc_ack),
                                     "Failed to get pc-ack state\n");
        dmux->pc_ack_mask = BIT(bit);

        init_waitqueue_head(&dmux->pc_wait);
        init_completion(&dmux->pc_ack_completion);
        complete_all(&dmux->pc_ack_completion);

        spin_lock_init(&dmux->tx_lock);
        INIT_WORK(&dmux->tx_wakeup_work, bam_dmux_tx_wakeup_work);
        INIT_WORK(&dmux->register_netdev_work, bam_dmux_register_netdev_work);

        for (i = 0; i < BAM_DMUX_NUM_SKB; i++) {
                dmux->rx_skbs[i].dmux = dmux;
                dmux->tx_skbs[i].dmux = dmux;
        }

        /* Runtime PM manages our own power vote.
         * Note that the RX path may be active even if we are runtime suspended,
         * since it is controlled by the remote side.
         */
        pm_runtime_set_autosuspend_delay(dev, BAM_DMUX_AUTOSUSPEND_DELAY);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);

        ret = devm_request_threaded_irq(dev, pc_ack_irq, NULL, bam_dmux_pc_ack_irq,
                                        IRQF_ONESHOT, NULL, dmux);
        if (ret)
                return ret;

        ret = devm_request_threaded_irq(dev, dmux->pc_irq, NULL, bam_dmux_pc_irq,
                                        IRQF_ONESHOT, NULL, dmux);
        if (ret)
                return ret;

        ret = irq_get_irqchip_state(dmux->pc_irq, IRQCHIP_STATE_LINE_LEVEL,
                                    &dmux->pc_state);
        if (ret)
                return ret;

        /* Check if remote finished initialization before us */
        if (dmux->pc_state) {
                if (bam_dmux_power_on(dmux))
                        bam_dmux_pc_ack(dmux);
                else
                        bam_dmux_power_off(dmux);
        }

        return 0;
}
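
/*
 * Teardown order matters: unregister the netdevs first, drop our own
 * power vote, wait (with a timeout) for the remote side to drop its
 * vote, and only then release the remaining DMA resources and buffers.
 */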
static int bam_dmux_remove(struct platform_device *pdev)
{
        struct bam_dmux *dmux = platform_get_drvdata(pdev);
        struct device *dev = dmux->dev;
        LIST_HEAD(list);
        int i;

        /* Unregister network interfaces */
        cancel_work_sync(&dmux->register_netdev_work);
        rtnl_lock();
        for (i = 0; i < BAM_DMUX_NUM_CH; ++i)
                if (dmux->netdevs[i])
                        unregister_netdevice_queue(dmux->netdevs[i], &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
        cancel_work_sync(&dmux->tx_wakeup_work);

        /* Drop our own power vote */
        pm_runtime_disable(dev);
        pm_runtime_dont_use_autosuspend(dev);
        bam_dmux_runtime_suspend(dev);
        pm_runtime_set_suspended(dev);

        /* Try to wait for remote side to drop power vote */
        if (!wait_event_timeout(dmux->pc_wait, !dmux->rx, BAM_DMUX_REMOTE_TIMEOUT))
                dev_err(dev, "Timed out waiting for remote side to suspend\n");

        /* Make sure everything is cleaned up before we return */
        disable_irq(dmux->pc_irq);
        bam_dmux_power_off(dmux);
        bam_dmux_free_skbs(dmux->tx_skbs, DMA_TO_DEVICE);

        return 0;
}

static const struct dev_pm_ops bam_dmux_pm_ops = {
        SET_RUNTIME_PM_OPS(bam_dmux_runtime_suspend, bam_dmux_runtime_resume, NULL)
};

static const struct of_device_id bam_dmux_of_match[] = {
        { .compatible = "qcom,bam-dmux" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bam_dmux_of_match);

static struct platform_driver bam_dmux_driver = {
        .probe = bam_dmux_probe,
        .remove = bam_dmux_remove,
        .driver = {
                .name = "bam-dmux",
                .pm = &bam_dmux_pm_ops,
                .of_match_table = bam_dmux_of_match,
        },
};
module_platform_driver(bam_dmux_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm BAM-DMUX WWAN Network Driver");
MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");