// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <[email protected]>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val)	do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif
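
/*
 * txwi cache: each entry is a driver txwi buffer with a struct
 * mt76_txwi_cache appended, DMA-mapped once at allocation time and kept on
 * dev->txwi_cache for reuse between transmissions.
 */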
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}
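
/*
 * Reprogram the descriptor base and ring size, then resync the software
 * head and tail with the current hardware DMA index.
 */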
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}
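
/*
 * Fill hardware descriptors with up to two buffers each, record the
 * mappings in q->entry[] and advance q->head; returns the index of the
 * last descriptor written.
 */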
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}
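
/* Unmap a completed TX slot and copy its entry to prev_e before clearing it. */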
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}
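
/*
 * Reclaim completed TX descriptors up to the hardware DMA index, or the
 * whole ring when flushing, returning txwi entries to the cache and waking
 * up tx_wait once the queue drains.
 */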
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}
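
/*
 * Detach and unmap the RX buffer of one descriptor, reporting its length and
 * whether more fragments of the same frame follow.
 */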
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
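
/*
 * Map and queue a single frame without a txwi, then kick the ring; the skb
 * is freed on error.
 */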
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}
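
/*
 * Map the txwi, skb head and all fragments, let the driver fill the txwi via
 * tx_prepare_skb() and add the chain to the TX ring. On error the mappings
 * are undone and a TX status is reported for the dropped frame.
 */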
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}
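
/*
 * Refill the RX ring with page-fragment buffers until it is full or
 * allocation fails; returns the number of buffers added.
 */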
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
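
/*
 * Attach a queue to the Wireless Ethernet Dispatch (WED) block when it is
 * active; without WED support this is a no-op.
 */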
static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}
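
/*
 * Allocate the descriptor ring and software entry array for a queue, set up
 * WED if requested and reset the ring.
 */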
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}
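
/*
 * Drop all pending RX buffers from a queue and release its page-fragment
 * cache.
 */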
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);

	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);
}
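
/*
 * Append an RX fragment to the frame being reassembled in q->rx_head and
 * hand the skb to the driver once the last fragment has arrived.
 */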
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}
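
/*
 * RX processing loop: dequeue completed buffers, reassemble fragmented
 * frames, build skbs and pass them to the driver, then refill the ring.
 */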
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}
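
/* NAPI poll handler shared by all RX queues. */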
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);