// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <[email protected]>
 *	   Lorenzo Bianconi <[email protected]>
 *	   Sean Wang <[email protected]>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
	return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

u32 mt76s_read_pcr(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);
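
/* Read a register over the firmware mailbox: write the target address to
 * H2DSM0R, raise a software interrupt to ask the device to perform the
 * access, then poll WHISR for completion and fetch the value from D2HRM1R.
 * Returns ~0 on any error.
 */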
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}
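
/* Write a register over the firmware mailbox: address and value go to
 * H2DSM0R/H2DSM1R, a software interrupt triggers the access, and the
 * echoed address is verified once WHISR reports completion.
 */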
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}
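
/* Bus accessors: once the MCU firmware is running, register access is
 * routed through the per-driver mcu_ops; before that, it falls back to
 * the mailbox handshake above.
 */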
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rr(dev, offset);
	else
		return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76s_rr(dev, offset) & ~mask;
	mt76s_wr(dev, offset, val);

	return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		mt76s_wr(dev, offset, val[i]);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		val[i] = mt76s_rr(dev, offset);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++) {
		mt76s_wr(dev, data->reg, data->value);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		data->value = mt76s_rr(dev, data->reg);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);
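
/* Enable the SDIO function, take driver ownership of the device, set a
 * 512-byte block size, enable the TX/RX done interrupts (plus the second
 * RX ring on CONNAC2), configure WHISR read-clear mode and the RX
 * aggregation limit, and finally install the IRQ handler.
 */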
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);
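
/* Queue allocation: SDIO has no hardware DMA descriptors, so both RX and
 * TX queues are plain software rings of fixed size (MT76S_NUM_RX_ENTRIES
 * and MT76S_NUM_TX_ENTRIES entries respectively).
 */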
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT76S_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT76S_NUM_TX_ENTRIES;

	return q;
}

int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);
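
/* RX path: entries are filled on the interrupt side by the txrx worker
 * (see sdio_txrx.c); the helpers below drain them in ring order and pass
 * each frame to the driver's rx_skb() hook.
 */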
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}
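
/* Reap completed TX entries, i.e. those whose 'done' flag has been set
 * after the transfer went out on the bus. MCU frames are freed directly
 * instead of being reported to mac80211 as TX status.
 */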
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}

static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data && ndata_frames > 0 &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			ieee80211_queue_work(dev->hw, &dev->sdio.stat_work);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->tx_worker);
}
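
/* Work item that keeps polling the device for pending TX status reports,
 * rescheduling itself while the interface is still running.
 */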
static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;

		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		ieee80211_queue_work(dev->hw, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}
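
/* Enqueue a data frame: the driver's tx_prepare_skb() hook attaches the
 * hardware TX descriptor, then the skb is placed on the software ring
 * for the txrx worker to push over the bus.
 */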
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}
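
/* Raw variant used for MCU messages: the payload is only padded to a
 * 4-byte boundary and queued as-is, with no TX descriptor added here.
 */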
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;

	/* ensure the entry fully updated before bus access */
	smp_wmb();
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
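
/* Tear down all workers, release the SDIO interrupt and free any skbs
 * still sitting on the RX rings.
 */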
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);
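
/* Attach the SDIO bus: start the status and net workers, register the
 * queue/bus ops, and size the xmit_buf staging buffer from the host
 * controller's maximum transfer capabilities.
 */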
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	u32 host_max_cap;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	host_max_cap = min_t(u32, func->card->host->max_req_size,
			     func->cur_blksize *
			     func->card->host->max_blk_count);
	dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
	dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
					  GFP_KERNEL);
	if (!dev->sdio.xmit_buf)
		err = -ENOMEM;

	return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_AUTHOR("Lorenzo Bianconi <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");