bh.c 8.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Interrupt bottom half (BH).
  4. *
  5. * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
  6. * Copyright (c) 2010, ST-Ericsson
  7. */
  8. #include <linux/gpio/consumer.h>
  9. #include <net/mac80211.h>
  10. #include "bh.h"
  11. #include "wfx.h"
  12. #include "hwio.h"
  13. #include "traces.h"
  14. #include "hif_rx.h"
  15. #include "hif_api_cmd.h"
/* Assert the wake-up GPIO and wait until the chip signals it is awake.
 *
 * With API < 1.4 the chip does not acknowledge wake-up, so a fixed delay is
 * used instead. With newer firmware, readiness is signaled through the
 * hif.ctrl_ready completion; since completion.h offers no way to wait without
 * consuming the completion, it is consumed and immediately re-completed so
 * its state is left unchanged for other waiters.
 */
static void device_wakeup(struct wfx_dev *wdev)
{
	int max_retry = 3;

	/* No wake-up GPIO wired: the chip never sleeps, nothing to do */
	if (!wdev->pdata.gpio_wakeup)
		return;
	/* Wake-up line already asserted */
	if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
		return;
	if (wfx_api_older_than(wdev, 1, 4)) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		/* Old firmware cannot acknowledge the wake-up; just give the
		 * chip time to come up.
		 */
		if (!completion_done(&wdev->hif.ctrl_ready))
			usleep_range(2000, 2500);
		return;
	}
	for (;;) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		/* completion.h does not provide any function to wait for a completion without
		 * consuming it (a kind of wait_for_completion_done_timeout()). So we have to
		 * emulate it.
		 */
		if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) {
			complete(&wdev->hif.ctrl_ready);
			return;
		} else if (max_retry-- > 0) {
			/* Older firmwares have a race in the sleep/wake-up process. Redoing the
			 * process is sufficient to unfreeze the chip.
			 */
			dev_err(wdev->dev, "timeout while wake up chip\n");
			gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
			usleep_range(2000, 2500);
		} else {
			dev_err(wdev->dev, "max wake-up retries reached\n");
			return;
		}
	}
}
  50. static void device_release(struct wfx_dev *wdev)
  51. {
  52. if (!wdev->pdata.gpio_wakeup)
  53. return;
  54. gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
  55. }
/* Read one HIF message of @read_len bytes from the chip and dispatch it.
 *
 * On success, returns the "piggyback" value: a copy of the control register
 * that the chip appends after the payload, indicating whether another message
 * is already pending. Returns -ENOMEM or -EIO on failure. @is_cnf is
 * incremented when the received message is a confirmation rather than an
 * indication.
 */
static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
{
	struct sk_buff *skb;
	struct wfx_hif_msg *hif;
	size_t alloc_len;
	size_t computed_len;
	int release_count;
	int piggyback = 0;

	WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceed the chip capability");

	/* Add 2 to take into account piggyback size */
	alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
	skb = dev_alloc_skb(alloc_len);
	if (!skb)
		return -ENOMEM;
	if (wfx_data_read(wdev, skb->data, alloc_len))
		goto err;
	/* The last 16-bit word of the transfer is the piggybacked control register */
	piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
	_trace_piggyback(piggyback, false);
	hif = (struct wfx_hif_msg *)skb->data;
	WARN(hif->encrypted & 0x3, "encryption is unsupported");
	if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read"))
		goto err;
	/* The length advertised in the HIF header must match what was read,
	 * rounded up since transfers happen in 16-bit words.
	 */
	computed_len = le16_to_cpu(hif->len);
	computed_len = round_up(computed_len, 2);
	if (computed_len != read_len) {
		dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
			computed_len, read_len);
		print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
			       hif, read_len, true);
		goto err;
	}
	/* A confirmation means the chip released one or more TX buffers */
	if (!(hif->id & HIF_ID_IS_INDICATION)) {
		(*is_cnf)++;
		if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
			release_count =
				((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
		else
			release_count = 1;
		WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
		wdev->hif.tx_buffers_used -= release_count;
	}
	_trace_hif_recv(hif, wdev->hif.tx_buffers_used);
	/* Exception and error indications do not follow the sequence numbering */
	if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
		if (hif->seqnum != wdev->hif.rx_seqnum)
			dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
				 hif->seqnum, wdev->hif.rx_seqnum);
		wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
	}
	skb_put(skb, le16_to_cpu(hif->len));
	/* wfx_handle_rx() takes ownership of the SKB lifetime */
	wfx_handle_rx(wdev, skb);
	if (!wdev->hif.tx_buffers_used)
		wake_up(&wdev->hif.tx_buffers_empty);
	return piggyback;
err:
	if (skb)
		dev_kfree_skb(skb);
	return -EIO;
}
/* Receive up to @max_msg messages from the chip.
 *
 * Uses the piggybacked control register value returned by rx_helper() when
 * available, avoiding an extra register read between back-to-back messages.
 * Returns the number of messages received; @num_cnf accumulates how many of
 * them were confirmations.
 */
static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
{
	size_t len;
	int i;
	int ctrl_reg, piggyback;

	piggyback = 0;
	for (i = 0; i < max_msg; i++) {
		/* Prefer the piggybacked control value; otherwise consume the
		 * value latched by the IRQ handler, if any.
		 */
		if (piggyback & CTRL_NEXT_LEN_MASK)
			ctrl_reg = piggyback;
		else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
			ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
		else
			ctrl_reg = 0;
		if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
			return i;
		/* ctrl_reg units are 16bits words */
		len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
		piggyback = rx_helper(wdev, len, num_cnf);
		if (piggyback < 0)
			return i;
		if (!(piggyback & CTRL_WLAN_READY))
			dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
				piggyback);
	}
	/* Budget exhausted but more data is pending: stash the control value
	 * back so the next BH run picks it up.
	 */
	if (piggyback & CTRL_NEXT_LEN_MASK) {
		ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
		complete(&wdev->hif.ctrl_ready);
		if (ctrl_reg)
			dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
				ctrl_reg, piggyback);
	}
	return i;
}
  148. static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif)
  149. {
  150. int ret;
  151. void *data;
  152. bool is_encrypted = false;
  153. size_t len = le16_to_cpu(hif->len);
  154. WARN(len < sizeof(*hif), "try to send corrupted data");
  155. hif->seqnum = wdev->hif.tx_seqnum;
  156. wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);
  157. data = hif;
  158. WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf),
  159. "request exceed the chip capability: %zu > %d\n",
  160. len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf));
  161. len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
  162. ret = wfx_data_write(wdev, data, len);
  163. if (ret)
  164. goto end;
  165. wdev->hif.tx_buffers_used++;
  166. _trace_hif_send(hif, wdev->hif.tx_buffers_used);
  167. end:
  168. if (is_encrypted)
  169. kfree(data);
  170. }
  171. static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
  172. {
  173. struct wfx_hif_msg *hif;
  174. int i;
  175. for (i = 0; i < max_msg; i++) {
  176. hif = NULL;
  177. if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) {
  178. if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
  179. WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
  180. hif = wdev->hif_cmd.buf_send;
  181. } else {
  182. hif = wfx_tx_queues_get(wdev);
  183. }
  184. }
  185. if (!hif)
  186. return i;
  187. tx_helper(wdev, hif);
  188. }
  189. return i;
  190. }
  191. /* In SDIO mode, it is necessary to make an access to a register to acknowledge last received
  192. * message. It could be possible to restrict this acknowledge to SDIO mode and only if last
  193. * operation was rx.
  194. */
  195. static void ack_sdio_data(struct wfx_dev *wdev)
  196. {
  197. u32 cfg_reg;
  198. wfx_config_reg_read(wdev, &cfg_reg);
  199. if (cfg_reg & 0xFF) {
  200. dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF);
  201. wfx_config_reg_write_bits(wdev, 0xFF, 0x00);
  202. }
  203. }
/* Bottom half: exchange messages with the chip until both directions idle.
 *
 * Wakes the chip, then alternates TX and RX bursts (32 messages max each)
 * until neither side has work left. Finally releases the wake-up GPIO if no
 * TX buffer is still in flight and no new work was queued meanwhile.
 */
static void bh_work(struct work_struct *work)
{
	struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
	int stats_req = 0, stats_cnf = 0, stats_ind = 0;
	bool release_chip = false, last_op_is_rx = false;
	int num_tx, num_rx;

	device_wakeup(wdev);
	do {
		num_tx = bh_work_tx(wdev, 32);
		stats_req += num_tx;
		if (num_tx)
			last_op_is_rx = false;
		num_rx = bh_work_rx(wdev, 32, &stats_cnf);
		stats_ind += num_rx;
		if (num_rx)
			last_op_is_rx = true;
	} while (num_rx || num_tx);
	/* bh_work_rx() counted every message; subtract the confirmations to
	 * keep only the indications.
	 */
	stats_ind -= stats_cnf;
	if (last_op_is_rx)
		ack_sdio_data(wdev);
	if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
		device_release(wdev);
		release_chip = true;
	}
	_trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip);
}
/* An IRQ from the chip occurred */
  231. void wfx_bh_request_rx(struct wfx_dev *wdev)
  232. {
  233. u32 cur, prev;
  234. wfx_control_reg_read(wdev, &cur);
  235. prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
  236. complete(&wdev->hif.ctrl_ready);
  237. queue_work(wdev->bh_wq, &wdev->hif.bh);
  238. if (!(cur & CTRL_NEXT_LEN_MASK))
  239. dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
  240. cur);
  241. if (prev != 0)
  242. dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
  243. prev, cur);
  244. }
/* The driver wants to send data */
  246. void wfx_bh_request_tx(struct wfx_dev *wdev)
  247. {
  248. queue_work(wdev->bh_wq, &wdev->hif.bh);
  249. }
/* If the IRQ is not available, this function allows to manually poll the control register and
 * simulate an IRQ when an event happens.
 *
 * Note that the device has a bug: if an IRQ is raised while the host reads the control register,
 * the IRQ is lost. So, use this function carefully (only during device initialisation).
 */
  256. void wfx_bh_poll_irq(struct wfx_dev *wdev)
  257. {
  258. ktime_t now, start;
  259. u32 reg;
  260. WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
  261. flush_workqueue(wdev->bh_wq);
  262. start = ktime_get();
  263. for (;;) {
  264. wfx_control_reg_read(wdev, &reg);
  265. now = ktime_get();
  266. if (reg & 0xFFF)
  267. break;
  268. if (ktime_after(now, ktime_add_ms(start, 1000))) {
  269. dev_err(wdev->dev, "time out while polling control register\n");
  270. return;
  271. }
  272. udelay(200);
  273. }
  274. wfx_bh_request_rx(wdev);
  275. }
  276. void wfx_bh_register(struct wfx_dev *wdev)
  277. {
  278. INIT_WORK(&wdev->hif.bh, bh_work);
  279. init_completion(&wdev->hif.ctrl_ready);
  280. init_waitqueue_head(&wdev->hif.tx_buffers_empty);
  281. }
  282. void wfx_bh_unregister(struct wfx_dev *wdev)
  283. {
  284. flush_work(&wdev->hif.bh);
  285. }