bh.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <[email protected]>
 *
 * Based on:
 * ST-Ericsson UMAC CW1200 driver, which is
 * Copyright (c) 2010, ST-Ericsson
 * Author: Ajitpal Singh <[email protected]>
 */

#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>

#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"

static int cw1200_bh(void *arg);

#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1)*2 bytes
 * ("*2" converts 16-bit words to bytes).
 */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)

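/* Numerically: DOWNLOAD_BLOCK_SIZE_WR = 0x1000 - 4 = 4092 bytes, so
 * MAX_SZ_RD_WR_BUFFERS = 8184 bytes and, after reserving the two
 * piggybacked control-register bytes, EFFECTIVE_BUF_SIZE = 8182 bytes.
 */
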
/* Suspend state privates */
enum cw1200_bh_pm_state {
        CW1200_BH_RESUMED = 0,
        CW1200_BH_SUSPEND,
        CW1200_BH_SUSPENDED,
        CW1200_BH_RESUME,
};

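/* The suspend handshake walks RESUMED -> SUSPEND (requested by
 * cw1200_bh_suspend()) -> SUSPENDED (acknowledged by the BH thread) ->
 * RESUME (requested by cw1200_bh_resume()) -> RESUMED (acknowledged).
 */
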
static void cw1200_bh_work(struct work_struct *work)
{
        struct cw1200_common *priv =
                container_of(work, struct cw1200_common, bh_work);
        cw1200_bh(priv);
}

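/* Set up BH state and launch the BH loop on a dedicated single-threaded,
 * high-priority workqueue; the work item above runs cw1200_bh() until
 * termination, so queue_work() here effectively starts the "thread".
 */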
int cw1200_register_bh(struct cw1200_common *priv)
{
        int err = 0;
        /* Realtime workqueue */
        priv->bh_workqueue = alloc_workqueue("cw1200_bh",
                                WQ_MEM_RECLAIM | WQ_HIGHPRI
                                | WQ_CPU_INTENSIVE, 1);

        if (!priv->bh_workqueue)
                return -ENOMEM;

        INIT_WORK(&priv->bh_work, cw1200_bh_work);

        pr_debug("[BH] register.\n");

        atomic_set(&priv->bh_rx, 0);
        atomic_set(&priv->bh_tx, 0);
        atomic_set(&priv->bh_term, 0);
        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
        priv->bh_error = 0;
        priv->hw_bufs_used = 0;
        priv->buf_id_tx = 0;
        priv->buf_id_rx = 0;
        init_waitqueue_head(&priv->bh_wq);
        init_waitqueue_head(&priv->bh_evt_wq);

        err = !queue_work(priv->bh_workqueue, &priv->bh_work);
        WARN_ON(err);
        return err;
}

void cw1200_unregister_bh(struct cw1200_common *priv)
{
        atomic_inc(&priv->bh_term);
        wake_up(&priv->bh_wq);

        destroy_workqueue(priv->bh_workqueue);
        priv->bh_workqueue = NULL;

        pr_debug("[BH] unregistered.\n");
}

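/* Both wakeup paths below use atomic_inc_return() == 1 so that only the
 * first of several back-to-back events issues a wake_up(); the BH thread
 * consumes the whole counter with atomic_xchg() in its wait loop.
 */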
void cw1200_irq_handler(struct cw1200_common *priv)
{
        pr_debug("[BH] irq.\n");

        /* Disable Interrupts! */
        /* NOTE: hwbus_ops->lock already held */
        __cw1200_irq_enable(priv, 0);

        if (/* WARN_ON */(priv->bh_error))
                return;

        if (atomic_inc_return(&priv->bh_rx) == 1)
                wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);

void cw1200_bh_wakeup(struct cw1200_common *priv)
{
        pr_debug("[BH] wakeup.\n");
        if (priv->bh_error) {
                pr_err("[BH] wakeup failed (BH error)\n");
                return;
        }

        if (atomic_inc_return(&priv->bh_tx) == 1)
                wake_up(&priv->bh_wq);
}

int cw1200_bh_suspend(struct cw1200_common *priv)
{
        pr_debug("[BH] suspend.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

int cw1200_bh_resume(struct cw1200_common *priv)
{
        pr_debug("[BH] resume.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

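/* hw_bufs_used counts messages posted to the device but not yet confirmed;
 * it must never exceed wsm_caps.input_buffers (the buffer pool advertised
 * by the firmware).  wsm_release_tx_buffer() returns -1 on an accounting
 * underflow, 1 if the pool had been exhausted (so TX may resume), and 0
 * otherwise; waiters on bh_evt_wq are woken once the pool fully drains.
 */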
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
        ++priv->hw_bufs_used;
}

int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
        int ret = 0;
        int hw_bufs_used = priv->hw_bufs_used;

        priv->hw_bufs_used -= count;
        if (WARN_ON(priv->hw_bufs_used < 0))
                ret = -1;
        else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
                ret = 1;
        if (!priv->hw_bufs_used)
                wake_up(&priv->bh_evt_wq);
        return ret;
}

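/* Read the control register.  Note the read is retried once before
 * giving up, presumably to ride out a transient bus error right after
 * device wakeup.
 */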
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
                                   u16 *ctrl_reg)
{
        int ret;

        ret = cw1200_reg_read_16(priv,
                                 ST90TDS_CONTROL_REG_ID, ctrl_reg);
        if (ret) {
                ret = cw1200_reg_read_16(priv,
                                         ST90TDS_CONTROL_REG_ID, ctrl_reg);
                if (ret)
                        pr_err("[BH] Failed to read control register.\n");
        }
        return ret;
}

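/* Wake the device: program the DPLL for the configured reference clock,
 * then assert WLAN_UP.  Returns 1 if the device already reports WLAN_RDY
 * (fully awake), 0 if the wakeup was requested but is still in progress,
 * or a negative error on bus failure.
 */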
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
        u16 ctrl_reg;
        int ret;

        pr_debug("[BH] Device wakeup.\n");

        /* First, set the dpll register */
        ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
                                  cw1200_dpll_from_clk(priv->hw_refclk));
        if (WARN_ON(ret))
                return ret;

        /* To force the device to be always-on, the host sets WLAN_UP to 1 */
        ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
                                  ST90TDS_CONT_WUP_BIT);
        if (WARN_ON(ret))
                return ret;

        ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
        if (WARN_ON(ret))
                return ret;

        /* If the device returns WLAN_RDY as 1, the device is active and will
         * remain active.
         */
        if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
                pr_debug("[BH] Device awake.\n");
                return 1;
        }

        return 0;
}

/* Must be called from BH thread. */
void cw1200_enable_powersave(struct cw1200_common *priv,
                             bool enable)
{
        pr_debug("[BH] Powersave is %s.\n",
                 enable ? "enabled" : "disabled");
        priv->powersave_enabled = enable;
}

static int cw1200_bh_rx_helper(struct cw1200_common *priv,
                               uint16_t *ctrl_reg,
                               int *tx)
{
        size_t read_len = 0;
        struct sk_buff *skb_rx = NULL;
        struct wsm_hdr *wsm;
        size_t wsm_len;
        u16 wsm_id;
        u8 wsm_seq;
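        /* Note: rx_resync is re-initialized to 1 on every call, so the
         * sequence-number check further down never actually fires; this
         * looks like a leftover from a version where the flag persisted
         * across reads.
         */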
        int rx_resync = 1;
        size_t alloc_len;
        u8 *data;

        read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
        if (!read_len)
                return 0; /* No more work */

        if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
                    (read_len > EFFECTIVE_BUF_SIZE))) {
                pr_debug("Invalid read len: %zu (%04x)",
                         read_len, *ctrl_reg);
                goto err;
        }
        /* Add 2 bytes for the piggybacked control register that the
         * device appends to every read (see below).
         */
        read_len = read_len + 2;

        alloc_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, read_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
                pr_debug("Read aligned len: %zu\n",
                         alloc_len);
        }

        skb_rx = dev_alloc_skb(alloc_len);
        if (WARN_ON(!skb_rx))
                goto err;

        skb_trim(skb_rx, 0);
        skb_put(skb_rx, read_len);
        data = skb_rx->data;
        if (WARN_ON(!data))
                goto err;

        if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
                pr_err("rx blew up, len %zu\n", alloc_len);
                goto err;
        }
        /* Piggyback: the last 16-bit word of the transfer carries a fresh
         * copy of the control register, saving a separate register read
         * before the next iteration.
         */
        *ctrl_reg = __le16_to_cpu(
                ((__le16 *)data)[alloc_len / 2 - 1]);

        wsm = (struct wsm_hdr *)data;
        wsm_len = __le16_to_cpu(wsm->len);
        if (WARN_ON(wsm_len > read_len))
                goto err;

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("<-- ",
                                     DUMP_PREFIX_NONE,
                                     data, wsm_len);
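
        /* The 16-bit WSM id packs the message id into bits 0..11 and a
         * 3-bit sequence number into bits 13..15.  For example, a raw id
         * of 0xA404 decodes to wsm_id = 0x404, wsm_seq = 5.
         */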
        wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
        wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

        skb_trim(skb_rx, wsm_len);

        if (wsm_id == 0x0800) {
                wsm_handle_exception(priv,
                                     &data[sizeof(*wsm)],
                                     wsm_len - sizeof(*wsm));
                goto err;
        } else if (!rx_resync) {
                if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
                        goto err;
        }
        priv->wsm_rx_seq = (wsm_seq + 1) & 7;
        rx_resync = 0;
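
        /* Ids with bit 10 (0x0400) set appear to carry a TX confirmation,
         * so one device input buffer can be released; a positive return
         * from wsm_release_tx_buffer() means the pool had been full and
         * TX should be re-attempted.
         */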
        if (wsm_id & 0x0400) {
                int rc = wsm_release_tx_buffer(priv, 1);
                if (WARN_ON(rc < 0))
                        return rc;
                else if (rc > 0)
                        *tx = 1;
        }

        /* wsm_handle_rx() takes care of the SKB lifetime */
        if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
                goto err;

        dev_kfree_skb(skb_rx);
        return 0;

err:
        dev_kfree_skb(skb_rx);
        return -1;
}

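/* Pull one frame from the WSM queues and push it to the device.
 * Returns 1 if more frames may be sent in the same burst, 0 when there
 * is no (more) work or the device still has to wake up (pending_tx is
 * set so the attempt is replayed), and a negative value on error.
 */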
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
                               int *pending_tx,
                               int *tx_burst)
{
        size_t tx_len;
        u8 *data;
        int ret;
        struct wsm_hdr *wsm;

        if (priv->device_can_sleep) {
                ret = cw1200_device_wakeup(priv);
                if (WARN_ON(ret < 0)) { /* Error in wakeup */
                        *pending_tx = 1;
                        return 0;
                } else if (ret) { /* Woke up */
                        priv->device_can_sleep = false;
                } else { /* Did not awake */
                        *pending_tx = 1;
                        return 0;
                }
        }

        wsm_alloc_tx_buffer(priv);
        ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
        if (ret <= 0) {
                wsm_release_tx_buffer(priv, 1);
                if (WARN_ON(ret < 0))
                        return ret; /* Error */
                return 0; /* No work */
        }

        wsm = (struct wsm_hdr *)data;
        BUG_ON(tx_len < sizeof(*wsm));
        BUG_ON(__le16_to_cpu(wsm->len) != tx_len);
        atomic_inc(&priv->bh_tx);

        tx_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, tx_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
                pr_debug("Write aligned len: %zu\n", tx_len);
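
        /* Stamp the TX sequence number into the id (the counterpart of
         * the ">> 13" sequence decode on the RX path): clear the old
         * sequence bits, then OR in the current wsm_tx_seq.
         */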
        wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
        wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

        if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
                pr_err("tx blew up, len %zu\n", tx_len);
                wsm_release_tx_buffer(priv, 1);
                return -1; /* Error */
        }

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("--> ",
                                     DUMP_PREFIX_NONE,
                                     data,
                                     __le16_to_cpu(wsm->len));

        wsm_txed(priv, data);
        priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

        if (*tx_burst > 1) {
                cw1200_debug_tx_burst(priv);
                return 1; /* Work remains */
        }

        return 0;
}

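/* The BH loop proper.  Each iteration: decide how long the device may
 * sleep, (optionally) issue a dummy read to kick the SDIO retry logic,
 * then block until an RX interrupt, a TX request, termination, suspend,
 * or a timeout.  Timeouts double as an interrupt-loss watchdog while
 * frames are outstanding.  The rx:/tx:/done_rx: labels below implement
 * the data-drain loop as a goto-based state machine.
 */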
static int cw1200_bh(void *arg)
{
        struct cw1200_common *priv = arg;
        int rx, tx, term, suspend;
        u16 ctrl_reg = 0;
        int tx_allowed;
        int pending_tx = 0;
        int tx_burst;
        long status;
        u32 dummy;
        int ret;

        for (;;) {
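                /* Pick the wait timeout: 1s after putting an idle device
                 * to sleep, 1s while frames are in flight (interrupt-loss
                 * detection), otherwise wait indefinitely.
                 */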
                if (!priv->hw_bufs_used &&
                    priv->powersave_enabled &&
                    !priv->device_can_sleep &&
                    !atomic_read(&priv->recent_scan)) {
                        status = 1 * HZ;
                        pr_debug("[BH] Device wakedown. No data.\n");
                        cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
                        priv->device_can_sleep = true;
                } else if (priv->hw_bufs_used) {
                        /* Interrupt loss detection */
                        status = 1 * HZ;
                } else {
                        status = MAX_SCHEDULE_TIMEOUT;
                }

                /* Dummy Read for SDIO retry mechanism */
                if ((priv->hw_type != -1) &&
                    (atomic_read(&priv->bh_rx) == 0) &&
                    (atomic_read(&priv->bh_tx) == 0))
                        cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
                                        &dummy, sizeof(dummy));

                pr_debug("[BH] waiting ...\n");
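                /* The ({ ... }) statement expression is the wait
                 * condition: each time it is evaluated it claims any
                 * pending rx/tx/term events (atomic_xchg to 0), so an
                 * event can never be observed and then lost.
                 */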
                status = wait_event_interruptible_timeout(priv->bh_wq, ({
                        rx = atomic_xchg(&priv->bh_rx, 0);
                        tx = atomic_xchg(&priv->bh_tx, 0);
                        term = atomic_xchg(&priv->bh_term, 0);
                        suspend = pending_tx ?
                                0 : atomic_read(&priv->bh_suspend);
                        (rx || tx || term || suspend || priv->bh_error);
                }), status);

                pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
                         rx, tx, term, suspend, priv->bh_error, status);
                /* Did an error occur? */
                if ((status < 0 && status != -ERESTARTSYS) ||
                    term || priv->bh_error) {
                        break;
                }
                if (!status) {  /* wait_event timed out */
                        unsigned long timestamp = jiffies;
                        long timeout;
                        int pending = 0;
                        int i;

                        /* Check to see if we have any outstanding frames */
                        if (priv->hw_bufs_used && (!rx || !tx)) {
                                wiphy_warn(priv->hw->wiphy,
                                           "Missed interrupt? (%d frames outstanding)\n",
                                           priv->hw_bufs_used);
                                rx = 1;

                                /* Get a timestamp of "oldest" frame */
                                for (i = 0; i < 4; ++i)
                                        pending += cw1200_queue_get_xmit_timestamp(
                                                &priv->tx_queue[i],
                                                &timestamp,
                                                priv->pending_frame_id);

                                /* Check if frame transmission is timed out.
                                 * Add an extra second with respect to possible
                                 * interrupt loss.
                                 */
                                timeout = timestamp +
                                        WSM_CMD_LAST_CHANCE_TIMEOUT +
                                        1 * HZ -
                                        jiffies;

                                /* And terminate BH thread if the frame is "stuck" */
                                if (pending && timeout < 0) {
                                        wiphy_warn(priv->hw->wiphy,
                                                   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
                                                   priv->hw_bufs_used, pending,
                                                   timestamp, jiffies);
                                        break;
                                }
                        } else if (!priv->device_can_sleep &&
                                   !atomic_read(&priv->recent_scan)) {
                                pr_debug("[BH] Device wakedown. Timeout.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }
                        goto done;
                } else if (suspend) {
                        pr_debug("[BH] Device suspend.\n");
                        if (priv->powersave_enabled) {
                                pr_debug("[BH] Device wakedown. Suspend.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }

                        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
                        wake_up(&priv->bh_evt_wq);
                        status = wait_event_interruptible(priv->bh_wq,
                                                          CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
                        if (status < 0) {
                                wiphy_err(priv->hw->wiphy,
                                          "Failed to wait for resume: %ld.\n",
                                          status);
                                break;
                        }
                        pr_debug("[BH] Device resume.\n");
                        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
                        wake_up(&priv->bh_evt_wq);
                        atomic_inc(&priv->bh_rx);
                        goto done;
                }
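
                /* Data path: drain RX while the piggybacked control
                 * register reports pending data, interleave TX while
                 * device input buffers are available, and loop via
                 * done_rx until both directions are idle.
                 */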
        rx:
                tx += pending_tx;
                pending_tx = 0;

                if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                        break;

                /* Don't bother trying to rx unless we have data to read */
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                        ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                        if (ret < 0)
                                break;
                        /* Double up here if there's more data.. */
                        if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                                ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                                if (ret < 0)
                                        break;
                        }
                }

        tx:
                if (tx) {
                        tx = 0;

                        BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
                        tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
                        tx_allowed = tx_burst > 0;

                        if (!tx_allowed) {
                                /* Buffers full.  Ensure we process tx
                                 * after we handle rx..
                                 */
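                                /* (As written, tx has already been
                                 * cleared above, so pending_tx ends up 0
                                 * here; "pending_tx = 1" appears to be
                                 * the intent.)
                                 */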
                                pending_tx = tx;
                                goto done_rx;
                        }

                        ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
                        if (ret < 0)
                                break;
                        if (ret > 0) /* More to transmit */
                                tx = ret;

                        /* Re-read ctrl reg */
                        if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                                break;
                }

        done_rx:
                if (priv->bh_error)
                        break;
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
                        goto rx;
                if (tx)
                        goto tx;
        done:
                /* Re-enable device interrupts */
                priv->hwbus_ops->lock(priv->hwbus_priv);
                __cw1200_irq_enable(priv, 1);
                priv->hwbus_ops->unlock(priv->hwbus_priv);
        }

        /* Explicitly disable device interrupts */
        priv->hwbus_ops->lock(priv->hwbus_priv);
        __cw1200_irq_enable(priv, 0);
        priv->hwbus_ops->unlock(priv->hwbus_priv);

        if (!term) {
                pr_err("[BH] Fatal error, exiting.\n");
                priv->bh_error = 1;
                /* TODO: schedule_work(recovery) */
        }
        return 0;
}