// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000
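
/* SER (system error recovery) overview, summarized from the state machine
 * in this file (a reading aid inferred from the code, not authoritative
 * Realtek documentation):
 *
 * - L0 errors are recovered by firmware on its own; the driver only sees
 *   SER_EV_L0_RESET notifications.
 * - L1 errors use a driver/firmware handshake: FW reports M1
 *   (SER_EV_L1_RESET), the driver halts TRX and answers with M2, FW
 *   reports M3 (SER_EV_DO_RECOVERY), the driver answers with M4, and FW
 *   finishes with M5 (SER_EV_MAC_RESET_DONE).
 * - L2 errors are not recoverable in place: the driver captures a
 *   devcoredump and asks mac80211 to restart the whole device.
 */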

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}
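
/* Each core-dump section is a packed (type, type_size, data) record. The
 * macro below generates both the record type and its init helper; the
 * 0x0123456789abcdef padding appears to serve as a recognizable filler
 * pattern in the dump (an observation from the initializer, not a
 * documented ABI).
 */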

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredumpv(), the buffer's lifetime is owned by
	 * the device coredump framework. Note that a new dump will be
	 * discarded if a previous one hasn't been released by the framework
	 * yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* If a problem occurs while filling the core dump data, we don't
	 * hand the buffer to the device coredump framework; instead, we
	 * free it ourselves.
	 */
	vfree(buf);
}
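
/* State machine plumbing: each state handler receives real events plus the
 * synthetic SER_EV_STATE_IN/SER_EV_STATE_OUT events that ser_state_goto()
 * delivers on entry and exit, so per-state setup and teardown live in the
 * same switch as the regular event handling.
 */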

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);
	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);
	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}
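
/* Events can be reported from atomic contexts (e.g. interrupt paths
 * calling rtw89_ser_notify()), which is presumably why the message is
 * allocated with GFP_ATOMIC and the queue uses an irq-safe spinlock; the
 * state machine itself always runs from the work item above.
 */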

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);
}

/* hal function */
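/* The HCI LV1 recovery callback is driven in two steps (the names below
 * come from the enum values it is passed): RTW89_LV1_RCVY_STEP_1 halts
 * DMA before the TRX reset and RTW89_LV1_RCVY_STEP_2 re-enables it
 * afterwards. The RTW89_SER_HAL_STOP_DMA flag records that step 1
 * succeeded so that step 2 is only attempted when needed.
 */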
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* set alarm to prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;
	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;
	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		break;
	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* prevent FW response timeout */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;
	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}
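
/* MAC memory is read through an indirect window: writing a page-aligned
 * base address to R_AX_FILTER_MODEL_ADDR maps one MAC_MEM_DUMP_PAGE_SIZE
 * page into the R_AX_INDIR_ACCESS_ENTRY aperture, which is then read out
 * 32 bits at a time. 'residue' skips the offset into the first, possibly
 * partial, page.
 */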
static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = rtw89_mac_mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);

		for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
		     i < R_AX_INDIR_ACCESS_ENTRY + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}
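
/* Firmware backtrace layout, as consumed below: the head of the reserved
 * PLE dump holds a __fw_backtrace_entry describing where the backtrace
 * lives in WCPU address space, its size, and a magic key used to sanity
 * check it; the backtrace itself is a sequence of (ra, sp) register pairs.
 */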
struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, fwbt_addr);

	for (i = R_AX_INDIR_ACCESS_ENTRY;
	     i < R_AX_INDIR_ACCESS_ENTRY + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}
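
/* L2 recovery: before asking mac80211 to restart the device, capture a
 * devcoredump (the reserved PLE memory plus the firmware backtrace parsed
 * from its head), then drop every MAC binding and stop the core so the
 * restart begins from a clean slate.
 */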
static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;
	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}
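
/* Entry point for error reports from lower layers: translate the MAC
 * error status into a state-machine event and queue it. The explicit
 * L1-to-L2 promotion and every code in the
 * [MAC_AX_ERR_L2_ERR_AH_DMA, MAC_AX_GET_ERR_MAX] range all collapse into
 * SER_EV_L2_RESET.
 */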
int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);