dxe.c

  1. /*
  2. * Copyright (c) 2013 Eugene Krasnikov <[email protected]>
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  11. * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
  13. * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
  14. * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. /* DXE - DMA transfer engine
  17. * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
  18. * Data packets are transferred through the low channels,
  19. * management packets are transferred through the high channels.
  20. */
  21. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  22. #include <linux/interrupt.h>
  23. #include <linux/soc/qcom/smem_state.h>
  24. #include "wcn36xx.h"
  25. #include "txrx.h"
  26. static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
  27. {
  28. wcn36xx_dbg(WCN36XX_DBG_DXE,
  29. "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
  30. addr, data);
  31. writel(data, wcn->ccu_base + addr);
  32. }
  33. static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
  34. {
  35. wcn36xx_dbg(WCN36XX_DBG_DXE,
  36. "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
  37. addr, data);
  38. writel(data, wcn->dxe_base + addr);
  39. }
  40. static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
  41. {
  42. *data = readl(wcn->dxe_base + addr);
  43. wcn36xx_dbg(WCN36XX_DBG_DXE,
  44. "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
  45. addr, *data);
  46. }
  47. static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
  48. {
  49. struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
  50. int i;
  51. for (i = 0; i < ch->desc_num && ctl; i++) {
  52. next = ctl->next;
  53. kfree(ctl);
  54. ctl = next;
  55. }
  56. }
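/* Allocate the list of DXE control blocks for a channel: ch->desc_num
 * blocks linked into a circular singly-linked list, with head_blk_ctl and
 * tail_blk_ctl both pointing at the first block. On allocation failure the
 * partially built list is freed again.
 */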
  57. static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
  58. {
  59. struct wcn36xx_dxe_ctl *prev_ctl = NULL;
  60. struct wcn36xx_dxe_ctl *cur_ctl = NULL;
  61. int i;
  62. spin_lock_init(&ch->lock);
  63. for (i = 0; i < ch->desc_num; i++) {
  64. cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
  65. if (!cur_ctl)
  66. goto out_fail;
  67. cur_ctl->ctl_blk_order = i;
  68. if (i == 0) {
  69. ch->head_blk_ctl = cur_ctl;
  70. ch->tail_blk_ctl = cur_ctl;
  71. } else if (ch->desc_num - 1 == i) {
  72. prev_ctl->next = cur_ctl;
  73. cur_ctl->next = ch->head_blk_ctl;
  74. } else {
  75. prev_ctl->next = cur_ctl;
  76. }
  77. prev_ctl = cur_ctl;
  78. }
  79. return 0;
  80. out_fail:
  81. wcn36xx_dxe_free_ctl_block(ch);
  82. return -ENOMEM;
  83. }
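/* Configure the four DXE channels (channel type, ring size, work queue and
 * TX control words) and allocate their control-block lists. Also primes
 * the SMSM state to "TX rings empty" with TX disabled.
 */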
  84. int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
  85. {
  86. int ret;
  87. wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
  88. wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
  89. wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
  90. wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
  91. wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
  92. wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
  93. wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
  94. wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
  95. wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
  96. wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;
  97. wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
  98. wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
  99. wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
  100. wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
  101. wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
  102. wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
  103. wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
  104. wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
  105. /* DXE control block allocation */
  106. ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
  107. if (ret)
  108. goto out_err;
  109. ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
  110. if (ret)
  111. goto out_err;
  112. ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
  113. if (ret)
  114. goto out_err;
  115. ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
  116. if (ret)
  117. goto out_err;
  118. /* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
  119. ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
  120. WCN36XX_SMSM_WLAN_TX_ENABLE |
  121. WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
  122. WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
  123. if (ret)
  124. goto out_err;
  125. return 0;
  126. out_err:
  127. wcn36xx_err("Failed to allocate DXE control blocks\n");
  128. wcn36xx_dxe_free_ctl_blks(wcn);
  129. return -ENOMEM;
  130. }
  131. void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
  132. {
  133. wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
  134. wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
  135. wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
  136. wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
  137. }
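/* Allocate a coherent DMA area with one hardware descriptor per control
 * block, attach each descriptor to its control block, pre-program the
 * channel-specific control word and work-queue address, and chain the
 * descriptors into a ring through phy_next_l (the last descriptor points
 * back to the first).
 */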
  138. static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
  139. {
  140. struct wcn36xx_dxe_desc *cur_dxe = NULL;
  141. struct wcn36xx_dxe_desc *prev_dxe = NULL;
  142. struct wcn36xx_dxe_ctl *cur_ctl = NULL;
  143. size_t size;
  144. int i;
  145. size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
  146. wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
  147. GFP_KERNEL);
  148. if (!wcn_ch->cpu_addr)
  149. return -ENOMEM;
  150. cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
  151. cur_ctl = wcn_ch->head_blk_ctl;
  152. for (i = 0; i < wcn_ch->desc_num; i++) {
  153. cur_ctl->desc = cur_dxe;
  154. cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
  155. i * sizeof(struct wcn36xx_dxe_desc);
  156. switch (wcn_ch->ch_type) {
  157. case WCN36XX_DXE_CH_TX_L:
  158. cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
  159. cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
  160. break;
  161. case WCN36XX_DXE_CH_TX_H:
  162. cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
  163. cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
  164. break;
  165. case WCN36XX_DXE_CH_RX_L:
  166. cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
  167. cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
  168. break;
  169. case WCN36XX_DXE_CH_RX_H:
  170. cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
  171. cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
  172. break;
  173. }
  174. if (0 == i) {
  175. cur_dxe->phy_next_l = 0;
  176. } else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
  177. prev_dxe->phy_next_l =
  178. cur_ctl->desc_phy_addr;
  179. } else if (i == (wcn_ch->desc_num - 1)) {
  180. prev_dxe->phy_next_l =
  181. cur_ctl->desc_phy_addr;
  182. cur_dxe->phy_next_l =
  183. wcn_ch->head_blk_ctl->desc_phy_addr;
  184. }
  185. cur_ctl = cur_ctl->next;
  186. prev_dxe = cur_dxe;
  187. cur_dxe++;
  188. }
  189. return 0;
  190. }
  191. static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
  192. {
  193. size_t size;
  194. size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
  195. dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
  196. }
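/* Hand out the BD chunks of a TX memory pool to the ring: descriptors are
 * used in pairs (BD descriptor followed by skb descriptor), so only every
 * second control block gets a BD chunk; the odd ones carry skb data and
 * get no BD pointer.
 */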
  197. static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
  198. struct wcn36xx_dxe_mem_pool *pool)
  199. {
  200. int i, chunk_size = pool->chunk_size;
  201. dma_addr_t bd_phy_addr = pool->phy_addr;
  202. void *bd_cpu_addr = pool->virt_addr;
  203. struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
  204. for (i = 0; i < ch->desc_num; i++) {
  205. /* Only every second dxe needs a bd pointer,
  206. * the others will point to the skb data */
  207. if (!(i & 1)) {
  208. cur->bd_phy_addr = bd_phy_addr;
  209. cur->bd_cpu_addr = bd_cpu_addr;
  210. bd_phy_addr += chunk_size;
  211. bd_cpu_addr += chunk_size;
  212. } else {
  213. cur->bd_phy_addr = 0;
  214. cur->bd_cpu_addr = NULL;
  215. }
  216. cur = cur->next;
  217. }
  218. }
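/* Enable interrupts for the given channel by setting its bit in the DXE
 * interrupt mask register (read-modify-write).
 */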
  219. static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
  220. {
  221. int reg_data = 0;
  222. wcn36xx_dxe_read_register(wcn,
  223. WCN36XX_DXE_INT_MASK_REG,
  224. &reg_data);
  225. reg_data |= wcn_ch;
  226. wcn36xx_dxe_write_register(wcn,
  227. WCN36XX_DXE_INT_MASK_REG,
  228. (int)reg_data);
  229. return 0;
  230. }
  231. static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
  232. {
  233. int reg_data = 0;
  234. wcn36xx_dxe_read_register(wcn,
  235. WCN36XX_DXE_INT_MASK_REG,
  236. &reg_data);
  237. reg_data &= ~wcn_ch;
  238. wcn36xx_dxe_write_register(wcn,
  239. WCN36XX_DXE_INT_MASK_REG,
  240. (int)reg_data);
  241. }
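/* Allocate a receive skb of WCN36XX_PKT_SIZE and DMA-map its data area as
 * the destination address of the given RX descriptor, so the hardware can
 * place a received frame directly into the skb.
 */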
  242. static int wcn36xx_dxe_fill_skb(struct device *dev,
  243. struct wcn36xx_dxe_ctl *ctl,
  244. gfp_t gfp)
  245. {
  246. struct wcn36xx_dxe_desc *dxe = ctl->desc;
  247. struct sk_buff *skb;
  248. skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
  249. if (skb == NULL)
  250. return -ENOMEM;
  251. dxe->dst_addr_l = dma_map_single(dev,
  252. skb_tail_pointer(skb),
  253. WCN36XX_PKT_SIZE,
  254. DMA_FROM_DEVICE);
  255. if (dma_mapping_error(dev, dxe->dst_addr_l)) {
  256. dev_err(dev, "unable to map skb\n");
  257. kfree_skb(skb);
  258. return -ENOMEM;
  259. }
  260. ctl->skb = skb;
  261. return 0;
  262. }
  263. static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
  264. struct wcn36xx_dxe_ch *wcn_ch)
  265. {
  266. int i;
  267. struct wcn36xx_dxe_ctl *cur_ctl = NULL;
  268. cur_ctl = wcn_ch->head_blk_ctl;
  269. for (i = 0; i < wcn_ch->desc_num; i++) {
  270. wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
  271. cur_ctl = cur_ctl->next;
  272. }
  273. return 0;
  274. }
  275. static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
  276. struct wcn36xx_dxe_ch *wcn_ch)
  277. {
  278. struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
  279. int i;
  280. for (i = 0; i < wcn_ch->desc_num; i++) {
  281. kfree_skb(cur->skb);
  282. cur = cur->next;
  283. }
  284. }
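/* TX-ack indication: take the skb parked in tx_ack_skb, cancel the timeout
 * timer, set or clear IEEE80211_TX_STAT_ACK according to status and
 * complete the skb to mac80211, waking the queues again.
 */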
  285. void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
  286. {
  287. struct ieee80211_tx_info *info;
  288. struct sk_buff *skb;
  289. unsigned long flags;
  290. spin_lock_irqsave(&wcn->dxe_lock, flags);
  291. skb = wcn->tx_ack_skb;
  292. wcn->tx_ack_skb = NULL;
  293. del_timer(&wcn->tx_ack_timer);
  294. spin_unlock_irqrestore(&wcn->dxe_lock, flags);
  295. if (!skb) {
  296. wcn36xx_warn("Spurious TX complete indication\n");
  297. return;
  298. }
  299. info = IEEE80211_SKB_CB(skb);
  300. if (status == 1)
  301. info->flags |= IEEE80211_TX_STAT_ACK;
  302. else
  303. info->flags &= ~IEEE80211_TX_STAT_ACK;
  304. wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
  305. ieee80211_tx_status_irqsafe(wcn->hw, skb);
  306. ieee80211_wake_queues(wcn->hw);
  307. }
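/* TX-ack timeout: no ack indication arrived in time, so complete the
 * parked skb to mac80211 without the ACK flag set.
 */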
  308. static void wcn36xx_dxe_tx_timer(struct timer_list *t)
  309. {
  310. struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
  311. struct ieee80211_tx_info *info;
  312. unsigned long flags;
  313. struct sk_buff *skb;
  314. /* TX Timeout */
  315. wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
  316. spin_lock_irqsave(&wcn->dxe_lock, flags);
  317. skb = wcn->tx_ack_skb;
  318. wcn->tx_ack_skb = NULL;
  319. spin_unlock_irqrestore(&wcn->dxe_lock, flags);
  320. if (!skb)
  321. return;
  322. info = IEEE80211_SKB_CB(skb);
  323. info->flags &= ~IEEE80211_TX_STAT_ACK;
  324. info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
  325. ieee80211_tx_status_irqsafe(wcn->hw, skb);
  326. ieee80211_wake_queues(wcn->hw);
  327. }
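/* Reclaim completed TX descriptors: walk the ring from tail_blk_ctl up to
 * the first descriptor still marked valid (still owned by the hardware),
 * unmap each finished skb and either report its status to mac80211
 * directly or, when a TX-ack indication is expected, park it in tx_ack_skb
 * and arm tx_ack_timer.
 */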
  328. static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
  329. {
  330. struct wcn36xx_dxe_ctl *ctl;
  331. struct ieee80211_tx_info *info;
  332. unsigned long flags;
  333. /*
  334. * Run the loop at least once (do-while): when the ring is completely
  335. * full, head and tail point to the same element, and a while-do loop
  336. * would not execute at all.
  337. */
  338. spin_lock_irqsave(&ch->lock, flags);
  339. ctl = ch->tail_blk_ctl;
  340. do {
  341. if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
  342. break;
  343. if (ctl->skb &&
  344. READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
  345. dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
  346. ctl->skb->len, DMA_TO_DEVICE);
  347. info = IEEE80211_SKB_CB(ctl->skb);
  348. if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
  349. if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
  350. info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
  351. ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
  352. } else {
  353. /* Wait for the TX ack indication or timeout... */
  354. spin_lock(&wcn->dxe_lock);
  355. if (WARN_ON(wcn->tx_ack_skb))
  356. ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
  357. wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
  358. mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
  359. spin_unlock(&wcn->dxe_lock);
  360. }
  361. /* do not free, ownership transferred to mac80211 status cb */
  362. } else {
  363. ieee80211_free_txskb(wcn->hw, ctl->skb);
  364. }
  365. if (wcn->queues_stopped) {
  366. wcn->queues_stopped = false;
  367. ieee80211_wake_queues(wcn->hw);
  368. }
  369. ctl->skb = NULL;
  370. }
  371. ctl = ctl->next;
  372. } while (ctl != ch->head_blk_ctl);
  373. ch->tail_blk_ctl = ctl;
  374. spin_unlock_irqrestore(&ch->lock, flags);
  375. }
  376. static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
  377. {
  378. struct wcn36xx *wcn = (struct wcn36xx *)dev;
  379. int int_src, int_reason;
  380. wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
  381. if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
  382. wcn36xx_dxe_read_register(wcn,
  383. WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
  384. &int_reason);
  385. wcn36xx_dxe_write_register(wcn,
  386. WCN36XX_DXE_0_INT_CLR,
  387. WCN36XX_INT_MASK_CHAN_TX_H);
  388. if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
  389. wcn36xx_dxe_write_register(wcn,
  390. WCN36XX_DXE_0_INT_ERR_CLR,
  391. WCN36XX_INT_MASK_CHAN_TX_H);
  392. wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
  393. int_src);
  394. }
  395. if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
  396. wcn36xx_dxe_write_register(wcn,
  397. WCN36XX_DXE_0_INT_DONE_CLR,
  398. WCN36XX_INT_MASK_CHAN_TX_H);
  399. }
  400. if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
  401. wcn36xx_dxe_write_register(wcn,
  402. WCN36XX_DXE_0_INT_ED_CLR,
  403. WCN36XX_INT_MASK_CHAN_TX_H);
  404. }
  405. wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
  406. int_reason);
  407. if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
  408. WCN36XX_CH_STAT_INT_ED_MASK)) {
  409. reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
  410. }
  411. }
  412. if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
  413. wcn36xx_dxe_read_register(wcn,
  414. WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
  415. &int_reason);
  416. wcn36xx_dxe_write_register(wcn,
  417. WCN36XX_DXE_0_INT_CLR,
  418. WCN36XX_INT_MASK_CHAN_TX_L);
  419. if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
  420. wcn36xx_dxe_write_register(wcn,
  421. WCN36XX_DXE_0_INT_ERR_CLR,
  422. WCN36XX_INT_MASK_CHAN_TX_L);
  423. wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
  424. int_src);
  425. }
  426. if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
  427. wcn36xx_dxe_write_register(wcn,
  428. WCN36XX_DXE_0_INT_DONE_CLR,
  429. WCN36XX_INT_MASK_CHAN_TX_L);
  430. }
  431. if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
  432. wcn36xx_dxe_write_register(wcn,
  433. WCN36XX_DXE_0_INT_ED_CLR,
  434. WCN36XX_INT_MASK_CHAN_TX_L);
  435. }
  436. wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
  437. int_reason);
  438. if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
  439. WCN36XX_CH_STAT_INT_ED_MASK)) {
  440. reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
  441. }
  442. }
  443. return IRQ_HANDLED;
  444. }
  445. static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
  446. {
  447. struct wcn36xx *wcn = (struct wcn36xx *)dev;
  448. wcn36xx_dxe_rx_frame(wcn);
  449. return IRQ_HANDLED;
  450. }
  451. static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
  452. {
  453. int ret;
  454. ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
  455. IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
  456. if (ret) {
  457. wcn36xx_err("failed to alloc tx irq\n");
  458. goto out_err;
  459. }
  460. ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
  461. "wcn36xx_rx", wcn);
  462. if (ret) {
  463. wcn36xx_err("failed to alloc rx irq\n");
  464. goto out_txirq;
  465. }
  466. enable_irq_wake(wcn->rx_irq);
  467. return 0;
  468. out_txirq:
  469. free_irq(wcn->tx_irq, wcn);
  470. out_err:
  471. return ret;
  472. }
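/* Acknowledge the RX interrupt causes for one channel, then walk its ring:
 * for every descriptor the hardware has handed back (valid bit cleared),
 * try to attach a fresh skb; on success unmap the old skb and pass it up
 * through wcn36xx_rx_skb(), otherwise keep the old buffer and drop the
 * frame. Each processed descriptor is re-armed by restoring its control
 * word.
 */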
  473. static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
  474. struct wcn36xx_dxe_ch *ch,
  475. u32 ctrl,
  476. u32 en_mask,
  477. u32 int_mask,
  478. u32 status_reg)
  479. {
  480. struct wcn36xx_dxe_desc *dxe;
  481. struct wcn36xx_dxe_ctl *ctl;
  482. dma_addr_t dma_addr;
  483. struct sk_buff *skb;
  484. u32 int_reason;
  485. int ret;
  486. wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
  487. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
  488. if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
  489. wcn36xx_dxe_write_register(wcn,
  490. WCN36XX_DXE_0_INT_ERR_CLR,
  491. int_mask);
  492. wcn36xx_err("DXE IRQ reported error on RX channel\n");
  493. }
  494. if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
  495. wcn36xx_dxe_write_register(wcn,
  496. WCN36XX_DXE_0_INT_DONE_CLR,
  497. int_mask);
  498. if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
  499. wcn36xx_dxe_write_register(wcn,
  500. WCN36XX_DXE_0_INT_ED_CLR,
  501. int_mask);
  502. if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
  503. WCN36XX_CH_STAT_INT_ED_MASK)))
  504. return 0;
  505. spin_lock(&ch->lock);
  506. ctl = ch->head_blk_ctl;
  507. dxe = ctl->desc;
  508. while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
  509. /* do not read until we own DMA descriptor */
  510. dma_rmb();
  511. /* read/modify DMA descriptor */
  512. skb = ctl->skb;
  513. dma_addr = dxe->dst_addr_l;
  514. ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
  515. if (0 == ret) {
  516. /* New skb allocated successfully: use it for this slot and hand
  517. * the old one up to the network stack.
  518. */
  519. dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
  520. DMA_FROM_DEVICE);
  521. wcn36xx_rx_skb(wcn, skb);
  522. }
  523. /* Otherwise keep the old skb, do not submit it, and reuse it for
  524. * RX DMA (dropping the packet it contained).
  525. */
  526. /* flush descriptor changes before re-marking as valid */
  527. dma_wmb();
  528. dxe->ctrl = ctrl;
  529. ctl = ctl->next;
  530. dxe = ctl->desc;
  531. }
  532. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
  533. ch->head_blk_ctl = ctl;
  534. spin_unlock(&ch->lock);
  535. return 0;
  536. }
  537. void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
  538. {
  539. int int_src;
  540. wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
  541. /* RX_LOW_PRI */
  542. if (int_src & WCN36XX_DXE_INT_CH1_MASK)
  543. wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
  544. WCN36XX_DXE_CTRL_RX_L,
  545. WCN36XX_DXE_INT_CH1_MASK,
  546. WCN36XX_INT_MASK_CHAN_RX_L,
  547. WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
  548. /* RX_HIGH_PRI */
  549. if (int_src & WCN36XX_DXE_INT_CH3_MASK)
  550. wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
  551. WCN36XX_DXE_CTRL_RX_H,
  552. WCN36XX_DXE_INT_CH3_MASK,
  553. WCN36XX_INT_MASK_CHAN_RX_H,
  554. WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
  555. if (!int_src)
  556. wcn36xx_warn("No DXE interrupt pending\n");
  557. }
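/* Allocate the coherent memory pools holding the TX buffer descriptors
 * (BDs) that accompany outgoing frames: one pool for the high-priority
 * (management) channel and one for the low-priority (data) channel.
 */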
  558. int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
  559. {
  560. size_t s;
  561. void *cpu_addr;
  562. /* Allocate BD headers for MGMT frames */
  563. /* Where this comes from, ask QC */
  564. wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
  565. 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
  566. s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
  567. cpu_addr = dma_alloc_coherent(wcn->dev, s,
  568. &wcn->mgmt_mem_pool.phy_addr,
  569. GFP_KERNEL);
  570. if (!cpu_addr)
  571. goto out_err;
  572. wcn->mgmt_mem_pool.virt_addr = cpu_addr;
  573. /* Allocate BD headers for DATA frames */
  574. /* Where this comes from, ask QC */
  575. wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
  576. 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
  577. s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
  578. cpu_addr = dma_alloc_coherent(wcn->dev, s,
  579. &wcn->data_mem_pool.phy_addr,
  580. GFP_KERNEL);
  581. if (!cpu_addr)
  582. goto out_err;
  583. wcn->data_mem_pool.virt_addr = cpu_addr;
  584. return 0;
  585. out_err:
  586. wcn36xx_dxe_free_mem_pools(wcn);
  587. wcn36xx_err("Failed to allocate BD mempool\n");
  588. return -ENOMEM;
  589. }
  590. void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
  591. {
  592. if (wcn->mgmt_mem_pool.virt_addr)
  593. dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
  594. WCN36XX_DXE_CH_DESC_NUMB_TX_H,
  595. wcn->mgmt_mem_pool.virt_addr,
  596. wcn->mgmt_mem_pool.phy_addr);
  597. if (wcn->data_mem_pool.virt_addr) {
  598. dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
  599. WCN36XX_DXE_CH_DESC_NUMB_TX_L,
  600. wcn->data_mem_pool.virt_addr,
  601. wcn->data_mem_pool.phy_addr);
  602. }
  603. }
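/* Queue one frame for transmission. Each transmission uses a pair of ring
 * entries: the first carries the buffer descriptor (BD), the second the
 * skb payload. A busy skb slot means the ring is full, in which case the
 * mac80211 queues are stopped and -EBUSY is returned. Once both
 * descriptors are filled they are marked valid (skb first, then BD) and
 * the hardware is kicked either through the channel control register or,
 * in BMPS power-save, through the SMSM state bits.
 */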
  604. int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
  605. struct wcn36xx_vif *vif_priv,
  606. struct wcn36xx_tx_bd *bd,
  607. struct sk_buff *skb,
  608. bool is_low)
  609. {
  610. struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
  611. struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
  612. struct wcn36xx_dxe_ch *ch = NULL;
  613. unsigned long flags;
  614. int ret;
  615. ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
  616. spin_lock_irqsave(&ch->lock, flags);
  617. ctl_bd = ch->head_blk_ctl;
  618. ctl_skb = ctl_bd->next;
  619. /*
  620. * If the skb is not NULL we have reached the tail of the ring and the
  621. * ring is full. Stop the queues to let mac80211 back off until the ring
  622. * has an empty slot again.
  623. */
  624. if (NULL != ctl_skb->skb) {
  625. ieee80211_stop_queues(wcn->hw);
  626. wcn->queues_stopped = true;
  627. spin_unlock_irqrestore(&ch->lock, flags);
  628. return -EBUSY;
  629. }
  630. if (unlikely(ctl_skb->bd_cpu_addr)) {
  631. wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
  632. ret = -EINVAL;
  633. goto unlock;
  634. }
  635. desc_bd = ctl_bd->desc;
  636. desc_skb = ctl_skb->desc;
  637. ctl_bd->skb = NULL;
  638. /* write buffer descriptor */
  639. memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
  640. /* Set source address of the BD we send */
  641. desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
  642. desc_bd->dst_addr_l = ch->dxe_wq;
  643. desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
  644. wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
  645. wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
  646. (char *)desc_bd, sizeof(*desc_bd));
  647. wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
  648. "BD >>> ", (char *)ctl_bd->bd_cpu_addr,
  649. sizeof(struct wcn36xx_tx_bd));
  650. desc_skb->src_addr_l = dma_map_single(wcn->dev,
  651. skb->data,
  652. skb->len,
  653. DMA_TO_DEVICE);
  654. if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
  655. dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
  656. ret = -ENOMEM;
  657. goto unlock;
  658. }
  659. ctl_skb->skb = skb;
  660. desc_skb->dst_addr_l = ch->dxe_wq;
  661. desc_skb->fr_len = ctl_skb->skb->len;
  662. wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
  663. (char *)desc_skb, sizeof(*desc_skb));
  664. wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
  665. (char *)ctl_skb->skb->data, ctl_skb->skb->len);
  666. /* Move the head of the ring to the next empty descriptor */
  667. ch->head_blk_ctl = ctl_skb->next;
  668. /* Commit all previous writes and set descriptors to VALID */
  669. wmb();
  670. desc_skb->ctrl = ch->ctrl_skb;
  671. wmb();
  672. desc_bd->ctrl = ch->ctrl_bd;
  673. /*
  674. * When connected and trying to send a data frame, the chip can be in
  675. * sleep mode and writing to the register will not wake it up. Instead,
  676. * notify the chip about the new frame through the SMSM bus.
  677. */
  678. if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
  679. qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
  680. WCN36XX_SMSM_WLAN_TX_ENABLE,
  681. WCN36XX_SMSM_WLAN_TX_ENABLE);
  682. } else {
  683. /* indicate End Of Packet and generate interrupt on descriptor
  684. * done.
  685. */
  686. wcn36xx_dxe_write_register(wcn,
  687. ch->reg_ctrl, ch->def_ctrl);
  688. }
  689. ret = 0;
  690. unlock:
  691. spin_unlock_irqrestore(&ch->lock, flags);
  692. return ret;
  693. }
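/* Return true if no entry of the TX ring still holds an skb, i.e. the
 * hardware has drained the channel. Checked under the channel lock.
 */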
  694. static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
  695. {
  696. unsigned long flags;
  697. struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
  698. struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
  699. bool ret = true;
  700. spin_lock_irqsave(&ch->lock, flags);
  701. /* Loop through ring buffer looking for nonempty entries. */
  702. ctl_bd_start = ch->head_blk_ctl;
  703. ctl_bd = ctl_bd_start;
  704. ctl_skb_start = ctl_bd_start->next;
  705. ctl_skb = ctl_skb_start;
  706. do {
  707. if (ctl_skb->skb) {
  708. ret = false;
  709. goto unlock;
  710. }
  711. ctl_bd = ctl_skb->next;
  712. ctl_skb = ctl_bd->next;
  713. } while (ctl_skb != ctl_skb_start);
  714. unlock:
  715. spin_unlock_irqrestore(&ch->lock, flags);
  716. return ret;
  717. }
  718. int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
  719. {
  720. int i = 0;
  721. /* Called with mac80211 queues stopped. Wait for empty HW queues. */
  722. do {
  723. if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
  724. _wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
  725. return 0;
  726. }
  727. /* This ieee80211_ops callback is specifically allowed to
  728. * sleep.
  729. */
  730. usleep_range(1000, 1100);
  731. } while (++i < 100);
  732. return -EBUSY;
  733. }
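/* Bring up the DXE engine: reset the block, route channel interrupts,
 * set up descriptor rings and BD pools for both TX channels, preallocate
 * RX skbs and program both RX channels, request the TX/RX IRQs, set up
 * the TX-ack timeout timer and finally enable the per-channel interrupts.
 */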
  734. int wcn36xx_dxe_init(struct wcn36xx *wcn)
  735. {
  736. int reg_data = 0, ret;
  737. reg_data = WCN36XX_DXE_REG_RESET;
  738. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
  739. /* Select channels for rx avail and xfer done interrupts... */
  740. reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
  741. WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
  742. if (wcn->is_pronto)
  743. wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
  744. else
  745. wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
  746. /***************************************/
  747. /* Init descriptors for TX LOW channel */
  748. /***************************************/
  749. ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
  750. if (ret) {
  751. dev_err(wcn->dev, "Error allocating descriptor\n");
  752. return ret;
  753. }
  754. wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
  755. /* Write channel head to a NEXT register */
  756. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
  757. wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
  758. /* Program DMA destination addr for TX LOW */
  759. wcn36xx_dxe_write_register(wcn,
  760. WCN36XX_DXE_CH_DEST_ADDR_TX_L,
  761. WCN36XX_DXE_WQ_TX_L);
  762. wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
  763. /***************************************/
  764. /* Init descriptors for TX HIGH channel */
  765. /***************************************/
  766. ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
  767. if (ret) {
  768. dev_err(wcn->dev, "Error allocating descriptor\n");
  769. goto out_err_txh_ch;
  770. }
  771. wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
  772. /* Write channel head to a NEXT register */
  773. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
  774. wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
  775. /* Program DMA destination addr for TX HIGH */
  776. wcn36xx_dxe_write_register(wcn,
  777. WCN36XX_DXE_CH_DEST_ADDR_TX_H,
  778. WCN36XX_DXE_WQ_TX_H);
  779. wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
  780. /***************************************/
  781. /* Init descriptors for RX LOW channel */
  782. /***************************************/
  783. ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
  784. if (ret) {
  785. dev_err(wcn->dev, "Error allocating descriptor\n");
  786. goto out_err_rxl_ch;
  787. }
  788. /* For RX we need to preallocate buffers */
  789. wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
  790. /* Write channel head to a NEXT register */
  791. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
  792. wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
  793. /* Write DMA source address */
  794. wcn36xx_dxe_write_register(wcn,
  795. WCN36XX_DXE_CH_SRC_ADDR_RX_L,
  796. WCN36XX_DXE_WQ_RX_L);
  797. /* Program preallocated destination address */
  798. wcn36xx_dxe_write_register(wcn,
  799. WCN36XX_DXE_CH_DEST_ADDR_RX_L,
  800. wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
  801. /* Enable default control registers */
  802. wcn36xx_dxe_write_register(wcn,
  803. WCN36XX_DXE_REG_CTL_RX_L,
  804. WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
  805. /***************************************/
  806. /* Init descriptors for RX HIGH channel */
  807. /***************************************/
  808. ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
  809. if (ret) {
  810. dev_err(wcn->dev, "Error allocating descriptor\n");
  811. goto out_err_rxh_ch;
  812. }
  813. /* For RX we need to preallocate buffers */
  814. wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
  815. /* Write channel head to a NEXT register */
  816. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
  817. wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
  818. /* Write DMA source address */
  819. wcn36xx_dxe_write_register(wcn,
  820. WCN36XX_DXE_CH_SRC_ADDR_RX_H,
  821. WCN36XX_DXE_WQ_RX_H);
  822. /* Program preallocated destination address */
  823. wcn36xx_dxe_write_register(wcn,
  824. WCN36XX_DXE_CH_DEST_ADDR_RX_H,
  825. wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
  826. /* Enable default control registers */
  827. wcn36xx_dxe_write_register(wcn,
  828. WCN36XX_DXE_REG_CTL_RX_H,
  829. WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
  830. ret = wcn36xx_dxe_request_irqs(wcn);
  831. if (ret < 0)
  832. goto out_err_irq;
  833. timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
  834. /* Enable channel interrupts */
  835. wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
  836. wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
  837. wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
  838. wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
  839. return 0;
  840. out_err_irq:
  841. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
  842. out_err_rxh_ch:
  843. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
  844. out_err_rxl_ch:
  845. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
  846. out_err_txh_ch:
  847. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
  848. return ret;
  849. }
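/* Tear down the DXE engine: disable channel interrupts, release the IRQs,
 * cancel the TX-ack timer (completing any pending tx_ack_skb), put the
 * block back into reset and free the RX skbs and descriptor rings.
 */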
  850. void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
  851. {
  852. int reg_data = 0;
  853. /* Disable channel interrupts */
  854. wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
  855. wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
  856. wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
  857. wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
  858. free_irq(wcn->tx_irq, wcn);
  859. free_irq(wcn->rx_irq, wcn);
  860. del_timer(&wcn->tx_ack_timer);
  861. if (wcn->tx_ack_skb) {
  862. ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
  863. wcn->tx_ack_skb = NULL;
  864. }
  865. /* Put the DXE block into reset before freeing memory */
  866. reg_data = WCN36XX_DXE_REG_RESET;
  867. wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
  868. wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
  869. wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
  870. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
  871. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
  872. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
  873. wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
  874. }