dp_full_mon.c

/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "dp_htt.h"
#include "dp_full_mon.h"
#include "qdf_mem.h"

#ifdef QCA_SUPPORT_FULL_MON

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 uint32_t mac_id,
			 uint32_t quota);

/*
 * dp_rx_mon_prepare_mon_mpdu() - API to prepare a dp_mon_mpdu object
 *
 * @pdev: DP pdev object
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 *
 * Return: pointer to the prepared dp_mon_mpdu object
 */
static inline struct dp_mon_mpdu *
dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
			   qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	struct dp_mon_mpdu *mon_mpdu = NULL;

	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));

	if (!mon_mpdu) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("Monitor MPDU object allocation failed -- %pK"),
			  pdev);
		qdf_assert_always(0);
	}

	mon_mpdu->head = head_msdu;
	mon_mpdu->tail = tail_msdu;
	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;

	return mon_mpdu;
}

/*
 * dp_rx_monitor_deliver_ppdu() - API to deliver all MPDUs of a PPDU
 * to the upper layer stack
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);
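			/*
			 * Restore the radio status snapshot saved with this
			 * MPDU at reap time; the shared rx_status in
			 * pdev->ppdu_info may since have been overwritten.
			 */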
			pdev->ppdu_info.rx_status.rs_flags = mpdu->rs_flags;
			pdev->ppdu_info.rx_status.ant_signal_db =
				mpdu->ant_signal_db;
			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;

			dp_rx_mon_deliver(soc, mac_id,
					  mpdu->head, mpdu->tail);

			qdf_mem_free(mpdu);
		}
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_mon_reap_status_ring() - Reap status_buf_count status buffers from
 * the status ring
 *
 * @soc: DP soc handle
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of status ring entries reaped
 */
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t status_buf_count;
	uint32_t work_done = 0;

	status_buf_count = desc_info->status_buf_count;

status_reap:
	work_done += dp_rx_mon_status_process(soc, mac_id, status_buf_count);
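
	/*
	 * If the status ring ppdu_id still trails the destination ring
	 * ppdu_id, the remaining status buffers for this PPDU have not
	 * arrived yet; reap the status ring again until it catches up.
	 */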
	if (desc_info->ppdu_id != pdev->ppdu_info.com_info.ppdu_id) {
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		qdf_err("count: %d quota: %d work_done: %d status_ppdu_id: %d "
			"dest_ppdu_id: %d", status_buf_count, quota,
			work_done,
			pdev->ppdu_info.com_info.ppdu_id,
			desc_info->ppdu_id);

		if (desc_info->ppdu_id > pdev->ppdu_info.com_info.ppdu_id)
			goto status_reap;
	}

	return work_done;
}

/**
 * dp_rx_mon_mpdu_reap() - This API reaps an MPDU from the monitor destination
 * ring descriptor and returns the link descriptor to HW (WBM)
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to first msdu in a chain
 * @tail_msdu: nbuf pointing to last msdu in a chain
 * @head_desc: head pointer to free desc list
 * @tail_desc: tail pointer to free desc list
 *
 * Return: number of reaped buffers
 */
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;

	desc_info = pdev->mon_desc;

	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));

	/* Read SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return */
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is an rxdma error, drop the MPDU */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			 == QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}
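
	/*
	 * Even when the MPDU is dropped, the MSDU list below is still
	 * walked so that every rx buffer is freed and every rx descriptor
	 * is returned to the free desc list.
	 */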
	/*
	 * The while loop iterates through all link descriptors, reaps
	 * msdu_count msdus for one SW_MONITOR_RING descriptor and forms
	 * an nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = rx_desc->nbuf;

			if (rx_desc->unmapped == 0) {
				qdf_nbuf_unmap_single(soc->osdev,
						      msdu,
						      QDF_DMA_FROM_DEVICE);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				qdf_nbuf_free(msdu);
				msdu = NULL;
				desc_info->msdu_count--;
				goto next_msdu;
			}

			rx_tlv_hdr = qdf_nbuf_data(msdu);

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);

			/*
			 * If the msdu is fragmented, i.e. spread across
			 * multiple buffers:
			 * a. calculate the length of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    the msdu and decrement msdu_count only once
			 */
			if (msdu_list.msdu_info[msdu_index].msdu_flags
			    & HAL_MSDU_F_MSDU_CONTINUATION) {
				if (!msdu_frag) {
					total_frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
					msdu_frag = true;
				}
				dp_mon_adjust_frag_len(&total_frag_len,
						       &frag_len);
			} else {
				if (msdu_frag)
					dp_mon_adjust_frag_len(&total_frag_len,
							       &frag_len);
				else
					frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
				msdu_frag = false;
				desc_info->msdu_count--;
			}
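
			/*
			 * Account for the RX TLV header retained at the
			 * head of each monitor buffer when sizing the nbuf.
			 */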
			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding.
			 * It is actually the offset from the buffer
			 * beginning where the L2 header begins.
			 */
			l3_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
							soc->hal_soc,
							rx_tlv_hdr);

			/*****************************************************
			 *                    RX_PACKET                      *
			 * --------------------------------------------------*
			 * | RX_PKT_TLVS | L3 Padding header |  msdu data  | *
			 * --------------------------------------------------*
			 *****************************************************/
			qdf_nbuf_set_pktlen(msdu,
					    rx_hdr_tlv_len +
					    l3_hdr_pad +
					    frag_len);

			if (head_msdu && !*head_msdu)
				*head_msdu = msdu;
			else if (last_msdu)
				qdf_nbuf_set_next(last_msdu, msdu);

			last_msdu = msdu;

next_msdu:
			rx_buf_reaped++;

			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("total_len %u frag_len %u flags %u"),
				  total_frag_len, frag_len,
				  msdu_list.msdu_info[msdu_index].msdu_flags);
		}
		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get next link desc VA from current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* Return msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "dp_rx_monitor_link_desc_return failed");
		}
	}

	pdev->rx_mon_stats.dest_mpdu_done++;

	if (last_msdu)
		qdf_nbuf_set_next(last_msdu, NULL);

	*tail_msdu = msdu;

	return rx_buf_reaped;
}

/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the monitor
 * status ring. Called from the bottom half (tasklet/NET_RX_SOFTIRQ).
 *
 * @soc: datapath soc context
 * @mac_id: mac_id on which interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot.
 *
 * Return: Number of reaped status ring entries
 */
uint32_t dp_rx_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats = &pdev->rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
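
	/*
	 * If full monitor mode is not enabled, fall back to legacy
	 * status-ring-only processing.
	 */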
	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev)))
		return dp_rx_mon_status_process(soc, mac_id, quota);

	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
			  mon_dest_srng);
		goto done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	qdf_spin_lock_bh(&pdev->mon_lock);

	desc_info = pdev->mon_desc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	/* Each entry in the mon dest ring carries MPDU data;
	 * reap all msdus for an MPDU and form the skb chain.
	 */
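	/*
	 * Entries are peeked rather than consumed; the ring pointer is
	 * advanced explicitly only after a descriptor has been fully
	 * processed.
	 */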
	while (qdf_likely(ring_desc =
			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;

		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);

		/* Log an error if end_of_ppdu is zero and the number of
		 * reaped buffers is also zero.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}
		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* Replenish rx_bufs_reaped buffers back to
		 * the RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;
		/* If end_of_ppdu is zero, it is a valid data MPDU:
		 * a. Add head_msdu and tail_msdu to the mpdu list
		 * b. Continue reaping the next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of rxdma error, the MPDU is dropped
			 * from the sw_monitor_ring descriptor.
			 * In this case, head_msdu remains NULL.
			 * Move the srng to the next entry and continue
			 * reaping.
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare an MPDU object which holds a chain of
			 * msdus and the MPDU-specific status, and add it
			 * to the monitor mpdu queue.
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;

			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}
		/*
		 * end_of_ppdu is one:
		 * a. Update ppdu_done statistics
		 * b. Replenish buffers back to the mon buffer ring
		 * c. Reap the status ring for a PPDU and deliver all MPDUs
		 *    to the upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

#if 0
		if (pdev->ppdu_info.com_info.ppdu_id !=
		    pdev->mon_desc->ppdu_id) {
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			qdf_err("PPDU id mismatch, status_ppdu_id: %d "
				"dest_ppdu_id: %d status_ppdu_done: %d "
				"dest_ppdu_done: %d ppdu_id_mismatch_cnt: %u "
				"dest_mpdu_drop: %u",
				pdev->ppdu_info.com_info.ppdu_id,
				pdev->mon_desc->ppdu_id,
				pdev->rx_mon_stats.status_ppdu_done,
				pdev->rx_mon_stats.dest_ppdu_done,
				pdev->rx_mon_stats.ppdu_id_mismatch,
				pdev->rx_mon_stats.dest_mpdu_drop);

			/* WAR: It is observed that in some cases the status
			 * ring ppdu_id and the destination ring ppdu_id
			 * don't match.
			 * The following WAR is added to fix it:
			 * a. If the status ppdu_id is less than the
			 *    destination ppdu_id, hold onto the destination
			 *    ring entry until the ppdu_ids match.
			 * b. If the status ppdu_id is greater than the
			 *    destination ring ppdu_id, advance the tail
			 *    pointer in the destination ring.
			 */
			if (pdev->ppdu_info.com_info.ppdu_id <
			    pdev->mon_desc->ppdu_id) {
				break;
			} else {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}
		}

		/*
		 * At this point end_of_ppdu is one; status_buf_count and
		 * status_buf_addr must be valid.
		 *
		 * Assert if
		 * a. status_buf_count is zero
		 * b. status_buf.paddr is NULL
		 */
		if (!pdev->mon_desc->status_buf_count ||
		    !pdev->mon_desc->status_buf.paddr) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  FL("Status buffer info is NULL "
				     "status_buf_count: %d "
				     "status_buf_addr: %pK "
				     "ring_desc: %pK -- "),
				  pdev->mon_desc->status_buf_count,
				  pdev->mon_desc->status_buf.paddr,
				  ring_desc);
			qdf_assert_always(0);
			goto done2;
		}
#endif
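
		/* Charge the reaped status ring entries against the
		 * caller's quota.
		 */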
		quota -= dp_rx_mon_reap_status_ring(soc, mac_id,
						    quota, desc_info);

		/* Deliver all MPDUs for a PPDU */
		dp_rx_monitor_deliver_ppdu(soc, mac_id);

		hal_srng_dst_get_next(hal_soc, mon_dest_srng);

		break;
	}

	hal_srng_access_end(hal_soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

done:
	return quota;
}

/**
 * dp_full_mon_attach() - attach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}
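
	/* Scratch descriptor, reused for every SW monitor ring entry */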
	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));

	if (!pdev->mon_desc) {
		qdf_err("Memory allocation failed for hal_rx_mon_desc_info");
		return;
	}

	TAILQ_INIT(&pdev->mon_mpdu_q);
}

/**
 * dp_full_mon_detach() - detach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	if (pdev->mon_desc) {
		qdf_mem_free(pdev->mon_desc);
		pdev->mon_desc = NULL;
	}

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);
			qdf_mem_free(mpdu);
		}
	}
}
#endif /* QCA_SUPPORT_FULL_MON */