/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "dp_htt.h"
#include "dp_full_mon.h"
#include "qdf_mem.h"
#ifdef QCA_SUPPORT_FULL_MON

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 struct dp_intr *int_ctx,
			 uint32_t mac_id,
			 uint32_t quota);
/*
 * dp_rx_mon_status_buf_validate() - Validate the first monitor status buffer
 * address against the status buffer address given in the monitor
 * destination ring
 *
 * @pdev: DP pdev handle
 * @int_ctx: Interrupt context
 * @mac_id: lmac id
 *
 * Return: enum dp_mon_reap_status
 */
static inline enum dp_mon_reap_status
dp_rx_mon_status_buf_validate(struct dp_pdev *pdev,
			      struct dp_intr *int_ctx,
			      uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *ring_entry;
	uint32_t rx_buf_cookie;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	uint64_t buf_paddr;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t tlv_tag;
	void *rx_tlv;
	struct hal_rx_ppdu_info *ppdu_info;
	enum dp_mon_reap_status status = DP_MON_STATUS_MATCH;
	QDF_STATUS buf_status;
	uint32_t ppdu_id_diff;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG access Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
	if (!ring_entry) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG entry is NULL srng:-- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = DP_MON_STATUS_REPLENISH;
		goto done;
	}

	ppdu_info = &pdev->ppdu_info;
	rx_desc_pool = &soc->rx_desc_status[mac_id];

	buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
		     ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));

	if (!buf_paddr) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : buf addr is NULL -- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = DP_MON_STATUS_REPLENISH;
		goto done;
	}

	rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

	qdf_assert(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);

	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If status buffer DMA is not done,
	 * hold on to mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor status ring: DMA is not done "
			     "for nbuf: %pK buf_addr: %llx"),
			  status_nbuf, buf_paddr);
		status = dp_rx_mon_handle_status_buf_done(pdev,
							  mon_status_srng);

		if (status == DP_MON_STATUS_REPLENISH) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			/* If this is the DMA-not-done WAR case, free the
			 * buffer and the current SW descriptor, and set
			 * buf_addr_info to NULL so that the call to
			 * dp_rx_mon_status_process() replenishes this
			 * status ring entry.
			 */
			qdf_nbuf_free(status_nbuf);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);
			hal_rxdma_buff_addr_info_set(
					ring_entry,
					0, 0, HAL_RX_BUF_RBM_SW3_BM);
		}
		goto done;
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);

	if (tlv_tag == WIFIRX_PPDU_START_E) {
		rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
		pdev->mon_desc->status_ppdu_id =
			HAL_RX_GET(rx_tlv, RX_PPDU_START_0, PHY_PPDU_ID);
		pdev->status_buf_addr = buf_paddr;
	}

	if (pdev->mon_desc->ppdu_id < pdev->mon_desc->status_ppdu_id) {
		status = DP_MON_STATUS_LEAD;

		/* For wrap-around case */
		ppdu_id_diff = pdev->mon_desc->status_ppdu_id -
			       pdev->mon_desc->ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = DP_MON_STATUS_LAG;
	} else if (pdev->mon_desc->ppdu_id > pdev->mon_desc->status_ppdu_id) {
		status = DP_MON_STATUS_LAG;

		/* For wrap-around case */
		ppdu_id_diff = pdev->mon_desc->ppdu_id -
			       pdev->mon_desc->status_ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = DP_MON_STATUS_LEAD;
	}
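
	/* Worked example of the wrap-around handling above (illustrative;
	 * DP_RX_MON_PPDU_ID_WRAP is assumed to be much larger than the
	 * normal gap between consecutive PPDU ids): with dest ppdu_id
	 * 65530 and status_ppdu_id 5, the plain comparison suggests the
	 * status ring lags, but the difference (65525) exceeds the wrap
	 * threshold, so the status PPDU id has wrapped past zero and is
	 * actually ahead, hence DP_MON_STATUS_LEAD.
	 */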
	if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
	    (pdev->mon_desc->ppdu_id != pdev->mon_desc->status_ppdu_id)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor: PPDU id or status buf_addr mismatch "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx status_buf_cookie: %d "
			     "dest_addr: %llx tlv_tag: %d "
			     "status_nbuf: %pK pdev->hold_mon_dest: %d"),
			  pdev->mon_desc->status_ppdu_id,
			  pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
			  rx_buf_cookie,
			  pdev->mon_desc->status_buf.paddr, tlv_tag,
			  status_nbuf, pdev->hold_mon_dest_ring);
	}

done:
	hal_srng_access_end(hal_soc, mon_status_srng);

	return status;
}
/*
 * dp_rx_mon_prepare_mon_mpdu() - API to prepare a dp_mon_mpdu object
 *
 * @pdev: DP pdev object
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 *
 * Return: pointer to the newly allocated dp_mon_mpdu object
 */
static inline struct dp_mon_mpdu *
dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
			   qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	struct dp_mon_mpdu *mon_mpdu = NULL;

	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));

	if (!mon_mpdu) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor MPDU object allocation failed -- %pK"),
			  pdev);
		qdf_assert_always(0);
	}

	mon_mpdu->head = head_msdu;
	mon_mpdu->tail = tail_msdu;
	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;

	return mon_mpdu;
}
/*
 * dp_rx_mon_drop_ppdu() - Drop all MPDUs queued for the current PPDU
 * and free their MSDU chains
 *
 * @pdev: DP pdev handle
 * @mac_id: lmac id
 */
static inline void
dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;
	qdf_nbuf_t mon_skb, skb_next;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			mon_skb = mpdu->head;
			while (mon_skb) {
				skb_next = qdf_nbuf_next(mon_skb);

				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  "[%s][%d] mon_skb=%pK len %u",
					  __func__, __LINE__,
					  mon_skb, mon_skb->len);

				qdf_nbuf_free(mon_skb);
				mon_skb = skb_next;
			}

			qdf_mem_free(mpdu);
		}
	}

	pdev->mon_desc->drop_ppdu = 0;
}
/*
 * dp_rx_monitor_deliver_ppdu() - API to deliver all MPDUs of a PPDU
 * to the upper layer stack
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			/* Check for IEEE80211_AMSDU_FLAG in mpdu
			 * and set it in pdev->ppdu_info.rx_status
			 */
			HAL_RX_SET_MSDU_AGGREGATION(mpdu,
						    &(pdev->ppdu_info.rx_status));

			pdev->ppdu_info.rx_status.ant_signal_db =
				mpdu->ant_signal_db;
			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;

			dp_rx_mon_deliver(soc, mac_id,
					  mpdu->head, mpdu->tail);

			qdf_mem_free(mpdu);
		}
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_mon_reap_status_ring() - Reap status_buf_count status buffers from
 * the status ring
 *
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of status ring entries reaped
 */
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   struct dp_intr *int_ctx,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t status_buf_count;
	uint32_t work_done = 0;
	enum dp_mon_reap_status status;

	status_buf_count = desc_info->status_buf_count;
	desc_info->drop_ppdu = false;

	status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);
	switch (status) {
	case DP_MON_STATUS_NO_DMA:
		/* If DMA is not done for the status ring entry,
		 * hold on to the monitor destination ring and
		 * deliver the current ppdu data once DMA is done.
		 */
		pdev->hold_mon_dest_ring = true;
		break;
	case DP_MON_STATUS_LAG:
		/* If status_ppdu_id is lagging behind destination,
		 * a. Hold on to destination ring
		 * b. Drop status ppdus until ppdu id matches
		 * c. Increment stats for ppdu_id mismatch and
		 *    status ppdu drop
		 */
		pdev->hold_mon_dest_ring = true;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.status_ppdu_drop++;
		break;
	case DP_MON_STATUS_LEAD:
		/* If status_ppdu_id is leading ahead of destination,
		 * a. Drop destination ring ppdus until ppdu_id matches
		 * b. Unhold monitor destination ring so status ppdus
		 *    can be dropped
		 * c. Increment stats for ppdu_id mismatch and
		 *    destination ppdu drop
		 */
		desc_info->drop_ppdu = true;
		pdev->hold_mon_dest_ring = false;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.dest_ppdu_drop++;
		break;
	case DP_MON_STATUS_REPLENISH:
		/* If the status ring hp entry is NULL, replenish it */
		work_done = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
		break;
	case DP_MON_STATUS_MATCH:
		/* If the status ppdu id matches the destination,
		 * unhold the monitor destination ring and deliver the ppdu
		 */
		pdev->hold_mon_dest_ring = false;
		break;
	default:
		dp_err("mon reap status is not supported");
	}

	/* If the status ring is lagging behind the destination ring,
	 * reap only one status buffer
	 */
	if (status == DP_MON_STATUS_LAG)
		status_buf_count = 1;

	if (status == DP_MON_STATUS_LAG ||
	    status == DP_MON_STATUS_MATCH) {
		work_done = dp_rx_mon_status_process(soc,
						     int_ctx,
						     mac_id,
						     status_buf_count);
	}

	return work_done;
}
/**
 * dp_rx_mon_mpdu_reap() - This API reaps an MPDU from the mon dest ring
 * descriptor and returns the link descriptors to HW (WBM)
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to first msdu in a chain
 * @tail_msdu: nbuf pointing to last msdu in a chain
 * @head_desc: head pointer to free desc list
 * @tail_desc: tail pointer to free desc list
 *
 * Return: number of reaped buffers
 */
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;
	uint16_t prev_ppdu_id;

	desc_info = pdev->mon_desc;

	/* Restore the previous ppdu_id so it can be used while doing
	 * status buffer validation
	 */
	prev_ppdu_id = pdev->mon_desc->status_ppdu_id;
	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
	pdev->mon_desc->status_ppdu_id = prev_ppdu_id;

	/* Read SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return */
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is an rxdma error, drop the mpdu */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			 == QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}

	/*
	 * The while loop iterates through all link descriptors, reaps
	 * msdu_count msdus for one SW_MONITOR_RING descriptor and forms
	 * an nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = rx_desc->nbuf;

			if (rx_desc->unmapped == 0) {
				qdf_nbuf_unmap_single(soc->osdev,
						      msdu,
						      QDF_DMA_FROM_DEVICE);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				qdf_nbuf_free(msdu);
				msdu = NULL;
				desc_info->msdu_count--;
				goto next_msdu;
			}

			rx_tlv_hdr = qdf_nbuf_data(msdu);

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);

			/* If the msdu is fragmented and spread across
			 * multiple buffers:
			 * a. calculate the length of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    the msdu and decrement msdu_count only once
			 */
			if (msdu_list.msdu_info[msdu_index].msdu_flags
			    & HAL_MSDU_F_MSDU_CONTINUATION) {
				if (!msdu_frag) {
					total_frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
					msdu_frag = true;
				}
				dp_mon_adjust_frag_len(&total_frag_len,
						       &frag_len);
			} else {
				if (msdu_frag)
					dp_mon_adjust_frag_len(&total_frag_len,
							       &frag_len);
				else
					frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
				msdu_frag = false;
				desc_info->msdu_count--;
			}
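
			/* Worked example (illustrative; the exact per-buffer
			 * payload depends on how dp_mon_adjust_frag_len() is
			 * defined): a 5000-byte MSDU spread across monitor
			 * buffers holding 2048 bytes of payload each is
			 * reaped as fragments of 2048, 2048 and 904 bytes;
			 * only the buffer carrying the last fragment (no
			 * CONTINUATION flag) decrements desc_info->msdu_count.
			 */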
			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding.
			 * This is actually the offset
			 * from the buffer beginning where the L2
			 * header begins.
			 */
			l3_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
							soc->hal_soc,
							rx_tlv_hdr);

			/******************************************************
			 *                    RX_PACKET                       *
			 * ---------------------------------------------     *
			 * | RX_PKT_TLVS | L3 Padding header | msdu data|    *
			 * ---------------------------------------------     *
			 ******************************************************/

			qdf_nbuf_set_pktlen(msdu,
					    rx_hdr_tlv_len +
					    l3_hdr_pad +
					    frag_len);

			if (head_msdu && !*head_msdu)
				*head_msdu = msdu;
			else if (last_msdu)
				qdf_nbuf_set_next(last_msdu, msdu);

			last_msdu = msdu;

next_msdu:
			rx_buf_reaped++;
			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("total_len %u frag_len %u flags %u"),
				  total_frag_len, frag_len,
				  msdu_list.msdu_info[msdu_index].msdu_flags);
		}

		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get the next link desc VA from the current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* Return the msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "dp_rx_monitor_link_desc_return failed");
		}
	}

	pdev->rx_mon_stats.dest_mpdu_done++;

	if (last_msdu)
		qdf_nbuf_set_next(last_msdu, NULL);

	*tail_msdu = msdu;

	return rx_buf_reaped;
}
/*
 * dp_rx_mon_deliver_prev_ppdu() - Deliver the previous PPDU
 *
 * @pdev: DP pdev handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id
 * @quota: quota
 *
 * Return: remaining quota
 */
static inline uint32_t
dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
			    struct dp_intr *int_ctx,
			    uint32_t mac_id,
			    uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
	uint32_t work_done = 0, work = 0;
	bool deliver_ppdu = false;
	enum dp_mon_reap_status status;

	while (pdev->hold_mon_dest_ring) {
		status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);

		switch (status) {
		case DP_MON_STATUS_NO_DMA:
			/* If DMA is not done for the status ring entry,
			 * hold on to the monitor destination ring and
			 * deliver the current ppdu data once DMA is done.
			 */
			pdev->hold_mon_dest_ring = true;
			break;
		case DP_MON_STATUS_LAG:
			/* If status_ppdu_id is lagging behind destination,
			 * a. Hold on to destination ring
			 * b. Drop status ppdus until ppdu id matches
			 * c. Increment stats for ppdu_id mismatch and
			 *    status ppdu drop
			 */
			pdev->hold_mon_dest_ring = true;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.status_ppdu_drop++;
			break;
		case DP_MON_STATUS_LEAD:
			/* If status_ppdu_id is leading ahead of destination,
			 * a. Drop destination ring ppdus until ppdu_id matches
			 * b. Unhold monitor destination ring so status ppdus
			 *    can be dropped
			 * c. Increment stats for ppdu_id mismatch and
			 *    destination ppdu drop
			 */
			desc_info->drop_ppdu = true;
			pdev->hold_mon_dest_ring = false;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.dest_ppdu_drop++;
			break;
		case DP_MON_STATUS_REPLENISH:
			/* If the status ring hp entry is NULL, replenish it */
			work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
			break;
		case DP_MON_STATUS_MATCH:
			/* If the status ppdu id matches the destination,
			 * unhold the monitor destination ring and deliver
			 * the ppdu
			 */
			pdev->hold_mon_dest_ring = false;
			break;
		default:
			dp_err("mon reap status is not supported");
		}

		/* When the status ring entry's DMA is not done or the
		 * status ring entry is replenished, ppdu status is not
		 * available for radiotap construction, so return and
		 * check for status on the next interrupt
		 */
		if ((status == DP_MON_STATUS_NO_DMA) ||
		    (status == DP_MON_STATUS_REPLENISH)) {
			return work_done;
		}

		if (status == DP_MON_STATUS_LAG) {
			work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);

			if (!work)
				return 0;

			work_done += work;
		}

		deliver_ppdu = true;
	}

	if (deliver_ppdu) {
		if (pdev->mon_desc->drop_ppdu) {
			dp_rx_mon_drop_ppdu(pdev, mac_id);
			return work_done;
		}

		work_done += dp_rx_mon_status_process(soc, int_ctx, mac_id,
						      desc_info->status_buf_count);

		dp_rx_monitor_deliver_ppdu(soc, mac_id);
	}

	return work_done;
}
/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the monitor
 * status ring.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot
 *
 * Return: Number of reaped status ring entries
 */
uint32_t dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
	uint32_t work_done = 0;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return work_done;
	}

	qdf_spin_lock_bh(&pdev->mon_lock);

	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
		work_done += dp_rx_mon_status_process(soc, int_ctx,
						      mac_id, quota);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	desc_info = pdev->mon_desc;

	rx_mon_stats = &pdev->rx_mon_stats;

	work_done = dp_rx_mon_deliver_prev_ppdu(pdev, int_ctx, mac_id, quota);

	/* Do not proceed if no work was done while the destination ring
	 * is still being held for the previous PPDU
	 */
	if (!work_done && pdev->hold_mon_dest_ring) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}
	/* Each entry in the mon dest ring carries mpdu data;
	 * reap all msdus for an mpdu and form an skb chain
	 */
	while (qdf_likely((ring_desc =
			   hal_srng_dst_peek(hal_soc, mon_dest_srng)))) {
		head_msdu = NULL;
		tail_msdu = NULL;
		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);

		/* Flag an error if end_of_ppdu is zero and the number of
		 * reaped buffers is also zero.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}

		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* Replenish rx_bufs_reaped buffers back to the
		 * RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;

		/* If end_of_ppdu is zero, it is a valid data mpdu:
		 * a. Add head_msdu and tail_msdu to the mpdu list
		 * b. Continue reaping the next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of an rxdma error, the MPDU is dropped
			 * from the sw_monitor_ring descriptor; head_msdu
			 * then remains NULL.
			 * Move the srng to the next entry and continue
			 * reaping.
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare an MPDU object which holds the chain of
			 * msdus and the MPDU-specific status, and add it to
			 * the monitor mpdu queue
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;
			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}

		/* It is sometimes observed that ppdu_id, status_buf_addr
		 * and the link desc addr are NULL; this WAR handles that case
		 */
		if (!desc_info->ppdu_id && !desc_info->status_buf.paddr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ppdu_id: %d ring_entry: %pK "
				     "status_buf_count: %d rxdma_push: %d "
				     "rxdma_err: %d link_desc: %pK "),
				  desc_info->ppdu_id, ring_desc,
				  desc_info->status_buf_count,
				  desc_info->rxdma_push_reason,
				  desc_info->rxdma_error_code,
				  desc_info->link_desc.paddr);
			goto next_entry;
		}

		/*
		 * end_of_ppdu is one:
		 * a. Update ppdu_done statistics
		 * b. Replenish buffers back to the mon buffer ring
		 * c. Reap the status ring for a PPDU and deliver all mpdus
		 *    to the upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

		work_done += dp_rx_mon_reap_status_ring(soc, int_ctx, mac_id,
							quota, desc_info);
		/* Deliver all MPDUs for a PPDU */
		if (desc_info->drop_ppdu)
			dp_rx_mon_drop_ppdu(pdev, mac_id);
		else if (!pdev->hold_mon_dest_ring)
			dp_rx_monitor_deliver_ppdu(soc, mac_id);

next_entry:
		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
		break;
	}

	dp_srng_access_end(int_ctx, soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

	return work_done;
}
/**
 * dp_full_mon_attach() - attach full monitor mode
 * resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));

	if (!pdev->mon_desc) {
		qdf_err("Memory allocation failed for hal_rx_mon_desc_info");
		return;
	}
	TAILQ_INIT(&pdev->mon_mpdu_q);
}
/**
 * dp_full_mon_detach() - detach full monitor mode
 * resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	if (pdev->mon_desc)
		qdf_mem_free(pdev->mon_desc);

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			qdf_mem_free(mpdu);
		}
	}
}
#endif