/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "dp_htt.h"
#include "dp_full_mon.h"
#include "qdf_mem.h"

#ifdef QCA_SUPPORT_FULL_MON

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 struct dp_intr *int_ctx,
			 uint32_t mac_id,
			 uint32_t quota);
/*
 * dp_rx_mon_status_buf_validate() - Validate first monitor status buffer addr
 * against status buf addr given in monitor destination ring
 *
 * @pdev: DP pdev handle
 * @int_ctx: Interrupt context
 * @mac_id: lmac id
 *
 * Return: enum dp_mon_reap_status
 */
static inline enum dp_mon_reap_status
dp_rx_mon_status_buf_validate(struct dp_pdev *pdev,
			      struct dp_intr *int_ctx,
			      uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *ring_entry;
	uint32_t rx_buf_cookie;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	uint64_t buf_paddr;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t tlv_tag;
	void *rx_tlv;
	struct hal_rx_ppdu_info *ppdu_info;
	enum dp_mon_reap_status status = dp_mon_status_match;
	QDF_STATUS buf_status;
	uint32_t ppdu_id_diff;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG access Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
	if (!ring_entry) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG entry is NULL srng:-- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = dp_mon_status_replenish;
		goto done;
	}

	ppdu_info = &pdev->ppdu_info;
	rx_desc_pool = &soc->rx_desc_status[mac_id];

	buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
		     ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));
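	/* Illustration (hypothetical values): the HW splits the 40-bit
	 * status buffer DMA address across two ring fields. With addr
	 * bits [31:0] = 0x89abcdef and addr bits [39:32] = 0x12, the
	 * reassembled address is buf_paddr = 0x1289abcdef.
	 */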
	if (!buf_paddr) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : buf addr is NULL -- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = dp_mon_status_replenish;
		goto done;
	}

	rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

	qdf_assert(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);

	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If status buffer DMA is not done,
	 * hold on to mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor status ring: DMA is not done "
			     "for nbuf: %pK buf_addr: %llx"),
			  status_nbuf, buf_paddr);
		pdev->rx_mon_stats.tlv_tag_status_err++;
		status = dp_mon_status_no_dma;
		goto done;
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);

	if (tlv_tag == WIFIRX_PPDU_START_E) {
		rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
		pdev->mon_desc->status_ppdu_id =
			HAL_RX_GET(rx_tlv, RX_PPDU_START_0, PHY_PPDU_ID);
		pdev->status_buf_addr = buf_paddr;
	}
	if (pdev->mon_desc->ppdu_id < pdev->mon_desc->status_ppdu_id) {
		status = dp_mon_status_lead;

		/* For wrap around case */
		ppdu_id_diff = pdev->mon_desc->status_ppdu_id -
			       pdev->mon_desc->ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = dp_mon_status_lag;
	} else if (pdev->mon_desc->ppdu_id > pdev->mon_desc->status_ppdu_id) {
		status = dp_mon_status_lag;

		/* For wrap around case */
		ppdu_id_diff = pdev->mon_desc->ppdu_id -
			       pdev->mon_desc->status_ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = dp_mon_status_lead;
	}
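	/* Worked example of the wrap heuristic (values are illustrative):
	 * with a 16-bit PPDU id, a destination id of 0xfffe and a status
	 * id that has wrapped to 0x0002 give dest > status, which naively
	 * reads as "status lags". The difference (0xfffc) exceeds
	 * DP_RX_MON_PPDU_ID_WRAP, so it is re-classified as a wrap around
	 * and the status ring is treated as leading instead.
	 */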
	if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
	    (pdev->mon_desc->ppdu_id != pdev->mon_desc->status_ppdu_id)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor: PPDU id or status buf_addr mismatch "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx status_buf_cookie: %d "
			     "dest_addr: %llx tlv_tag: %d"
			     " status_nbuf: %pK pdev->hold_mon_dest: %d"),
			  pdev->mon_desc->status_ppdu_id,
			  pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
			  rx_buf_cookie,
			  pdev->mon_desc->status_buf.paddr, tlv_tag,
			  status_nbuf, pdev->hold_mon_dest_ring);
	}

done:
	hal_srng_access_end(hal_soc, mon_status_srng);
	return status;
}
/*
 * dp_rx_mon_prepare_mon_mpdu() - API to prepare dp_mon_mpdu object
 *
 * @pdev: DP pdev object
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 *
 * Return: pointer to the allocated dp_mon_mpdu object
 */
static inline struct dp_mon_mpdu *
dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
			   qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	struct dp_mon_mpdu *mon_mpdu = NULL;

	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));

	if (!mon_mpdu) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor MPDU object allocation failed -- %pK"),
			  pdev);
		qdf_assert_always(0);
	}

	mon_mpdu->head = head_msdu;
	mon_mpdu->tail = tail_msdu;
	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;

	return mon_mpdu;
}
static inline void
dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;
	qdf_nbuf_t mon_skb, skb_next;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			mon_skb = mpdu->head;
			while (mon_skb) {
				skb_next = qdf_nbuf_next(mon_skb);

				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  "[%s][%d] mon_skb=%pK len %u",
					  __func__, __LINE__,
					  mon_skb, mon_skb->len);

				qdf_nbuf_free(mon_skb);
				mon_skb = skb_next;
			}

			qdf_mem_free(mpdu);
		}
	}

	pdev->mon_desc->drop_ppdu = 0;
}
/*
 * dp_rx_monitor_deliver_ppdu() - API to deliver all MPDUs of a PPDU
 * to upper layer stack
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			/* Check for IEEE80211_AMSDU_FLAG in mpdu
			 * and set in pdev->ppdu_info.rx_status
			 */
			HAL_RX_SET_MSDU_AGGREGATION(mpdu,
						    &(pdev->ppdu_info.rx_status));

			pdev->ppdu_info.rx_status.ant_signal_db =
				mpdu->ant_signal_db;
			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;

			dp_rx_mon_deliver(soc, mac_id,
					  mpdu->head, mpdu->tail);

			qdf_mem_free(mpdu);
		}
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_mon_reap_status_ring() - Reap status_buf_count of status buffers from
 * the status ring.
 *
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of reaped status ring entries
 */
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   struct dp_intr *int_ctx,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t status_buf_count;
	uint32_t work_done = 0;
	enum dp_mon_reap_status status;

	status_buf_count = desc_info->status_buf_count;
	desc_info->drop_ppdu = false;

	status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);
	switch (status) {
	case dp_mon_status_no_dma:
		/* If DMA is not done for status ring entry,
		 * hold on to monitor destination ring and
		 * deliver current ppdu data once DMA is done.
		 */
		pdev->hold_mon_dest_ring = true;
		break;
	case dp_mon_status_lag:
		/* If status_ppdu_id is lagging behind destination,
		 * a. Hold on to destination ring
		 * b. Drop status ppdus until ppdu id matches
		 * c. Increment stats for ppdu_id mismatch and
		 *    status ppdu drop
		 */
		pdev->hold_mon_dest_ring = true;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.status_ppdu_drop++;
		break;
	case dp_mon_status_lead:
		/* If status_ppdu_id is leading ahead destination,
		 * a. Drop destination ring ppdu until ppdu_id matches
		 * b. Unhold monitor destination ring so status ppdus
		 *    can be dropped.
		 * c. Increment stats for ppdu_id mismatch and
		 *    destination ppdu drop
		 */
		desc_info->drop_ppdu = true;
		pdev->hold_mon_dest_ring = false;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.dest_ppdu_drop++;
		break;
	case dp_mon_status_replenish:
		/* If status ring hp entry is NULL, replenish it */
		work_done = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
		break;
	case dp_mon_status_match:
		/* If status ppdu id matches with destination,
		 * unhold monitor destination ring and deliver ppdu
		 */
		pdev->hold_mon_dest_ring = false;
		break;
	default:
		dp_err("mon reap status is not supported");
	}

	/* If status ring is lagging behind destination ring,
	 * reap only one status buffer
	 */
	if (status == dp_mon_status_lag)
		status_buf_count = 1;

	if (status == dp_mon_status_lag ||
	    status == dp_mon_status_match) {
		work_done = dp_rx_mon_status_process(soc,
						     int_ctx,
						     mac_id,
						     status_buf_count);
	}

	return work_done;
}
/**
 * dp_rx_mon_mpdu_reap() - This API reaps an MPDU from a mon dest ring
 * descriptor and returns the link descriptor to HW (WBM)
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to first msdu in a chain
 * @tail_msdu: nbuf pointing to last msdu in a chain
 * @head_desc: head pointer to free desc list
 * @tail_desc: tail pointer to free desc list
 *
 * Return: number of reaped buffers
 */
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false, is_first_msdu = true, is_frag_non_raw = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;
	uint16_t prev_ppdu_id;
	struct rx_desc_pool *rx_desc_pool = NULL;

	desc_info = pdev->mon_desc;

	/* Restore previous ppdu_id to use it while doing
	 * status buffer validation
	 */
	prev_ppdu_id = pdev->mon_desc->status_ppdu_id;
	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
	pdev->mon_desc->status_ppdu_id = prev_ppdu_id;

	/* Read SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return */
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is rxdma error, drop mpdu */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			 == QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}

	/*
	 * while loop iterates through all link descriptors and
	 * reaps msdu_count number of msdus for one SW_MONITOR_RING descriptor
	 * and forms nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);

			if (rx_desc->unmapped == 0) {
				rx_desc_pool = dp_rx_get_mon_desc_pool(
					soc, mac_id, pdev->pdev_id);
				dp_rx_mon_buffer_unmap(soc, rx_desc,
						       rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				dp_rx_mon_buffer_free(rx_desc);
				msdu = NULL;
				desc_info->msdu_count--;
				goto next_msdu;
			}

			rx_tlv_hdr = dp_rx_mon_get_buffer_data(rx_desc);

			if (is_first_msdu) {
				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
				    != QDF_STATUS_SUCCESS) {
					DP_STATS_INC(pdev,
						     replenish.nbuf_alloc_fail,
						     1);
					qdf_frag_free(rx_tlv_hdr);
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  "[%s] failed to allocate parent buffer to hold all frag",
						  __func__);
					drop_mpdu = true;
					desc_info->msdu_count--;
					goto next_msdu;
				}
				is_first_msdu = false;
			}

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);

			/* If msdu is fragmented, spread across multiple
			 * buffers
			 * a. calculate len of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    a msdu and decrement one msdu_count
			 */
			dp_rx_mon_parse_desc_buffer(soc,
						    &(msdu_list.msdu_info[msdu_index]),
						    &msdu_frag,
						    &total_frag_len,
						    &frag_len,
						    &l3_hdr_pad,
						    rx_tlv_hdr,
						    &is_frag_non_raw, rx_tlv_hdr);
			if (!msdu_frag)
				desc_info->msdu_count--;
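			/* Illustration (assuming an MSDU spanning three
			 * monitor buffers): the first two parse calls
			 * report msdu_frag = true and only the final
			 * fragment clears it, so msdu_count is
			 * decremented exactly once per complete MSDU.
			 */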
			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding.
			 * this is actually the offset
			 * from the buffer beginning where the L2
			 * header begins.
			 */

			/*******************************************************
			 *                     RX_PACKET                       *
			 * ----------------------------------------------------
			 * |  RX_PKT_TLVS | L3 Padding header | msdu data |   |
			 * ----------------------------------------------------
			 ******************************************************/

			dp_rx_mon_buffer_set_pktlen(msdu,
						    rx_hdr_tlv_len +
						    l3_hdr_pad +
						    frag_len);

			dp_rx_mon_add_msdu_to_list(head_msdu, msdu, &last_msdu,
						   rx_tlv_hdr, frag_len,
						   l3_hdr_pad);

next_msdu:
			rx_buf_reaped++;
			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("total_len %u frag_len %u flags %u"),
				  total_frag_len, frag_len,
				  msdu_list.msdu_info[msdu_index].msdu_flags);
		}

		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get next link desc VA from current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* return msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "dp_rx_monitor_link_desc_return failed");
		}
	}

	pdev->rx_mon_stats.dest_mpdu_done++;
	dp_rx_mon_init_tail_msdu(msdu, last_msdu, tail_msdu);
	dp_rx_mon_remove_raw_frame_fcs_len(head_msdu);

	return rx_buf_reaped;
}
/*
 * dp_rx_mon_deliver_prev_ppdu() - Deliver previous PPDU
 *
 * @pdev: DP pdev handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id
 * @quota: quota
 *
 * Return: remaining quota
 */
static inline uint32_t
dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
			    struct dp_intr *int_ctx,
			    uint32_t mac_id,
			    uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
	uint32_t work_done = 0, work = 0;
	bool deliver_ppdu = false;
	enum dp_mon_reap_status status;

	while (pdev->hold_mon_dest_ring) {
		status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);

		switch (status) {
		case dp_mon_status_no_dma:
			/* If DMA is not done for status ring entry,
			 * hold on to monitor destination ring and
			 * deliver current ppdu data once DMA is done.
			 */
			pdev->hold_mon_dest_ring = true;
			break;
		case dp_mon_status_lag:
			/* If status_ppdu_id is lagging behind destination,
			 * a. Hold on to destination ring
			 * b. Drop status ppdus until ppdu id matches
			 * c. Increment stats for ppdu_id mismatch and
			 *    status ppdu drop
			 */
			pdev->hold_mon_dest_ring = true;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.status_ppdu_drop++;
			break;
		case dp_mon_status_lead:
			/* If status_ppdu_id is leading ahead destination,
			 * a. Drop destination ring ppdu until ppdu_id matches
			 * b. Unhold monitor destination ring so status ppdus
			 *    can be dropped.
			 * c. Increment stats for ppdu_id mismatch and
			 *    destination ppdu drop
			 */
			desc_info->drop_ppdu = true;
			pdev->hold_mon_dest_ring = false;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.dest_ppdu_drop++;
			break;
		case dp_mon_status_replenish:
			/* If status ring hp entry is NULL, replenish it */
			work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
			break;
		case dp_mon_status_match:
			/* If status ppdu id matches with destination,
			 * unhold monitor destination ring and deliver ppdu
			 */
			pdev->hold_mon_dest_ring = false;
			break;
		default:
			dp_err("mon reap status is not supported");
		}

		/* When status ring entry's DMA is not done or
		 * status ring entry is replenished, ppdu status is not
		 * available for radiotap construction, so return and
		 * check for status on next interrupt
		 */
		if ((status == dp_mon_status_no_dma) ||
		    (status == dp_mon_status_replenish)) {
			return work_done;
		}

		if (status == dp_mon_status_lag) {
			work = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);

			if (!work)
				return 0;

			work_done += work;
		}
		deliver_ppdu = true;
	}

	if (deliver_ppdu) {
		if (pdev->mon_desc->drop_ppdu) {
			dp_rx_mon_drop_ppdu(pdev, mac_id);
			return work_done;
		}

		work_done += dp_rx_mon_status_process(soc, int_ctx, mac_id,
						      desc_info->status_buf_count);

		dp_rx_monitor_deliver_ppdu(soc, mac_id);
	}

	return work_done;
}
/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes monitor destination ring followed by monitor status ring
 * Called from bottom half (tasklet/NET_RX_SOFTIRQ)
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot.
 *
 * Return: Number of reaped status ring entries
 */
uint32_t dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
	uint32_t work_done = 0;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return work_done;
	}

	qdf_spin_lock_bh(&pdev->mon_lock);

	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
		work_done += dp_rx_mon_status_process(soc, int_ctx,
						      mac_id, quota);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	desc_info = pdev->mon_desc;

	rx_mon_stats = &pdev->rx_mon_stats;

	work_done = dp_rx_mon_deliver_prev_ppdu(pdev, int_ctx, mac_id, quota);

	/* Do not proceed if work_done is zero while the destination
	 * ring is still on hold
	 */
	if (!work_done && pdev->hold_mon_dest_ring) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	/* Each entry in mon dest ring carries mpdu data;
	 * reap all msdus for a mpdu and form skb chain
	 */
	while (qdf_likely(ring_desc =
			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;
		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);

		/* Log an error if end_of_ppdu is zero and the number of
		 * reaped buffers is also zero.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}

		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* replenish rx_bufs_reaped buffers back to
		 * RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;

		/* If end_of_ppdu is zero, it is a valid data mpdu
		 * a. Add head_msdu and tail_msdu to mpdu list
		 * b. continue reaping next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of rxdma error, MPDU is dropped
			 * from sw_monitor_ring descriptor.
			 * in this case, head_msdu remains NULL.
			 * move srng to next and continue reaping next entry
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare a MPDU object which holds chain of msdus
			 * and MPDU specific status and add it to the
			 * monitor mpdu queue
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;
			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}

		/* It is observed that sometimes ppdu_id, status_buf_addr
		 * and link desc addr are NULL; this WAR handles that case
		 */
		if (!desc_info->ppdu_id && !desc_info->status_buf.paddr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ppdu_id: %d ring_entry: %pK "
				     "status_buf_count: %d rxdma_push: %d "
				     "rxdma_err: %d link_desc: %pK "),
				  desc_info->ppdu_id, ring_desc,
				  desc_info->status_buf_count,
				  desc_info->rxdma_push_reason,
				  desc_info->rxdma_error_code,
				  desc_info->link_desc.paddr);
			goto next_entry;
		}

		/*
		 * end_of_ppdu is one,
		 * a. update ppdu_done statistics
		 * b. Replenish buffers back to mon buffer ring
		 * c. reap status ring for a PPDU and deliver all mpdus
		 *    to upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

		work_done += dp_rx_mon_reap_status_ring(soc, int_ctx, mac_id,
							quota, desc_info);
		/* Deliver all MPDUs for a PPDU */
		if (desc_info->drop_ppdu)
			dp_rx_mon_drop_ppdu(pdev, mac_id);
		else if (!pdev->hold_mon_dest_ring)
			dp_rx_monitor_deliver_ppdu(soc, mac_id);

next_entry:
		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
		break;
	}

	dp_srng_access_end(int_ctx, soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

	return work_done;
}
/**
 * dp_full_mon_attach() - attach full monitor mode
 * resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));

	if (!pdev->mon_desc) {
		qdf_err("Memory allocation failed for hal_rx_mon_desc_info");
		return;
	}
	TAILQ_INIT(&pdev->mon_mpdu_q);
}
/**
 * dp_full_mon_detach() - detach full monitor mode
 * resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	if (pdev->mon_desc)
		qdf_mem_free(pdev->mon_desc);

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			qdf_mem_free(mpdu);
		}
	}
}
#endif