dp_full_mon.c
/*
 * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "dp_htt.h"
#include "dp_full_mon.h"
#include "qdf_mem.h"

#ifdef QCA_SUPPORT_FULL_MON

uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 struct dp_intr *int_ctx,
			 uint32_t mac_id,
			 uint32_t quota);
/*
 * dp_rx_mon_status_buf_validate() - Validate the first monitor status buffer
 * address against the status buffer address given in the monitor destination
 * ring
 *
 * @pdev: DP pdev handle
 * @int_ctx: Interrupt context
 * @mac_id: lmac id
 *
 * Return: enum dp_mon_reap_status
 */
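/*
 * Summary of the possible verdicts (derived from the handling in
 * dp_rx_mon_reap_status_ring() and dp_rx_mon_deliver_prev_ppdu() below):
 *	DP_MON_STATUS_MATCH     - status and destination ppdu_id agree; deliver
 *	DP_MON_STATUS_LAG       - status ring is behind the destination ring
 *	DP_MON_STATUS_LEAD      - status ring is ahead of the destination ring
 *	DP_MON_STATUS_REPLENISH - ring entry or buffer address is NULL (or the
 *				  DMA-not-done WAR fired); replenish the entry
 *	DP_MON_STATUS_NO_DMA    - status buffer DMA has not completed; retry
 *				  on a later pass
 */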
static inline enum dp_mon_reap_status
dp_rx_mon_status_buf_validate(struct dp_pdev *pdev,
			      struct dp_intr *int_ctx,
			      uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *ring_entry;
	uint32_t rx_buf_cookie;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	uint64_t buf_paddr;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t tlv_tag;
	void *rx_tlv;
	struct hal_rx_ppdu_info *ppdu_info;
	enum dp_mon_reap_status status = DP_MON_STATUS_MATCH;
	QDF_STATUS buf_status;
	uint32_t ppdu_id_diff;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		dp_rx_mon_dest_debug("%pK: HAL Monitor Status Ring Init Failed -- %pK",
				     soc, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
		dp_rx_mon_dest_debug("%pK: HAL SRNG access Failed -- %pK",
				     soc, mon_status_srng);
		QDF_ASSERT(0);
		return status;
	}

	ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
	if (!ring_entry) {
		dp_rx_mon_dest_debug("%pK: HAL SRNG entry is NULL srng: -- %pK",
				     soc, mon_status_srng);
		status = DP_MON_STATUS_REPLENISH;
		goto done;
	}

	ppdu_info = &pdev->ppdu_info;
	rx_desc_pool = &soc->rx_desc_status[mac_id];

	buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
		     ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));

	if (!buf_paddr) {
		dp_rx_mon_dest_debug("%pK: buf addr is NULL -- %pK",
				     soc, mon_status_srng);
		status = DP_MON_STATUS_REPLENISH;
		goto done;
	}

	rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

	qdf_assert(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);

	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If the status buffer DMA is not done,
	 * hold on to the mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		dp_rx_mon_dest_debug("%pK: Monitor status ring: DMA is not done "
				     "for nbuf: %pK buf_addr: %llx",
				     soc, status_nbuf, buf_paddr);
		status = dp_rx_mon_handle_status_buf_done(pdev,
							  mon_status_srng);

		if (status == DP_MON_STATUS_REPLENISH) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			/* If this is the DMA-not-done WAR case, unmap and
			 * free the buffer and the current SW descriptor,
			 * and make buf_addr_info NULL, so that the call to
			 * dp_rx_mon_status_process() replenishes the entry
			 * in the status ring.
			 */
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);
			hal_rxdma_buff_addr_info_set(
					ring_entry,
					0, 0, HAL_RX_BUF_RBM_SW3_BM);
		}
		goto done;
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);
	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);

	if (tlv_tag == WIFIRX_PPDU_START_E) {
		rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
		pdev->mon_desc->status_ppdu_id =
			HAL_RX_GET(rx_tlv, RX_PPDU_START_0, PHY_PPDU_ID);
		pdev->status_buf_addr = buf_paddr;
	}
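	/*
	 * Classify status vs. destination ppdu_id with wrap-around
	 * correction: a raw compare is trusted only while the difference
	 * stays within DP_RX_MON_PPDU_ID_WRAP (assumed to be a threshold
	 * well below the ppdu_id range). Illustrative values: dest 0xFFF8
	 * vs. status 0x0004 looks like the status ring lags, but the large
	 * difference means the status ppdu_id wrapped, so it is really
	 * leading.
	 */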
	if (pdev->mon_desc->ppdu_id < pdev->mon_desc->status_ppdu_id) {
		status = DP_MON_STATUS_LEAD;

		/* For the wrap-around case */
		ppdu_id_diff = pdev->mon_desc->status_ppdu_id -
			       pdev->mon_desc->ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = DP_MON_STATUS_LAG;
	} else if (pdev->mon_desc->ppdu_id > pdev->mon_desc->status_ppdu_id) {
		status = DP_MON_STATUS_LAG;

		/* For the wrap-around case */
		ppdu_id_diff = pdev->mon_desc->ppdu_id -
			       pdev->mon_desc->status_ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = DP_MON_STATUS_LEAD;
	}
	if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
	    (pdev->mon_desc->ppdu_id != pdev->mon_desc->status_ppdu_id)) {
		dp_rx_mon_dest_debug("%pK: Monitor: PPDU id or status buf_addr mismatch "
				     "status_ppdu_id: %d dest_ppdu_id: %d "
				     "status_addr: %llx status_buf_cookie: %d "
				     "dest_addr: %llx tlv_tag: %d "
				     "status_nbuf: %pK pdev->hold_mon_dest: %d",
				     soc, pdev->mon_desc->status_ppdu_id,
				     pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
				     rx_buf_cookie,
				     pdev->mon_desc->status_buf.paddr, tlv_tag,
				     status_nbuf, pdev->hold_mon_dest_ring);
	}

done:
	hal_srng_access_end(hal_soc, mon_status_srng);
	return status;
}
/*
 * dp_rx_mon_prepare_mon_mpdu() - API to prepare a dp_mon_mpdu object
 *
 * @pdev: DP pdev object
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 *
 * Return: pointer to the newly allocated dp_mon_mpdu object
 */
static inline struct dp_mon_mpdu *
dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
			   qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	struct dp_mon_mpdu *mon_mpdu = NULL;

	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));

	if (!mon_mpdu) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor MPDU object allocation failed -- %pK"),
			  pdev);
		qdf_assert_always(0);
	}

	mon_mpdu->head = head_msdu;
	mon_mpdu->tail = tail_msdu;
	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;

	return mon_mpdu;
}
static inline void
dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;
	qdf_nbuf_t mon_skb, skb_next;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			mon_skb = mpdu->head;
			while (mon_skb) {
				skb_next = qdf_nbuf_next(mon_skb);

				dp_rx_mon_dest_debug("%pK: mon_skb=%pK len %u",
						     pdev->soc, mon_skb,
						     mon_skb->len);

				qdf_nbuf_free(mon_skb);
				mon_skb = skb_next;
			}

			qdf_mem_free(mpdu);
		}
	}
	pdev->mon_desc->drop_ppdu = 0;
}
/*
 * dp_rx_monitor_deliver_ppdu() - API to deliver all MPDUs of a PPDU
 * to the upper layer stack
 *
 * @soc: DP soc handle
 * @pdev: pdev
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_monitor_deliver_ppdu(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   uint32_t mac_id)
{
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			/* Check for IEEE80211_AMSDU_FLAG in the mpdu
			 * and set it in pdev->ppdu_info.rx_status
			 */
			HAL_RX_SET_MSDU_AGGREGATION(mpdu,
						    &(pdev->ppdu_info.rx_status));
			pdev->ppdu_info.rx_status.ant_signal_db =
				mpdu->ant_signal_db;
			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;

			dp_rx_mon_deliver(soc, mac_id,
					  mpdu->head, mpdu->tail);

			qdf_mem_free(mpdu);
		}
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_mon_reap_status_ring() - Reap status_buf_count status buffers from
 * the status ring
 *
 * @soc: DP soc handle
 * @pdev: pdev
 * @int_ctx: interrupt context
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of status ring entries reaped
 */
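/*
 * Action taken per validation verdict (a summary of the switch below):
 *	NO_DMA     -> hold the destination ring until DMA completes
 *	LAG        -> hold the destination ring, reap one status buffer
 *	LEAD       -> drop the destination ppdu, release the hold
 *	REPLENISH  -> replenish the empty status ring entry
 *	MATCH      -> release the hold and reap status_buf_count buffers
 */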
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   struct dp_pdev *pdev,
			   struct dp_intr *int_ctx,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	uint8_t status_buf_count;
	uint32_t work_done = 0;
	enum dp_mon_reap_status status;

	status_buf_count = desc_info->status_buf_count;
	desc_info->drop_ppdu = false;

	status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);
	switch (status) {
	case DP_MON_STATUS_NO_DMA:
		/* If DMA is not done for the status ring entry,
		 * hold on to the monitor destination ring and
		 * deliver the current ppdu data once DMA is done.
		 */
		pdev->hold_mon_dest_ring = true;
		break;
	case DP_MON_STATUS_LAG:
		/* If status_ppdu_id is lagging behind destination,
		 * a. Hold on to the destination ring
		 * b. Drop status ppdus until the ppdu id matches
		 * c. Increment stats for ppdu_id mismatch and
		 *    status ppdu drop
		 */
		pdev->hold_mon_dest_ring = true;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.status_ppdu_drop++;
		break;
	case DP_MON_STATUS_LEAD:
		/* If status_ppdu_id is leading ahead of destination,
		 * a. Drop destination ring ppdus until the ppdu_id matches
		 * b. Unhold the monitor destination ring so status ppdus
		 *    can be dropped
		 * c. Increment stats for ppdu_id mismatch and
		 *    destination ppdu drop
		 */
		desc_info->drop_ppdu = true;
		pdev->hold_mon_dest_ring = false;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.dest_ppdu_drop++;
		break;
	case DP_MON_STATUS_REPLENISH:
		/* If the status ring hp entry is NULL, replenish it */
		work_done = dp_rx_mon_status_process(soc, int_ctx, mac_id, 1);
		break;
	case DP_MON_STATUS_MATCH:
		/* If the status ppdu id matches with destination,
		 * unhold the monitor destination ring and deliver the ppdu
		 */
		pdev->hold_mon_dest_ring = false;
		break;
	default:
		dp_err("mon reap status is not supported");
	}

	/* If the status ring is lagging behind the destination ring,
	 * reap only one status buffer
	 */
	if (status == DP_MON_STATUS_LAG)
		status_buf_count = 1;

	if (status == DP_MON_STATUS_LAG ||
	    status == DP_MON_STATUS_MATCH) {
		work_done = dp_rx_mon_status_process(soc,
						     int_ctx,
						     mac_id,
						     status_buf_count);
	}

	return work_done;
}
/**
 * dp_rx_mon_mpdu_reap() - Reap one MPDU from a mon dest ring descriptor
 * and return its MSDU link descriptors to HW (WBM)
 *
 * @soc: DP soc handle
 * @pdev: pdev
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to the first msdu in a chain
 * @tail_msdu: nbuf pointing to the last msdu in a chain
 * @head_desc: head pointer of the free desc list
 * @tail_desc: tail pointer of the free desc list
 *
 * Return: number of reaped buffers
 */
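/*
 * Reap flow for one SW_MONITOR_RING descriptor (a summary of the loop
 * below): walk the MSDU link descriptor chain, and for each MSDU unmap
 * its buffer, drop it on rxdma error, otherwise account for the TLV and
 * L3-pad header in the packet length, chain it into head_msdu..tail_msdu,
 * and recycle its SW descriptor onto the free list; each consumed link
 * descriptor is then returned to the WBM idle list.
 */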
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, struct dp_pdev *pdev, uint32_t mac_id,
		    void *ring_desc, qdf_nbuf_t *head_msdu,
		    qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false, is_first_msdu = true, is_frag_non_raw = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;
	uint16_t prev_ppdu_id;
	struct rx_desc_pool *rx_desc_pool = NULL;

	desc_info = pdev->mon_desc;

	/* Restore the previous ppdu_id to use it while doing
	 * status buffer validation
	 */
	prev_ppdu_id = pdev->mon_desc->status_ppdu_id;
	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));
	pdev->mon_desc->status_ppdu_id = prev_ppdu_id;

	/* Read the SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return */
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is an rxdma error, drop the mpdu */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			 == QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}

	/*
	 * The while loop iterates through all link descriptors and
	 * reaps msdu_count msdus for one SW_MONITOR_RING descriptor
	 * and forms an nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = DP_RX_MON_GET_NBUF_FROM_DESC(rx_desc);

			if (rx_desc->unmapped == 0) {
				rx_desc_pool = dp_rx_get_mon_desc_pool(
						soc, mac_id, pdev->pdev_id);
				dp_rx_mon_buffer_unmap(soc, rx_desc,
						       rx_desc_pool->buf_size);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				dp_rx_mon_buffer_free(rx_desc);
				msdu = NULL;
				/*
				 * Don't rely on msdu_cnt in case of an rxdma
				 * error; don't decrement msdu_cnt
				 */
				goto next_msdu;
			}

			rx_tlv_hdr = dp_rx_mon_get_buffer_data(rx_desc);

			if (is_first_msdu) {
				if (dp_rx_mon_alloc_parent_buffer(head_msdu)
				    != QDF_STATUS_SUCCESS) {
					DP_STATS_INC(pdev,
						     replenish.nbuf_alloc_fail,
						     1);
					qdf_frag_free(rx_tlv_hdr);
					dp_rx_mon_dest_debug("%pK: failed to allocate parent buffer to hold all frag",
							     soc);
					drop_mpdu = true;
					desc_info->msdu_count--;
					goto next_msdu;
				}
				is_first_msdu = false;
			}

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);

			/* If the msdu is fragmented, it is spread across
			 * multiple buffers:
			 * a. calculate the length of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    the msdu and decrement msdu_count once
			 */
			dp_rx_mon_parse_desc_buffer(soc,
						    &(msdu_list.msdu_info[msdu_index]),
						    &msdu_frag,
						    &total_frag_len,
						    &frag_len,
						    &l3_hdr_pad,
						    rx_tlv_hdr,
						    &is_frag_non_raw, rx_tlv_hdr);
			if (!msdu_frag)
				desc_info->msdu_count--;

			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding; it is
			 * actually the offset from the buffer beginning
			 * where the L2 header begins:
			 *
			 * RX_PACKET buffer layout:
			 * | RX_PKT_TLVS | L3 padding header | msdu data |
			 */
			dp_rx_mon_buffer_set_pktlen(msdu,
						    rx_hdr_tlv_len +
						    l3_hdr_pad +
						    frag_len);

			if (dp_rx_mon_add_msdu_to_list(head_msdu, msdu,
						       &last_msdu,
						       rx_tlv_hdr, frag_len,
						       l3_hdr_pad)
			    != QDF_STATUS_SUCCESS) {
				dp_rx_mon_add_msdu_to_list_failure_handler(rx_tlv_hdr,
									   pdev,
									   &last_msdu,
									   head_msdu,
									   tail_msdu,
									   __func__);
				drop_mpdu = true;
				goto next_msdu;
			}

next_msdu:
			rx_buf_reaped++;
			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			dp_rx_mon_dest_debug("%pK: total_len %u frag_len %u flags %u",
					     soc, total_frag_len, frag_len,
					     msdu_list.msdu_info[msdu_index].msdu_flags);
		}

		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get the next link desc VA from the current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* Return the msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			dp_rx_mon_dest_err("%pK: dp_rx_monitor_link_desc_return failed",
					   soc);
		}
	}
	pdev->rx_mon_stats.dest_mpdu_done++;

	dp_rx_mon_init_tail_msdu(head_msdu, msdu, last_msdu, tail_msdu);
	dp_rx_mon_remove_raw_frame_fcs_len(head_msdu, tail_msdu);

	return rx_buf_reaped;
}
/*
 * dp_rx_mon_deliver_prev_ppdu() - Deliver the previous PPDU
 *
 * @pdev: DP pdev handle
 * @int_ctx: interrupt context
 * @mac_id: lmac id
 * @quota: quota
 *
 * Return: work done
 */
static inline uint32_t
dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
			    struct dp_intr *int_ctx,
			    uint32_t mac_id,
			    uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
	uint32_t work_done = 0, work = 0;
	bool deliver_ppdu = false;
	enum dp_mon_reap_status status;

	while (pdev->hold_mon_dest_ring) {
		status = dp_rx_mon_status_buf_validate(pdev, int_ctx, mac_id);

		switch (status) {
		case DP_MON_STATUS_NO_DMA:
			/* If DMA is not done for the status ring entry,
			 * hold on to the monitor destination ring and
			 * deliver the current ppdu data once DMA is done.
			 */
			pdev->hold_mon_dest_ring = true;
			break;
		case DP_MON_STATUS_LAG:
			/* If status_ppdu_id is lagging behind destination,
			 * a. Hold on to the destination ring
			 * b. Drop status ppdus until the ppdu id matches
			 * c. Increment stats for ppdu_id mismatch and
			 *    status ppdu drop
			 */
			pdev->hold_mon_dest_ring = true;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.status_ppdu_drop++;
			break;
		case DP_MON_STATUS_LEAD:
			/* If status_ppdu_id is leading ahead of destination,
			 * a. Drop destination ring ppdus until the ppdu_id
			 *    matches
			 * b. Unhold the monitor destination ring so status
			 *    ppdus can be dropped
			 * c. Increment stats for ppdu_id mismatch and
			 *    destination ppdu drop
			 */
			desc_info->drop_ppdu = true;
			pdev->hold_mon_dest_ring = false;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.dest_ppdu_drop++;
			break;
		case DP_MON_STATUS_REPLENISH:
			/* If the status ring hp entry is NULL, replenish it */
			work = dp_rx_mon_status_process(soc, int_ctx,
							mac_id, 1);
			break;
		case DP_MON_STATUS_MATCH:
			/* If the status ppdu id matches with destination,
			 * unhold the monitor destination ring and deliver
			 * the ppdu
			 */
			pdev->hold_mon_dest_ring = false;
			break;
		default:
			dp_err("mon reap status is not supported");
		}

		/* When the status ring entry's DMA is not done or the
		 * status ring entry is replenished, ppdu status is not
		 * available for radiotap construction, so return and
		 * check for status on the next interrupt
		 */
		if ((status == DP_MON_STATUS_NO_DMA) ||
		    (status == DP_MON_STATUS_REPLENISH)) {
			return work_done;
		}

		if (status == DP_MON_STATUS_LAG) {
			work = dp_rx_mon_status_process(soc, int_ctx,
							mac_id, 1);

			if (!work)
				return 0;

			work_done += work;
		}
		deliver_ppdu = true;
	}

	if (deliver_ppdu) {
		if (pdev->mon_desc->drop_ppdu) {
			dp_rx_mon_drop_ppdu(pdev, mac_id);
			return work_done;
		}

		work_done += dp_rx_mon_status_process(soc, int_ctx, mac_id,
						      desc_info->status_buf_count);
		dp_rx_monitor_deliver_ppdu(soc, pdev, mac_id);
	}

	return work_done;
}
/**
 * dp_rx_mon_process() - Core processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the monitor
 * status ring. Called from the bottom half (tasklet/NET_RX_SOFTIRQ).
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which the interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot
 *
 * Return: Number of reaped status ring entries
 */
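/*
 * High-level flow (a summary of the body below):
 * 1. If a PPDU is pending (pdev->hold_mon_dest_ring), try to deliver it
 *    first via dp_rx_mon_deliver_prev_ppdu().
 * 2. Reap destination ring entries, chaining MSDUs into MPDUs, until the
 *    end_of_ppdu descriptor is seen.
 * 3. On end_of_ppdu, reap the corresponding status ring entries and either
 *    deliver or drop the queued MPDUs based on the ppdu_id match verdict.
 */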
uint32_t dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			   uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
	uint32_t work_done = 0;

	if (!pdev) {
		dp_rx_mon_dest_err("pdev is null for mac_id = %d",
				   mac_id);
		return work_done;
	}

	qdf_spin_lock_bh(&pdev->mon_lock);

	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
		work_done += dp_rx_mon_status_process(soc, int_ctx,
						      mac_id, quota);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	desc_info = pdev->mon_desc;

	rx_mon_stats = &pdev->rx_mon_stats;

	work_done = dp_rx_mon_deliver_prev_ppdu(pdev, int_ctx, mac_id, quota);

	/* Do not proceed if work_done is zero while the destination ring
	 * is still held
	 */
	if (!work_done && pdev->hold_mon_dest_ring) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		dp_rx_mon_dest_debug("%pK: HAL Monitor Destination Ring Init Failed -- %pK",
				     soc, mon_dest_srng);
		goto done1;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	/* Each entry in the mon dest ring carries mpdu data;
	 * reap all msdus for an mpdu and form the skb chain
	 */
	while (qdf_likely(ring_desc =
			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;
		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, pdev, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);

		/* Log an error if end_of_ppdu is zero and the number of
		 * reaped buffers is also zero.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}

		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* Replenish rx_bufs_reaped buffers back to the
		 * RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;

		/* If end_of_ppdu is zero, it is a valid data mpdu:
		 * a. Add head_msdu and tail_msdu to the mpdu list
		 * b. Continue reaping the next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of an rxdma error, the MPDU is dropped
			 * from the sw_monitor_ring descriptor; in this case
			 * head_msdu remains NULL, so move the srng to the
			 * next entry and continue reaping
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare an MPDU object which holds the chain of
			 * msdus and the MPDU-specific status, and add it
			 * to the monitor mpdu queue
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;
			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}

		/* It is sometimes observed that ppdu_id, status_buf_addr
		 * and link desc addr are NULL; this WAR handles that case
		 */
		if (!desc_info->ppdu_id && !desc_info->status_buf.paddr) {
			dp_rx_mon_dest_debug("%pK: ppdu_id: %d ring_entry: %pK "
					     "status_buf_count: %d rxdma_push: %d "
					     "rxdma_err: %d link_desc: %llx ",
					     soc, desc_info->ppdu_id, ring_desc,
					     desc_info->status_buf_count,
					     desc_info->rxdma_push_reason,
					     desc_info->rxdma_error_code,
					     desc_info->link_desc.paddr);
			goto next_entry;
		}

		/*
		 * end_of_ppdu is one:
		 * a. Update ppdu_done statistics
		 * b. Replenish buffers back to the mon buffer ring
		 * c. Reap the status ring for the PPDU and deliver all its
		 *    mpdus to the upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

		work_done += dp_rx_mon_reap_status_ring(soc, pdev, int_ctx,
							mac_id, quota,
							desc_info);
		/* Deliver all MPDUs for a PPDU */
		if (desc_info->drop_ppdu)
			dp_rx_mon_drop_ppdu(pdev, mac_id);
		else if (!pdev->hold_mon_dest_ring)
			dp_rx_monitor_deliver_ppdu(soc, pdev, mac_id);

next_entry:
		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
		break;
	}

	dp_srng_access_end(int_ctx, soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

	return work_done;
}
/**
 * dp_full_mon_attach() - attach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));

	if (!pdev->mon_desc) {
		qdf_err("Memory allocation failed for hal_rx_mon_desc_info");
		return;
	}
	TAILQ_INIT(&pdev->mon_mpdu_q);
}
/**
 * dp_full_mon_detach() - detach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	if (pdev->mon_desc)
		qdf_mem_free(pdev->mon_desc);

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			qdf_mem_free(mpdu);
		}
	}
}
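/*
 * Illustrative call pattern (a sketch; the actual call sites live in the
 * pdev attach/detach and interrupt-service paths outside this file and are
 * an assumption here, not part of this translation unit):
 *
 *	dp_full_mon_attach(pdev);	// pdev init, after soc->full_mon_mode is set
 *	...
 *	work = dp_rx_mon_process(soc, int_ctx, mac_id, quota);	// per RX mon interrupt
 *	...
 *	dp_full_mon_detach(pdev);	// pdev teardown
 */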
#endif /* QCA_SUPPORT_FULL_MON */