dp_full_mon.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880
  1. /*
  2. * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "dp_types.h"
  17. #include "hal_rx.h"
  18. #include "hal_api.h"
  19. #include "qdf_trace.h"
  20. #include "qdf_nbuf.h"
  21. #include "hal_api_mon.h"
  22. #include "dp_rx.h"
  23. #include "dp_rx_mon.h"
  24. #include "dp_internal.h"
  25. #include "dp_htt.h"
  26. #include "dp_full_mon.h"
  27. #include "qdf_mem.h"
  28. #ifdef QCA_SUPPORT_FULL_MON
uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 uint32_t mac_id,
			 uint32_t quota);

/*
 * dp_rx_mon_status_buf_validate () - Validate first monitor status buffer addr
 * against status buf addr given in monitor destination ring
 *
 * @pdev: DP pdev handle
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS_SUCCESS when the status ring head agrees with the
 *	   latched destination-ring status buffer (or the status ring is
 *	   ahead and the destination ring is put on hold),
 *	   QDF_STATUS_E_FAILURE otherwise.
 */
static inline QDF_STATUS
dp_rx_mon_status_buf_validate(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *ring_entry;
	uint32_t rx_buf_cookie;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	uint64_t buf_paddr;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t tlv_tag;
	void *rx_tlv;
	struct hal_rx_ppdu_info *ppdu_info;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	QDF_STATUS buf_status;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return status;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG access Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return status;
	}

	/* Peek (do not consume) the next status ring entry; the entry is
	 * only reaped later by dp_rx_mon_status_process() once validated.
	 */
	ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
	if (!ring_entry) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG entry is NULL srng:-- %pK",
			  __func__, __LINE__, mon_status_srng);
		goto done;
	}

	ppdu_info = &pdev->ppdu_info;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	/* Reassemble the 40-bit DMA address from the two ring-entry fields */
	buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
		     ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));

	if (!buf_paddr) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : buf addr is NULL -- %pK",
			  __func__, __LINE__, mon_status_srng);
		goto done;
	}

	rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

	qdf_assert(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);

	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If status buffer DMA is not done,
	 * hold on to mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor status ring: DMA is not done "
			     "for nbuf: %pK buf_addr: %llx"),
			  status_nbuf, buf_paddr);
		pdev->rx_mon_stats.tlv_tag_status_err++;
		pdev->hold_mon_dest_ring = true;
		goto done;
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);

	/* Skip the status-done TLV and read the PPDU id from RX_PPDU_START */
	rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);

	if (tlv_tag == WIFIRX_PPDU_START_E) {
		rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
		ppdu_info->com_info.ppdu_id = HAL_RX_GET(rx_tlv,
							 RX_PPDU_START_0,
							 PHY_PPDU_ID);
		pdev->status_buf_addr = buf_paddr;
	}

	/* If Monitor destination ring is on hold and ppdu id matches,
	 * deliver PPDU data which was on hold.
	 */
	if (pdev->hold_mon_dest_ring &&
	    (pdev->mon_desc->ppdu_id == pdev->ppdu_info.com_info.ppdu_id)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor destination was on Hold "
			     "PPDU id matched"));

		pdev->hold_mon_dest_ring = false;
		goto done;
	}

	/* Diagnostic only: log any address/ppdu-id disagreement before the
	 * decision below (does not change control flow).
	 */
	if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
	    (pdev->mon_desc->ppdu_id != pdev->ppdu_info.com_info.ppdu_id)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor: PPDU id or status buf_addr mismatch "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx status_buf_cookie: %d "
			     "dest_addr: %llx tlv_tag: %d"
			     " status_nbuf: %pK"),
			  pdev->ppdu_info.com_info.ppdu_id,
			  pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
			  rx_buf_cookie,
			  pdev->mon_desc->status_buf.paddr, tlv_tag,
			  status_nbuf);
	}

	/* Monitor Status ring is reaped in two cases:
	 * a. If first status buffer's buf_addr_info matches
	 *    with latched status buffer addr info in monitor
	 *    destination ring.
	 * b. If monitor status ring is lagging behind
	 *    monitor destination ring. Hold on to monitor
	 *    destination ring in this case until status ring
	 *    and destination ring ppdu id matches.
	 */
	if ((pdev->mon_desc->status_buf.paddr == buf_paddr) ||
	    (pdev->mon_desc->ppdu_id > pdev->ppdu_info.com_info.ppdu_id)) {
		if (pdev->mon_desc->ppdu_id >
		    pdev->ppdu_info.com_info.ppdu_id) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Monitor status ring is lagging behind "
				     "monitor destination ring "
				     "status_ppdu_id: %d dest_ppdu_id: %d "
				     "status_nbuf: %pK tlv_tag: %d "
				     "status_addr: %llx dest_addr: %llx "),
				  ppdu_info->com_info.ppdu_id,
				  pdev->mon_desc->ppdu_id,
				  status_nbuf, tlv_tag,
				  pdev->status_buf_addr,
				  pdev->mon_desc->status_buf.paddr);

			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.status_ppdu_drop++;
			pdev->hold_mon_dest_ring = true;
		}
		status = QDF_STATUS_SUCCESS;
	}

done:
	hal_srng_access_end(hal_soc, mon_status_srng);
	return status;
}
  183. /*
  184. * dp_rx_mon_prepare_mon_mpdu () - API to prepare dp_mon_mpdu object
  185. *
  186. * @pdev: DP pdev object
  187. * @head_msdu: Head msdu
  188. * @tail_msdu: Tail msdu
  189. *
  190. */
  191. static inline struct dp_mon_mpdu *
  192. dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
  193. qdf_nbuf_t head_msdu,
  194. qdf_nbuf_t tail_msdu)
  195. {
  196. struct dp_mon_mpdu *mon_mpdu = NULL;
  197. mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));
  198. if (!mon_mpdu) {
  199. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  200. FL("Monitor MPDU object allocation failed -- %pK"),
  201. pdev);
  202. qdf_assert_always(0);
  203. }
  204. mon_mpdu->head = head_msdu;
  205. mon_mpdu->tail = tail_msdu;
  206. mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
  207. mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
  208. mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
  209. mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
  210. mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;
  211. return mon_mpdu;
  212. }
  213. static inline void
  214. dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
  215. {
  216. struct dp_mon_mpdu *mpdu = NULL;
  217. struct dp_mon_mpdu *temp_mpdu = NULL;
  218. qdf_nbuf_t mon_skb, skb_next;
  219. if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
  220. TAILQ_FOREACH_SAFE(mpdu,
  221. &pdev->mon_mpdu_q,
  222. mpdu_list_elem,
  223. temp_mpdu) {
  224. TAILQ_REMOVE(&pdev->mon_mpdu_q,
  225. mpdu, mpdu_list_elem);
  226. mon_skb = mpdu->head;
  227. while (mon_skb) {
  228. skb_next = qdf_nbuf_next(mon_skb);
  229. QDF_TRACE(QDF_MODULE_ID_DP,
  230. QDF_TRACE_LEVEL_DEBUG,
  231. "[%s][%d] mon_skb=%pK len %u"
  232. " __func__, __LINE__",
  233. mon_skb, mon_skb->len);
  234. qdf_nbuf_free(mon_skb);
  235. mon_skb = skb_next;
  236. }
  237. qdf_mem_free(mpdu);
  238. }
  239. }
  240. }
  241. /*
  242. * dp_rx_monitor_deliver_ppdu () - API to deliver all MPDU for a MPDU
  243. * to upper layer stack
  244. *
  245. * @soc: DP soc handle
  246. * @mac_id: lmac id
  247. */
  248. static inline QDF_STATUS
  249. dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
  250. {
  251. struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  252. struct dp_mon_mpdu *mpdu = NULL;
  253. struct dp_mon_mpdu *temp_mpdu = NULL;
  254. if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
  255. TAILQ_FOREACH_SAFE(mpdu,
  256. &pdev->mon_mpdu_q,
  257. mpdu_list_elem,
  258. temp_mpdu) {
  259. TAILQ_REMOVE(&pdev->mon_mpdu_q,
  260. mpdu, mpdu_list_elem);
  261. pdev->ppdu_info.rx_status.rs_flags = mpdu->rs_flags;
  262. pdev->ppdu_info.rx_status.ant_signal_db =
  263. mpdu->ant_signal_db;
  264. pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
  265. pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
  266. pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;
  267. dp_rx_mon_deliver(soc, mac_id,
  268. mpdu->head, mpdu->tail);
  269. qdf_mem_free(mpdu);
  270. }
  271. }
  272. return QDF_STATUS_SUCCESS;
  273. }
/**
 * dp_rx_mon_reap_status_ring () - Reap status_buf_count of status buffers for
 * status ring.
 *
 * @soc: DP soc handle
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of status ring entries actually processed (0 when the
 *	   head status buffer failed validation).
 */
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t status_buf_count;
	uint32_t work_done = 0;

	status_buf_count = desc_info->status_buf_count;
	desc_info->drop_ppdu = false;

	/* Only reap when the status ring head matches the destination-ring
	 * latched buffer (or status is ahead); otherwise leave the ring as-is.
	 */
	if (dp_rx_mon_status_buf_validate(pdev, mac_id) == QDF_STATUS_SUCCESS)
		work_done = dp_rx_mon_status_process(soc,
						     mac_id,
						     status_buf_count);

	/* Diagnostic trace for any status/destination ppdu-id disagreement */
	if (desc_info->ppdu_id != pdev->ppdu_info.com_info.ppdu_id) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor: PPDU id mismatch "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx dest_addr: %llx "
			     "count: %d quota: %d work_done: %d "),
			  pdev->ppdu_info.com_info.ppdu_id,
			  pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
			  pdev->mon_desc->status_buf.paddr,
			  status_buf_count, quota, work_done);
	}

	/* Destination ring lagging behind status ring: mark the PPDU for
	 * drop so the caller discards the queued MPDUs.
	 */
	if (desc_info->ppdu_id < pdev->ppdu_info.com_info.ppdu_id) {
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		desc_info->drop_ppdu = true;
		pdev->rx_mon_stats.dest_ppdu_drop++;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor destination ring is lagging behind "
			     "monitor status ring "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx dest_addr: %llx "),
			  pdev->ppdu_info.com_info.ppdu_id,
			  pdev->mon_desc->ppdu_id,
			  pdev->status_buf_addr,
			  pdev->mon_desc->status_buf.paddr);
	}

	return work_done;
}
/**
 * dp_rx_mon_mpdu_reap () - This API reaps a mpdu from mon dest ring descriptor
 * and returns link descriptor to HW (WBM)
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to first msdu in a chain
 * @tail_msdu: nbuf pointing to last msdu in a chain
 * @head_desc: head pointer to free desc list
 * @tail_desc: tail pointer to free desc list
 *
 * Return: number of reaped buffers
 */
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;

	desc_info = pdev->mon_desc;

	/* Clear stale contents before HAL fills in this descriptor's info */
	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));

	/* Read SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return*/
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is rxdma error, drop mpdu */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			== QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}

	/*
	 * while loop iterates through all link descriptors and
	 * reaps msdu_count number of msdus for one SW_MONITOR_RING descriptor
	 * and forms nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = rx_desc->nbuf;

			/* Unmap only once per buffer */
			if (rx_desc->unmapped == 0) {
				qdf_nbuf_unmap_single(soc->osdev,
						      msdu,
						      QDF_DMA_FROM_DEVICE);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				qdf_nbuf_free(msdu);
				msdu = NULL;
				desc_info->msdu_count--;
				goto next_msdu;
			}

			rx_tlv_hdr = qdf_nbuf_data(msdu);

			/* First msdu carries the MPDU-level HW status */
			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);

			/** If msdu is fragmented, spread across multiple
			 * buffers
			 * a. calculate len of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    a msdu and decrement one msdu_count
			 */
			if (msdu_list.msdu_info[msdu_index].msdu_flags
			    & HAL_MSDU_F_MSDU_CONTINUATION) {
				if (!msdu_frag) {
					total_frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
					msdu_frag = true;
				}
				dp_mon_adjust_frag_len(&total_frag_len,
						       &frag_len);
			} else {
				if (msdu_frag)
					/* last buffer of a fragmented msdu */
					dp_mon_adjust_frag_len(&total_frag_len,
							       &frag_len);
				else
					frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
				msdu_frag = false;
				desc_info->msdu_count--;
			}

			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding.
			 * this is actually the offset
			 * from the buffer beginning where the L2
			 * header begins.
			 */
			l3_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
							soc->hal_soc,
							rx_tlv_hdr);

			/*******************************************************
			 *                    RX_PACKET                        *
			 * ----------------------------------------------------*
			 |   RX_PKT_TLVS          |   L3 Padding header  |  msdu data| |
			 * ----------------------------------------------------*
			 ******************************************************/

			qdf_nbuf_set_pktlen(msdu,
					    rx_hdr_tlv_len +
					    l3_hdr_pad +
					    frag_len);

			/* Chain msdus into the caller-provided list */
			if (head_msdu && !*head_msdu)
				*head_msdu = msdu;
			else if (last_msdu)
				qdf_nbuf_set_next(last_msdu, msdu);

			last_msdu = msdu;

next_msdu:
			rx_buf_reaped++;
			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			/* NOTE(review): FL() prepends "%s: %d: " consuming
			 * __func__/__LINE__; the extra leading "%s" below has
			 * no matching argument, so total_frag_len would be
			 * interpreted as a string pointer — confirm and fix
			 * the format string.
			 */
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("%s total_len %u frag_len %u flags %u"),
				  total_frag_len, frag_len,
				  msdu_list.msdu_info[msdu_index].msdu_flags);
		}

		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get next link desc VA from current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* return msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "dp_rx_monitor_link_desc_return failed");
		}
	}

	pdev->rx_mon_stats.dest_mpdu_done++;

	/* Terminate the chain; tail may be NULL if the whole MPDU was
	 * dropped (msdu is NULL on the drop path).
	 */
	if (last_msdu)
		qdf_nbuf_set_next(last_msdu, NULL);

	*tail_msdu = msdu;

	return rx_buf_reaped;
}
/*
 * dp_rx_mon_deliver_prev_ppdu () - Deliver previous PPDU
 *
 * @pdev: DP pdev handle
 * @mac_id: lmac id
 * @quota: quota
 *
 * Return: remaining quota
 */
static inline uint32_t
dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
			    uint32_t mac_id,
			    uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
	uint32_t work_done = 0;
	bool hold_mon_dest_ring = false;

	/* Spin one status entry at a time while the destination ring is on
	 * hold; dp_rx_mon_status_process() clears pdev->hold_mon_dest_ring
	 * (via buf_validate) once the ppdu ids line up.
	 *
	 * NOTE(review): if buf_validate keeps failing, work_done stays 0 and
	 * quota never decreases, so this loop relies on the status buffer
	 * eventually becoming ready — confirm there is no livelock path.
	 */
	while (pdev->hold_mon_dest_ring) {
		if (dp_rx_mon_status_buf_validate(pdev, mac_id) == QDF_STATUS_SUCCESS) {
			work_done = dp_rx_mon_status_process(soc, mac_id, 1);
		}
		quota -= work_done;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Hold on Monitor destination ring "
			     "work_done: %d quota: %d status_ppdu_id: %d "
			     "dest_ppdu_id: %d s_addr: %llx d_addr: %llx "),
			  work_done, quota,
			  pdev->ppdu_info.com_info.ppdu_id,
			  desc_info->ppdu_id, pdev->status_buf_addr,
			  desc_info->status_buf.paddr);
		hold_mon_dest_ring = true;

		if (!quota)
			return quota;
	}

	/* Hold was just released: reap the rest of this PPDU's status
	 * buffers (if quota allows) and deliver the held MPDUs.
	 */
	if (hold_mon_dest_ring) {
		if (quota >= desc_info->status_buf_count) {
			qdf_err("DEBUG:");
			work_done = dp_rx_mon_status_process(soc, mac_id,
							     desc_info->status_buf_count);
			dp_rx_monitor_deliver_ppdu(soc, mac_id);
			hold_mon_dest_ring = false;
		} else {
			/* Not enough quota left; stay on hold until the
			 * next invocation.
			 */
			pdev->hold_mon_dest_ring = true;
			return quota;
		}
		quota -= work_done;
	}

	return quota;
}
/**
 * dp_rx_mon_process () - Core brain processing for monitor mode
 *
 * This API processes monitor destination ring followed by monitor status ring
 * Called from bottom half (tasklet/NET_RX_SOFTIRQ)
 *
 * @soc: datapath soc context
 * @mac_id: mac_id on which interrupt is received
 * @quota: Number of status ring entry that can be serviced in one shot.
 *
 * @Return: Number of reaped status ring entries
 */
uint32_t dp_rx_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats = &pdev->rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
	uint32_t work_done = 0;

	qdf_spin_lock_bh(&pdev->mon_lock);

	/* Legacy (non-full-monitor) path: only the status ring is processed */
	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
		quota -= dp_rx_mon_status_process(soc, mac_id, quota);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return quota;
	}

	desc_info = pdev->mon_desc;

	/* Finish any PPDU left on hold from a previous invocation first */
	quota = dp_rx_mon_deliver_prev_ppdu(pdev, mac_id, quota);

	/* Do not proceed if quota expires */
	if (!quota || pdev->hold_mon_dest_ring) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return quota;
	}

	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	/* Each entry in mon dest ring carries mpdu data
	 * reap all msdus for a mpdu and form skb chain
	 */
	while (qdf_likely(ring_desc =
			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;
		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);

		/* Assert if end_of_ppdu is zero and number of reaped buffers
		 * are zero.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}

		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* replenish rx_bufs_reaped buffers back to
		 * RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;

		/* If end_of_ppdu is zero, it is a valid data mpdu
		 *    a. Add head_msdu and tail_msdu to mpdu list
		 *    b. continue reaping next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of rxdma error, MPDU is dropped
			 * from sw_monitor_ring descriptor.
			 * in this case, head_msdu remains NULL.
			 * move srng to next and continue reaping next entry
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare a MPDU object which holds chain of msdus
			 * and MPDU specific status and add this is to
			 * monitor mpdu queue
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;
			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}

		/*
		 * end_of_ppdu is one,
		 *  a. update ppdu_done stattistics
		 *  b. Replenish buffers back to mon buffer ring
		 *  c. reap status ring for a PPDU and deliver all mpdus
		 *     to upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

		work_done = dp_rx_mon_reap_status_ring(soc, mac_id,
						       quota, desc_info);
		/* Deliver all MPDUs for a PPDU */
		if (desc_info->drop_ppdu)
			dp_rx_mon_drop_ppdu(pdev, mac_id);
		else if (!pdev->hold_mon_dest_ring)
			dp_rx_monitor_deliver_ppdu(soc, mac_id);

		/* Consume the end-of-ppdu entry and stop: one PPDU per call */
		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
		quota -= work_done;
		break;
	}

	hal_srng_access_end(hal_soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

	return quota;
}
  699. /**
  700. * dp_full_mon_attach() - attach full monitor mode
  701. * resources
  702. * @pdev: Datapath PDEV handle
  703. *
  704. * Return: void
  705. */
  706. void dp_full_mon_attach(struct dp_pdev *pdev)
  707. {
  708. struct dp_soc *soc = pdev->soc;
  709. if (!soc->full_mon_mode) {
  710. qdf_debug("Full monitor is not enabled");
  711. return;
  712. }
  713. pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));
  714. if (!pdev->mon_desc) {
  715. qdf_err("Memory allocation failed for hal_rx_mon_desc_info ");
  716. return;
  717. }
  718. TAILQ_INIT(&pdev->mon_mpdu_q);
  719. }
  720. /**
  721. * dp_full_mon_detach() - detach full monitor mode
  722. * resources
  723. * @pdev: Datapath PDEV handle
  724. *
  725. * Return: void
  726. *
  727. */
  728. void dp_full_mon_detach(struct dp_pdev *pdev)
  729. {
  730. struct dp_soc *soc = pdev->soc;
  731. struct dp_mon_mpdu *mpdu = NULL;
  732. struct dp_mon_mpdu *temp_mpdu = NULL;
  733. if (!soc->full_mon_mode) {
  734. qdf_debug("Full monitor is not enabled");
  735. return;
  736. }
  737. if (pdev->mon_desc)
  738. qdf_mem_free(pdev->mon_desc);
  739. if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
  740. TAILQ_FOREACH_SAFE(mpdu,
  741. &pdev->mon_mpdu_q,
  742. mpdu_list_elem,
  743. temp_mpdu) {
  744. qdf_mem_free(mpdu);
  745. }
  746. }
  747. }
  748. #endif