dp_full_mon.c

/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "dp_types.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "dp_htt.h"
#include "dp_full_mon.h"
#include "qdf_mem.h"

#ifdef QCA_SUPPORT_FULL_MON
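/*
 * Forward declaration only; the status ring processing routine is
 * defined in the monitor status path (dp_rx_mon_status.c in this
 * driver tree).
 */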
uint32_t
dp_rx_mon_status_process(struct dp_soc *soc,
			 uint32_t mac_id,
			 uint32_t quota);
/*
 * dp_rx_mon_status_buf_validate() - Validate the first monitor status buffer
 * addr against the status buf addr given in the monitor destination ring
 *
 * @pdev: DP pdev handle
 * @mac_id: lmac id
 *
 * Return: enum dp_mon_reap_status
 */
static inline enum dp_mon_reap_status
dp_rx_mon_status_buf_validate(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *ring_entry;
	uint32_t rx_buf_cookie;
	qdf_nbuf_t status_nbuf;
	struct dp_rx_desc *rx_desc;
	uint64_t buf_paddr;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t tlv_tag;
	void *rx_tlv;
	struct hal_rx_ppdu_info *ppdu_info;
	enum dp_mon_reap_status status = dp_mon_status_match;
	QDF_STATUS buf_status;
	uint32_t ppdu_id_diff;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return status;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG access Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return status;
	}

	ring_entry = hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng);
	if (!ring_entry) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : HAL SRNG entry is NULL srng:-- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = dp_mon_status_replenish;
		goto done;
	}

	ppdu_info = &pdev->ppdu_info;
	rx_desc_pool = &soc->rx_desc_status[mac_id];

	buf_paddr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_entry) |
		     ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_entry)) << 32));

	if (!buf_paddr) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s %d : buf addr is NULL -- %pK",
			  __func__, __LINE__, mon_status_srng);
		status = dp_mon_status_replenish;
		goto done;
	}

	rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
	rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

	qdf_assert(rx_desc);

	status_nbuf = rx_desc->nbuf;

	qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
			      QDF_DMA_FROM_DEVICE);

	rx_tlv = qdf_nbuf_data(status_nbuf);

	buf_status = hal_get_rx_status_done(rx_tlv);

	/* If status buffer DMA is not done,
	 * hold on to mon destination ring.
	 */
	if (buf_status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor status ring: DMA is not done "
			     "for nbuf: %pK buf_addr: %llx"),
			  status_nbuf, buf_paddr);
		pdev->rx_mon_stats.tlv_tag_status_err++;
		status = dp_mon_status_no_dma;
		goto done;
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
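	/* The DMA-done check above passed and the buffer is unmapped,
	 * so it is now safe to walk the status TLVs in this nbuf.
	 */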
	rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

	tlv_tag = HAL_RX_GET_USER_TLV32_TYPE(rx_tlv);

	if (tlv_tag == WIFIRX_PPDU_START_E) {
		rx_tlv = (uint8_t *)rx_tlv + HAL_RX_TLV32_HDR_SIZE;
		ppdu_info->com_info.ppdu_id = HAL_RX_GET(rx_tlv,
							 RX_PPDU_START_0,
							 PHY_PPDU_ID);
		pdev->status_buf_addr = buf_paddr;
	}
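	/* The ppdu id and status buffer address parsed from the status
	 * TLV above are compared below against what the destination ring
	 * descriptor reported, to decide whether the two rings are in sync.
	 */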
	if (pdev->mon_desc->ppdu_id < pdev->ppdu_info.com_info.ppdu_id) {
		status = dp_mon_status_lead;

		/* For wrap around case */
		ppdu_id_diff = pdev->ppdu_info.com_info.ppdu_id -
			       pdev->mon_desc->ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = dp_mon_status_lag;
	} else if (pdev->mon_desc->ppdu_id > pdev->ppdu_info.com_info.ppdu_id) {
		status = dp_mon_status_lag;

		/* For wrap around case */
		ppdu_id_diff = pdev->mon_desc->ppdu_id -
			       pdev->ppdu_info.com_info.ppdu_id;
		if (ppdu_id_diff > DP_RX_MON_PPDU_ID_WRAP)
			status = dp_mon_status_lead;
	}
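	/* Worked example of the wrap handling above: if the destination
	 * id has wrapped to a small value while the status id is still
	 * near the top of the range, the raw comparison says the status
	 * ring leads, but the large difference (> DP_RX_MON_PPDU_ID_WRAP)
	 * reveals the wrap and flips the verdict to lag; the mirror case
	 * flips lag back to lead.
	 */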
	if ((pdev->mon_desc->status_buf.paddr != buf_paddr) ||
	    (pdev->mon_desc->ppdu_id != pdev->ppdu_info.com_info.ppdu_id)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor: PPDU id or status buf_addr mismatch "
			     "status_ppdu_id: %d dest_ppdu_id: %d "
			     "status_addr: %llx status_buf_cookie: %d "
			     "dest_addr: %llx tlv_tag: %d "
			     "status_nbuf: %pK pdev->hold_mon_dest: %d"),
			  pdev->ppdu_info.com_info.ppdu_id,
			  pdev->mon_desc->ppdu_id, pdev->status_buf_addr,
			  rx_buf_cookie,
			  pdev->mon_desc->status_buf.paddr, tlv_tag,
			  status_nbuf, pdev->hold_mon_dest_ring);
	}

done:
	hal_srng_access_end(hal_soc, mon_status_srng);
	return status;
}
/*
 * dp_rx_mon_prepare_mon_mpdu() - API to prepare a dp_mon_mpdu object
 *
 * @pdev: DP pdev object
 * @head_msdu: Head msdu
 * @tail_msdu: Tail msdu
 *
 * Return: pointer to the newly allocated dp_mon_mpdu object
 */
static inline struct dp_mon_mpdu *
dp_rx_mon_prepare_mon_mpdu(struct dp_pdev *pdev,
			   qdf_nbuf_t head_msdu,
			   qdf_nbuf_t tail_msdu)
{
	struct dp_mon_mpdu *mon_mpdu = NULL;

	mon_mpdu = qdf_mem_malloc(sizeof(struct dp_mon_mpdu));

	if (!mon_mpdu) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("Monitor MPDU object allocation failed -- %pK"),
			  pdev);
		qdf_assert_always(0);
	}

	mon_mpdu->head = head_msdu;
	mon_mpdu->tail = tail_msdu;
	mon_mpdu->rs_flags = pdev->ppdu_info.rx_status.rs_flags;
	mon_mpdu->ant_signal_db = pdev->ppdu_info.rx_status.ant_signal_db;
	mon_mpdu->is_stbc = pdev->ppdu_info.rx_status.is_stbc;
	mon_mpdu->sgi = pdev->ppdu_info.rx_status.sgi;
	mon_mpdu->beamformed = pdev->ppdu_info.rx_status.beamformed;

	return mon_mpdu;
}
static inline void
dp_rx_mon_drop_ppdu(struct dp_pdev *pdev, uint32_t mac_id)
{
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;
	qdf_nbuf_t mon_skb, skb_next;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			mon_skb = mpdu->head;
			while (mon_skb) {
				skb_next = qdf_nbuf_next(mon_skb);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  "[%s][%d] mon_skb=%pK len %u",
					  __func__, __LINE__,
					  mon_skb, mon_skb->len);
				qdf_nbuf_free(mon_skb);
				mon_skb = skb_next;
			}

			qdf_mem_free(mpdu);
		}
	}

	pdev->mon_desc->drop_ppdu = 0;
}
/*
 * dp_rx_monitor_deliver_ppdu() - API to deliver all MPDUs of a PPDU
 * to the upper layer stack
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_rx_monitor_deliver_ppdu(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			TAILQ_REMOVE(&pdev->mon_mpdu_q,
				     mpdu, mpdu_list_elem);

			pdev->ppdu_info.rx_status.rs_flags = mpdu->rs_flags;
			pdev->ppdu_info.rx_status.ant_signal_db =
				mpdu->ant_signal_db;
			pdev->ppdu_info.rx_status.is_stbc = mpdu->is_stbc;
			pdev->ppdu_info.rx_status.sgi = mpdu->sgi;
			pdev->ppdu_info.rx_status.beamformed = mpdu->beamformed;

			dp_rx_mon_deliver(soc, mac_id,
					  mpdu->head, mpdu->tail);

			qdf_mem_free(mpdu);
		}
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_mon_reap_status_ring() - Reap status_buf_count status buffers from
 * the status ring
 *
 * @soc: DP soc handle
 * @mac_id: mac id on which interrupt is received
 * @quota: number of status ring entries to be reaped
 * @desc_info: Rx ppdu desc info
 *
 * Return: number of status ring entries processed
 */
static inline uint32_t
dp_rx_mon_reap_status_ring(struct dp_soc *soc,
			   uint32_t mac_id,
			   uint32_t quota,
			   struct hal_rx_mon_desc_info *desc_info)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint8_t status_buf_count;
	uint32_t work_done = 0;
	enum dp_mon_reap_status status;

	status_buf_count = desc_info->status_buf_count;
	desc_info->drop_ppdu = false;

	status = dp_rx_mon_status_buf_validate(pdev, mac_id);
	switch (status) {
	case dp_mon_status_no_dma:
		/* If DMA is not done for status ring entry,
		 * hold on to monitor destination ring and
		 * deliver current ppdu data once DMA is done.
		 */
		pdev->hold_mon_dest_ring = true;
		break;
	case dp_mon_status_lag:
		/* If status_ppdu_id is lagging behind destination,
		 * a. Hold on to destination ring
		 * b. Drop status ppdus until ppdu id matches
		 * c. Increment stats for ppdu_id mismatch and
		 *    status ppdu drop
		 */
		pdev->hold_mon_dest_ring = true;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.status_ppdu_drop++;
		break;
	case dp_mon_status_lead:
		/* If status_ppdu_id is leading ahead of destination,
		 * a. Drop destination ring ppdus until ppdu_id matches
		 * b. Unhold monitor destination ring so status ppdus
		 *    can be dropped.
		 * c. Increment stats for ppdu_id mismatch and
		 *    destination ppdu drop
		 */
		desc_info->drop_ppdu = true;
		pdev->hold_mon_dest_ring = false;
		pdev->rx_mon_stats.ppdu_id_mismatch++;
		pdev->rx_mon_stats.dest_ppdu_drop++;
		break;
	case dp_mon_status_replenish:
		/* If the status ring hp entry is NULL, replenish it */
		work_done = dp_rx_mon_status_process(soc, mac_id, 1);
		break;
	case dp_mon_status_match:
		/* If the status ppdu id matches the destination,
		 * unhold the monitor destination ring and deliver the ppdu
		 */
		pdev->hold_mon_dest_ring = false;
		break;
	default:
		dp_err("mon reap status is not supported");
	}
	/* If the status ring is lagging behind the destination ring,
	 * reap only one status buffer
	 */
	if (status == dp_mon_status_lag)
		status_buf_count = 1;

	if (status == dp_mon_status_lag ||
	    status == dp_mon_status_match) {
		work_done = dp_rx_mon_status_process(soc,
						     mac_id,
						     status_buf_count);
	}

	return work_done;
}
/**
 * dp_rx_mon_mpdu_reap() - This API reaps an mpdu from the mon dest ring
 * descriptor and returns the link descriptor to HW (WBM)
 *
 * @soc: DP soc handle
 * @mac_id: lmac id
 * @ring_desc: SW monitor ring desc
 * @head_msdu: nbuf pointing to first msdu in a chain
 * @tail_msdu: nbuf pointing to last msdu in a chain
 * @head_desc: head pointer to free desc list
 * @tail_desc: tail pointer to free desc list
 *
 * Return: number of reaped buffers
 */
static inline uint32_t
dp_rx_mon_mpdu_reap(struct dp_soc *soc, uint32_t mac_id, void *ring_desc,
		    qdf_nbuf_t *head_msdu, qdf_nbuf_t *tail_msdu,
		    union dp_rx_desc_list_elem_t **head_desc,
		    union dp_rx_desc_list_elem_t **tail_desc)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct dp_rx_desc *rx_desc = NULL;
	struct hal_rx_msdu_list msdu_list;
	uint32_t rx_buf_reaped = 0;
	uint16_t num_msdus = 0, msdu_index, rx_hdr_tlv_len, l3_hdr_pad;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool drop_mpdu = false;
	bool msdu_frag = false;
	void *link_desc_va;
	uint8_t *rx_tlv_hdr;
	qdf_nbuf_t msdu = NULL, last_msdu = NULL;
	uint32_t rx_link_buf_info[HAL_RX_BUFFINFO_NUM_DWORDS];
	struct hal_rx_mon_desc_info *desc_info;

	desc_info = pdev->mon_desc;

	qdf_mem_zero(desc_info, sizeof(struct hal_rx_mon_desc_info));

	/* Read SW Mon ring descriptor */
	hal_rx_sw_mon_desc_info_get((struct hal_soc *)soc->hal_soc,
				    ring_desc,
				    (void *)desc_info);

	/* If end_of_ppdu is 1, return */
	if (desc_info->end_of_ppdu)
		return rx_buf_reaped;

	/* If there is an rxdma error, drop the mpdu */
	if (qdf_unlikely(dp_rx_mon_is_rxdma_error(desc_info)
			 == QDF_STATUS_SUCCESS)) {
		drop_mpdu = true;
		pdev->rx_mon_stats.dest_mpdu_drop++;
	}

	/*
	 * The while loop iterates through all link descriptors, reaps
	 * msdu_count msdus for one SW_MONITOR_RING descriptor and forms
	 * the nbuf queue.
	 */
	while (desc_info->msdu_count && desc_info->link_desc.paddr) {
		link_desc_va = dp_rx_cookie_2_mon_link_desc(pdev,
							    desc_info->link_desc,
							    mac_id);

		qdf_assert_always(link_desc_va);

		hal_rx_msdu_list_get(soc->hal_soc,
				     link_desc_va,
				     &msdu_list,
				     &num_msdus);

		for (msdu_index = 0; msdu_index < num_msdus; msdu_index++) {
			rx_desc = dp_rx_get_mon_desc(soc,
						     msdu_list.sw_cookie[msdu_index]);

			qdf_assert_always(rx_desc);

			msdu = rx_desc->nbuf;

			if (rx_desc->unmapped == 0) {
				qdf_nbuf_unmap_single(soc->osdev,
						      msdu,
						      QDF_DMA_FROM_DEVICE);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				qdf_nbuf_free(msdu);
				msdu = NULL;
				desc_info->msdu_count--;
				goto next_msdu;
			}

			rx_tlv_hdr = qdf_nbuf_data(msdu);

			if (hal_rx_desc_is_first_msdu(soc->hal_soc,
						      rx_tlv_hdr))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
								   rx_tlv_hdr,
								   &pdev->ppdu_info.rx_status);
			/* If the msdu is fragmented, it is spread across
			 * multiple buffers:
			 * a. calculate the len of each fragmented buffer
			 * b. calculate the number of fragmented buffers for
			 *    the msdu and decrement msdu_count once
			 */
			if (msdu_list.msdu_info[msdu_index].msdu_flags
			    & HAL_MSDU_F_MSDU_CONTINUATION) {
				if (!msdu_frag) {
					total_frag_len = msdu_list.msdu_info[msdu_index].msdu_len;
					msdu_frag = true;
				}

				dp_mon_adjust_frag_len(&total_frag_len,
						       &frag_len);
			} else {
				if (msdu_frag)
					dp_mon_adjust_frag_len(&total_frag_len,
							       &frag_len);
				else
					frag_len = msdu_list.msdu_info[msdu_index].msdu_len;

				msdu_frag = false;
				desc_info->msdu_count--;
			}
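			/* For a continuation msdu, each pass through
			 * dp_mon_adjust_frag_len() consumes one buffer's
			 * worth of the total msdu length; msdu_count is
			 * decremented only once the final, non-continuation
			 * fragment has been consumed.
			 */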
			rx_hdr_tlv_len = SIZE_OF_MONITOR_TLV;

			/*
			 * HW structures call this L3 header padding.
			 * It is actually the offset
			 * from the buffer beginning where the L2
			 * header begins.
			 */
			l3_hdr_pad = hal_rx_msdu_end_l3_hdr_padding_get(
						soc->hal_soc,
						rx_tlv_hdr);

			/*****************************************************
			 *                     RX_PACKET                     *
			 * --------------------------------------------------
			 * | RX_PKT_TLVS | L3 Padding header | msdu data    |
			 * --------------------------------------------------
			 *****************************************************/
			qdf_nbuf_set_pktlen(msdu,
					    rx_hdr_tlv_len +
					    l3_hdr_pad +
					    frag_len);
			if (head_msdu && !*head_msdu)
				*head_msdu = msdu;
			else if (last_msdu)
				qdf_nbuf_set_next(last_msdu, msdu);

			last_msdu = msdu;

next_msdu:
			rx_buf_reaped++;

			dp_rx_add_to_free_desc_list(head_desc,
						    tail_desc,
						    rx_desc);

			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("total_len %u frag_len %u flags %u"),
				  total_frag_len, frag_len,
				  msdu_list.msdu_info[msdu_index].msdu_flags);
		}
		hal_rxdma_buff_addr_info_set(rx_link_buf_info,
					     desc_info->link_desc.paddr,
					     desc_info->link_desc.sw_cookie,
					     desc_info->link_desc.rbm);

		/* Get the next link desc VA from the current link desc */
		hal_rx_mon_next_link_desc_get(link_desc_va,
					      &desc_info->link_desc);

		/* Return the msdu link descriptor to WBM */
		if (dp_rx_monitor_link_desc_return(pdev,
						   (hal_buff_addrinfo_t)rx_link_buf_info,
						   mac_id,
						   HAL_BM_ACTION_PUT_IN_IDLE_LIST)
		    != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "dp_rx_monitor_link_desc_return failed");
		}
	}
	pdev->rx_mon_stats.dest_mpdu_done++;
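	/* Terminate the msdu chain; note that msdu (and hence *tail_msdu)
	 * is NULL when every buffer of the mpdu was dropped.
	 */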
	if (last_msdu)
		qdf_nbuf_set_next(last_msdu, NULL);

	*tail_msdu = msdu;

	return rx_buf_reaped;
}
/*
 * dp_rx_mon_deliver_prev_ppdu() - Deliver the previous PPDU
 *
 * @pdev: DP pdev handle
 * @mac_id: lmac id
 * @quota: quota
 *
 * Return: work done (number of status ring entries processed)
 */
static inline uint32_t
dp_rx_mon_deliver_prev_ppdu(struct dp_pdev *pdev,
			    uint32_t mac_id,
			    uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	struct hal_rx_mon_desc_info *desc_info = pdev->mon_desc;
	uint32_t work_done = 0, work = 0;
	bool deliver_ppdu = false;
	enum dp_mon_reap_status status;

	while (pdev->hold_mon_dest_ring) {
		status = dp_rx_mon_status_buf_validate(pdev, mac_id);

		switch (status) {
		case dp_mon_status_no_dma:
			/* If DMA is not done for status ring entry,
			 * hold on to monitor destination ring and
			 * deliver current ppdu data once DMA is done.
			 */
			pdev->hold_mon_dest_ring = true;
			break;
		case dp_mon_status_lag:
			/* If status_ppdu_id is lagging behind destination,
			 * a. Hold on to destination ring
			 * b. Drop status ppdus until ppdu id matches
			 * c. Increment stats for ppdu_id mismatch and
			 *    status ppdu drop
			 */
			pdev->hold_mon_dest_ring = true;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.status_ppdu_drop++;
			break;
		case dp_mon_status_lead:
			/* If status_ppdu_id is leading ahead of destination,
			 * a. Drop destination ring ppdus until ppdu_id matches
			 * b. Unhold monitor destination ring so status ppdus
			 *    can be dropped.
			 * c. Increment stats for ppdu_id mismatch and
			 *    destination ppdu drop
			 */
			desc_info->drop_ppdu = true;
			pdev->hold_mon_dest_ring = false;
			pdev->rx_mon_stats.ppdu_id_mismatch++;
			pdev->rx_mon_stats.dest_ppdu_drop++;
			break;
		case dp_mon_status_replenish:
			/* If the status ring hp entry is NULL, replenish it */
			work = dp_rx_mon_status_process(soc, mac_id, 1);
			break;
		case dp_mon_status_match:
			/* If the status ppdu id matches the destination,
			 * unhold the monitor destination ring and deliver
			 * the ppdu
			 */
			pdev->hold_mon_dest_ring = false;
			break;
		default:
			dp_err("mon reap status is not supported");
		}
		/* When the status ring entry's DMA is not done or the
		 * status ring entry is replenished, ppdu status is not
		 * available for radiotap construction, so return and
		 * check for status on the next interrupt
		 */
		if ((status == dp_mon_status_no_dma) ||
		    (status == dp_mon_status_replenish)) {
			return work_done;
		}

		if (status == dp_mon_status_lag) {
			work = dp_rx_mon_status_process(soc, mac_id, 1);

			if (!work)
				return 0;

			work_done += work;
		}

		deliver_ppdu = true;
	}
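	/* The loop above exits only once hold_mon_dest_ring is cleared,
	 * i.e. when the status ppdu id matched or the destination ppdu
	 * was marked for drop; only then is the held ppdu handled below.
	 */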
	if (deliver_ppdu) {
		if (pdev->mon_desc->drop_ppdu) {
			dp_rx_mon_drop_ppdu(pdev, mac_id);
			return work_done;
		}

		work_done += dp_rx_mon_status_process(soc, mac_id,
						      desc_info->status_buf_count);

		dp_rx_monitor_deliver_ppdu(soc, mac_id);
	}

	return work_done;
}
/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the monitor
 * status ring.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 *
 * @soc: datapath soc context
 * @mac_id: mac_id on which the interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot.
 *
 * Return: Number of reaped status ring entries
 */
uint32_t dp_rx_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	union dp_rx_desc_list_elem_t *head_desc = NULL;
	union dp_rx_desc_list_elem_t *tail_desc = NULL;
	uint32_t rx_bufs_reaped = 0;
	struct dp_mon_mpdu *mon_mpdu;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	hal_rxdma_desc_t ring_desc;
	hal_soc_handle_t hal_soc;
	hal_ring_handle_t mon_dest_srng;
	qdf_nbuf_t head_msdu = NULL;
	qdf_nbuf_t tail_msdu = NULL;
	struct hal_rx_mon_desc_info *desc_info;
	int mac_for_pdev = mac_id;
	QDF_STATUS status;
	uint32_t work_done = 0;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "pdev is null for mac_id = %d", mac_id);
		return work_done;
	}

	qdf_spin_lock_bh(&pdev->mon_lock);

	if (qdf_unlikely(!dp_soc_is_full_mon_enable(pdev))) {
		work_done += dp_rx_mon_status_process(soc, mac_id, quota);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}

	desc_info = pdev->mon_desc;

	rx_mon_stats = &pdev->rx_mon_stats;

	work_done = dp_rx_mon_deliver_prev_ppdu(pdev, mac_id, quota);
	/* Do not proceed if no work was done and the destination ring
	 * is still on hold
	 */
	if (!work_done && pdev->hold_mon_dest_ring) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return work_done;
	}
	mon_dest_srng = dp_rxdma_get_mon_dst_ring(pdev, mac_for_pdev);

	if (qdf_unlikely(!mon_dest_srng ||
			 !hal_srng_initialized(mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring Init Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	hal_soc = soc->hal_soc;

	qdf_assert_always(hal_soc && pdev);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dest_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  FL("HAL Monitor Destination Ring access Failed -- %pK"),
			  mon_dest_srng);
		goto done1;
	}

	/* Each entry in the mon dest ring carries mpdu data;
	 * reap all msdus for an mpdu and form the skb chain
	 */
	while (qdf_likely(ring_desc =
			  hal_srng_dst_peek(hal_soc, mon_dest_srng))) {
		head_msdu = NULL;
		tail_msdu = NULL;
		rx_bufs_reaped = dp_rx_mon_mpdu_reap(soc, mac_id,
						     ring_desc, &head_msdu,
						     &tail_msdu, &head_desc,
						     &tail_desc);
		/* Log an error if end_of_ppdu is zero and the number of
		 * reaped buffers is also zero; this is unexpected.
		 */
		if (qdf_unlikely(!desc_info->end_of_ppdu && !rx_bufs_reaped)) {
			qdf_err("end_of_ppdu and rx_bufs_reaped are zero");
		}
		rx_mon_stats->mon_rx_bufs_reaped_dest += rx_bufs_reaped;

		/* Replenish rx_bufs_reaped buffers back to the
		 * RxDMA Monitor buffer ring
		 */
		if (rx_bufs_reaped) {
			status = dp_rx_buffers_replenish(soc, mac_id,
							 dp_rxdma_get_mon_buf_ring(pdev,
										   mac_for_pdev),
							 dp_rx_get_mon_desc_pool(soc, mac_id,
										 pdev->pdev_id),
							 rx_bufs_reaped,
							 &head_desc, &tail_desc);
			if (status != QDF_STATUS_SUCCESS)
				qdf_assert_always(0);

			rx_mon_stats->mon_rx_bufs_replenished_dest += rx_bufs_reaped;
		}

		head_desc = NULL;
		tail_desc = NULL;

		/* If end_of_ppdu is zero, it is a valid data mpdu:
		 * a. Add head_msdu and tail_msdu to the mpdu list
		 * b. Continue reaping the next SW_MONITOR_RING descriptor
		 */
		if (!desc_info->end_of_ppdu) {
			/*
			 * In case of rxdma error, the MPDU is dropped
			 * from the sw_monitor_ring descriptor.
			 * In this case, head_msdu remains NULL.
			 * Move the srng to the next entry and continue
			 * reaping.
			 */
			if (!head_msdu) {
				ring_desc = hal_srng_dst_get_next(hal_soc,
								  mon_dest_srng);
				continue;
			}

			/*
			 * Prepare an MPDU object which holds the chain of
			 * msdus and MPDU-specific status, and add it to the
			 * monitor mpdu queue
			 */
			mon_mpdu = dp_rx_mon_prepare_mon_mpdu(pdev,
							      head_msdu,
							      tail_msdu);

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
				  FL("Dest_srng: %pK MPDU_OBJ: %pK "
				     "head_msdu: %pK tail_msdu: %pK -- "),
				  mon_dest_srng,
				  mon_mpdu,
				  head_msdu,
				  tail_msdu);

			TAILQ_INSERT_TAIL(&pdev->mon_mpdu_q,
					  mon_mpdu,
					  mpdu_list_elem);

			head_msdu = NULL;
			tail_msdu = NULL;

			ring_desc = hal_srng_dst_get_next(hal_soc,
							  mon_dest_srng);
			continue;
		}
		/* It is sometimes observed that ppdu_id, status_buf_addr
		 * and link desc addr are NULL; this WAR handles that case
		 */
		if (!desc_info->ppdu_id && !desc_info->status_buf.paddr) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("ppdu_id: %d ring_entry: %pK "
				     "status_buf_count: %d rxdma_push: %d "
				     "rxdma_err: %d link_desc: %pK "),
				  desc_info->ppdu_id, ring_desc,
				  desc_info->status_buf_count,
				  desc_info->rxdma_push_reason,
				  desc_info->rxdma_error_code,
				  desc_info->link_desc.paddr);
			goto next_entry;
		}
		/*
		 * end_of_ppdu is one:
		 * a. Update ppdu_done statistics
		 * b. Replenish buffers back to the mon buffer ring
		 * c. Reap the status ring for a PPDU and deliver all mpdus
		 *    to the upper layer
		 */
		rx_mon_stats->dest_ppdu_done++;

		work_done += dp_rx_mon_reap_status_ring(soc, mac_id,
							quota, desc_info);
		/* Deliver all MPDUs for a PPDU */
		if (desc_info->drop_ppdu)
			dp_rx_mon_drop_ppdu(pdev, mac_id);
		else if (!pdev->hold_mon_dest_ring)
			dp_rx_monitor_deliver_ppdu(soc, mac_id);
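		/* One complete PPDU has been reaped and handled at this
		 * point; advance past this destination ring entry and
		 * break out, leaving further PPDUs to the next call.
		 */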
next_entry:
		hal_srng_dst_get_next(hal_soc, mon_dest_srng);
		break;
	}

	hal_srng_access_end(hal_soc, mon_dest_srng);

done1:
	qdf_spin_unlock_bh(&pdev->mon_lock);

	return work_done;
}
/**
 * dp_full_mon_attach() - attach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	pdev->mon_desc = qdf_mem_malloc(sizeof(struct hal_rx_mon_desc_info));

	if (!pdev->mon_desc) {
		qdf_err("Memory allocation failed for hal_rx_mon_desc_info");
		return;
	}

	TAILQ_INIT(&pdev->mon_mpdu_q);
}
/**
 * dp_full_mon_detach() - detach full monitor mode resources
 * @pdev: Datapath PDEV handle
 *
 * Return: void
 */
void dp_full_mon_detach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct dp_mon_mpdu *mpdu = NULL;
	struct dp_mon_mpdu *temp_mpdu = NULL;

	if (!soc->full_mon_mode) {
		qdf_debug("Full monitor is not enabled");
		return;
	}

	if (pdev->mon_desc)
		qdf_mem_free(pdev->mon_desc);

	if (!TAILQ_EMPTY(&pdev->mon_mpdu_q)) {
		TAILQ_FOREACH_SAFE(mpdu,
				   &pdev->mon_mpdu_q,
				   mpdu_list_elem,
				   temp_mpdu) {
			qdf_mem_free(mpdu);
		}
	}
}
#endif