dp_tx_capture.c

/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include <htt.h>
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "cdp_txrx_cmn_struct.h"
#include <enet.h>
#include "dp_tx_capture.h"

#ifdef WLAN_TX_PKT_CAPTURE_ENH

/*
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
void dp_peer_tid_queue_init(struct dp_peer *peer)
{
    int tid;
    struct dp_tx_tid *tx_tid;

    for (tid = 0; tid < DP_MAX_TIDS; tid++) {
        tx_tid = &peer->tx_capture.tx_tid[tid];
        tx_tid->tid = tid;
        qdf_nbuf_queue_init(&tx_tid->msdu_comp_q);
        tx_tid->max_ppdu_id = 0;
        /* spinlock create */
        qdf_spinlock_create(&tx_tid->tid_lock);
    }
}

/*
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
    int tid;
    struct dp_tx_tid *tx_tid;

    for (tid = 0; tid < DP_MAX_TIDS; tid++) {
        tx_tid = &peer->tx_capture.tx_tid[tid];
        qdf_spin_lock_bh(&tx_tid->tid_lock);
        qdf_nbuf_queue_free(&tx_tid->msdu_comp_q);
        qdf_spin_unlock_bh(&tx_tid->tid_lock);
        /* spinlock destroy */
        qdf_spinlock_destroy(&tx_tid->tid_lock);
        tx_tid->max_ppdu_id = 0;
    }
}

/*
 * dp_peer_update_80211_hdr: update 80211 hdr
 * @vdev: DP VDEV
 * @peer: DP PEER
 *
 * return: void
 */
void dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
    struct ieee80211_frame *ptr_wh;

    ptr_wh = &peer->tx_capture.tx_wifi_hdr;
    /* i_addr1 - Receiver mac address */
    /* i_addr2 - Transmitter mac address */
    /* i_addr3 - Destination mac address */
    qdf_mem_copy(ptr_wh->i_addr1,
                 peer->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
    qdf_mem_copy(ptr_wh->i_addr3,
                 peer->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
    qdf_mem_copy(ptr_wh->i_addr2,
                 vdev->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
}

/*
 * dp_deliver_mgmt_frm: process a Tx mgmt frame and deliver it to the
 * upper layer
 * @pdev: DP PDEV handle
 * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 *
 * return: void
 */
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
    struct cdp_tx_indication_info tx_capture_info;

    if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
        dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
                             nbuf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
    } else if (pdev->tx_capture_enabled) {
        /* invoke the WDI event handler to send the mgmt pkt */
        /* pull ppdu_id from the packet */
        qdf_nbuf_pull_head(nbuf, sizeof(uint32_t));
        tx_capture_info.frame_payload = 1;
        tx_capture_info.mpdu_nbuf = nbuf;
        /*
         * send MPDU to osif layer
         * do we need to update mpdu_info before transmit?
         * get current mpdu_nbuf
         */
        dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                             &tx_capture_info, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
        if (tx_capture_info.mpdu_nbuf)
            qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
    }
}

/**
 * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
    /* Work queue setup for HTT stats and tx capture handling */
    qdf_create_work(0, &pdev->tx_capture.ppdu_stats_work,
                    dp_tx_ppdu_stats_process,
                    pdev);
    pdev->tx_capture.ppdu_stats_workqueue =
        qdf_alloc_unbound_workqueue("ppdu_stats_work_queue");
    STAILQ_INIT(&pdev->tx_capture.ppdu_stats_queue);
    STAILQ_INIT(&pdev->tx_capture.ppdu_stats_defer_queue);
    qdf_spinlock_create(&pdev->tx_capture.ppdu_stats_lock);
    pdev->tx_capture.ppdu_stats_queue_depth = 0;
    pdev->tx_capture.ppdu_stats_next_sched = 0;
    pdev->tx_capture.ppdu_stats_defer_queue_depth = 0;
    pdev->tx_capture.ppdu_dropped = 0;
}

/**
 * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
    struct ppdu_info *ppdu_info, *tmp_ppdu_info = NULL;

    if (!pdev || !pdev->tx_capture.ppdu_stats_workqueue)
        return;

    qdf_flush_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);
    qdf_destroy_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);
    qdf_spinlock_destroy(&pdev->tx_capture.ppdu_stats_lock);

    STAILQ_FOREACH_SAFE(ppdu_info,
                        &pdev->tx_capture.ppdu_stats_queue,
                        ppdu_info_queue_elem, tmp_ppdu_info) {
        STAILQ_REMOVE(&pdev->tx_capture.ppdu_stats_queue,
                      ppdu_info, ppdu_info, ppdu_info_queue_elem);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }

    STAILQ_FOREACH_SAFE(ppdu_info,
                        &pdev->tx_capture.ppdu_stats_defer_queue,
                        ppdu_info_queue_elem, tmp_ppdu_info) {
        STAILQ_REMOVE(&pdev->tx_capture.ppdu_stats_defer_queue,
                      ppdu_info, ppdu_info, ppdu_info_queue_elem);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }
}

/**
 * dp_update_msdu_to_list(): Function to queue msdu from wbm
 * @soc: dp_soc
 * @pdev: dp_pdev
 * @peer: dp_peer
 * @ts: hal tx completion status
 * @netbuf: msdu
 *
 * return: status
 */
QDF_STATUS
dp_update_msdu_to_list(struct dp_soc *soc,
                       struct dp_pdev *pdev,
                       struct dp_peer *peer,
                       struct hal_tx_completion_status *ts,
                       qdf_nbuf_t netbuf)
{
    struct dp_tx_tid *tx_tid;
    struct msdu_completion_info *msdu_comp_info;

    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d peer NULL !", __func__, __LINE__);
        return QDF_STATUS_E_FAILURE;
    }

    if (ts->tid > DP_NON_QOS_TID) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d peer_id %d, tid %d > NON_QOS_TID!",
                  __func__, __LINE__, ts->peer_id, ts->tid);
        return QDF_STATUS_E_FAILURE;
    }

    tx_tid = &peer->tx_capture.tx_tid[ts->tid];

    if (!tx_tid) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d tid[%d] NULL !", __func__, __LINE__, ts->tid);
        return QDF_STATUS_E_FAILURE;
    }

    qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

    if (!qdf_nbuf_push_head(netbuf, sizeof(struct msdu_completion_info))) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("No headroom"));
        return QDF_STATUS_E_NOMEM;
    }

    msdu_comp_info = (struct msdu_completion_info *)qdf_nbuf_data(netbuf);

    /* copy msdu_completion_info to control buffer */
    msdu_comp_info->ppdu_id = ts->ppdu_id;
    msdu_comp_info->peer_id = ts->peer_id;
    msdu_comp_info->tid = ts->tid;
    msdu_comp_info->first_msdu = ts->first_msdu;
    msdu_comp_info->last_msdu = ts->last_msdu;
    msdu_comp_info->msdu_part_of_amsdu = ts->msdu_part_of_amsdu;
    msdu_comp_info->transmit_cnt = ts->transmit_cnt;
    msdu_comp_info->tsf = ts->tsf;

    /* update max ppdu_id */
    tx_tid->max_ppdu_id = ts->ppdu_id;
    pdev->tx_capture.last_msdu_id = ts->ppdu_id;

    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
              "msdu_completion: ppdu_id[%d] peer_id[%d] tid[%d] rel_src[%d] status[%d] tsf[%u] A[%d] CNT[%d]",
              ts->ppdu_id, ts->peer_id, ts->tid, ts->release_src,
              ts->status, ts->tsf, ts->msdu_part_of_amsdu,
              ts->transmit_cnt);

    /* lock here */
    qdf_spin_lock_bh(&tx_tid->tid_lock);
    /* add nbuf to tail queue per peer tid */
    qdf_nbuf_queue_add(&tx_tid->msdu_comp_q, netbuf);
    qdf_spin_unlock_bh(&tx_tid->tid_lock);

    return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_add_to_comp_queue() - add completion msdu to queue
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: DP peer
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc,
                                   struct dp_tx_desc_s *desc,
                                   struct hal_tx_completion_status *ts,
                                   struct dp_peer *peer)
{
    int ret = QDF_STATUS_E_FAILURE;

    if (desc->pdev->tx_capture_enabled == 1 &&
        ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
        ret = dp_update_msdu_to_list(soc, desc->pdev,
                                     peer, ts, desc->nbuf);
    }

    return ret;
}

/**
 * dp_process_ppdu_stats_update_failed_bitmap(): update failed bitmap
 * @pdev: dp_pdev
 * @data: tx completion ppdu desc
 * @ppdu_id: ppdu id
 * @size: size of bitmap
 *
 * return: void
 */
void dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
                                                void *data,
                                                uint32_t ppdu_id,
                                                uint32_t size)
{
    struct cdp_tx_completion_ppdu_user *user;
    uint32_t mpdu_tried;
    uint32_t ba_seq_no;
    uint32_t start_seq;
    uint32_t num_mpdu;
    uint32_t diff;
    uint32_t carry = 0;
    uint32_t bitmask = 0;
    uint32_t i;
    uint32_t k;
    uint32_t ba_bitmap = 0;

    user = (struct cdp_tx_completion_ppdu_user *)data;

    /* get number of mpdus from ppdu_desc */
    mpdu_tried = user->mpdu_tried_mcast + user->mpdu_tried_ucast;
    ba_seq_no = user->ba_seq_no;
    start_seq = user->start_seq;
    num_mpdu = user->num_mpdu;

    /* assumption: number of mpdus will be less than 32 */
    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
              "ppdu_id[%d] ba_seq_no[%d] start_seq_no[%d] mpdu_tried[%d]",
              ppdu_id, ba_seq_no, start_seq, mpdu_tried);

    for (i = 0; i < size; i++) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
                  "ppdu_id[%d] ba_bitmap[%x] enqueue_bitmap[%x]",
                  ppdu_id, user->ba_bitmap[i], user->enq_bitmap[i]);
    }
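
    /*
     * Align the BA bitmap (indexed from ba_seq_no) to the enqueue
     * bitmap (indexed from start_seq): shift left when
     * start_seq <= ba_seq_no, otherwise shift right word by word,
     * propagating carry bits across the 32-bit word boundaries,
     * then AND with enq_bitmap so only enqueued MPDUs are tracked.
     */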
    if (start_seq <= ba_seq_no) {
        diff = ba_seq_no - start_seq;
        bitmask = (1 << diff) - 1;
        for (i = 0; i < size; i++) {
            ba_bitmap = user->ba_bitmap[i];
            user->failed_bitmap[i] = (ba_bitmap << diff);
            user->failed_bitmap[i] |= (bitmask & carry);
            carry = ((ba_bitmap & (bitmask << (32 - diff))) >>
                     (32 - diff));
            user->failed_bitmap[i] = user->enq_bitmap[i] &
                                     user->failed_bitmap[i];
        }
    } else {
        diff = start_seq - ba_seq_no;
        /* array index */
        k = diff >> 5;
        diff = diff & 0x1F;
        bitmask = (1 << diff) - 1;
        for (i = 0; i < size; i++, k++) {
            ba_bitmap = user->ba_bitmap[k];
            user->failed_bitmap[i] = ba_bitmap >> diff;
            /* get next ba_bitmap */
            ba_bitmap = user->ba_bitmap[k + 1];
            carry = (ba_bitmap & bitmask);
            user->failed_bitmap[i] |=
                ((carry & bitmask) << (32 - diff));
            user->failed_bitmap[i] = user->enq_bitmap[i] &
                                     user->failed_bitmap[i];
        }
    }
}

/*
 * dp_soc_set_txrx_ring_map_single() - set the tx ring map so that all
 * interrupt contexts use a single Tx ring
 * @soc: DP soc handle
 *
 * Return: void
 */
static void dp_soc_set_txrx_ring_map_single(struct dp_soc *soc)
{
    uint32_t i;

    for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
        soc->tx_ring_map[i] =
            dp_cpu_ring_map[DP_SINGLE_TX_RING_MAP][i];
    }
}

/*
 * dp_iterate_free_peer_msdu_q() - API to free msdu queue
 * @pdev_hdl: DP_PDEV handle
 *
 * Return: void
 */
static void dp_iterate_free_peer_msdu_q(void *pdev_hdl)
{
    struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
    struct dp_soc *soc = pdev->soc;
    struct dp_vdev *vdev = NULL;
    struct dp_peer *peer = NULL;

    qdf_spin_lock_bh(&soc->peer_ref_mutex);
    qdf_spin_lock_bh(&pdev->vdev_list_lock);
    DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
        DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
            int tid;
            struct dp_tx_tid *tx_tid;

            for (tid = 0; tid < DP_MAX_TIDS; tid++) {
                tx_tid = &peer->tx_capture.tx_tid[tid];
                /* spinlock hold */
                qdf_spin_lock_bh(&tx_tid->tid_lock);
                qdf_nbuf_queue_free(&tx_tid->msdu_comp_q);
                qdf_spin_unlock_bh(&tx_tid->tid_lock);
            }
        }
    }
    qdf_spin_unlock_bh(&pdev->vdev_list_lock);
    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, int val)
{
    struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

    pdev->tx_capture_enabled = val;

    if (pdev->tx_capture_enabled) {
        dp_soc_set_txrx_ring_map_single(pdev->soc);
        if (!pdev->pktlog_ppdu_stats)
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_SNIFFER,
                                      pdev->pdev_id);
    } else {
        dp_soc_set_txrx_ring_map(pdev->soc);
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_ENH_STATS,
                                  pdev->pdev_id);
        dp_iterate_free_peer_msdu_q(pdev);
    }

    return QDF_STATUS_SUCCESS;
}

/**
 * get_number_of_1s(): Function to get number of 1s
 * @value: value to find
 *
 * return: number of 1s
 */
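/*
 * Counts set bits with the classic SWAR (divide-and-conquer) popcount:
 * each pass folds adjacent 1-, 2-, 4-, 8- and 16-bit partial sums
 * together using the shift/mask pairs below.
 */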
static
inline uint32_t get_number_of_1s(uint32_t value)
{
    uint32_t shift[] = {1, 2, 4, 8, 16};
    uint32_t magic_number[] = { 0x55555555, 0x33333333, 0x0F0F0F0F,
                                0x00FF00FF, 0x0000FFFF};
    uint8_t k = 0;

    for (; k <= 4; k++) {
        value = (value & magic_number[k]) +
                ((value >> shift[k]) & magic_number[k]);
    }

    return value;
}

/**
 * dp_tx_print_bitmap(): Function to print bitmap
 * @pdev: dp_pdev
 * @ppdu_desc: ppdu completion descriptor
 * @user_index: user index
 * @ppdu_id: ppdu id
 *
 * return: status
 */
static
QDF_STATUS dp_tx_print_bitmap(struct dp_pdev *pdev,
                              struct cdp_tx_completion_ppdu *ppdu_desc,
                              uint32_t user_index,
                              uint32_t ppdu_id)
{
    struct cdp_tx_completion_ppdu_user *user;
    uint8_t i;
    uint32_t mpdu_tried;
    uint32_t ba_seq_no;
    uint32_t start_seq;
    uint32_t num_mpdu;
    uint32_t fail_num_mpdu = 0;

    user = &ppdu_desc->user[user_index];

    /* get number of mpdus from ppdu_desc */
    mpdu_tried = user->mpdu_tried_mcast + user->mpdu_tried_ucast;
    ba_seq_no = user->ba_seq_no;
    start_seq = user->start_seq;
    num_mpdu = user->mpdu_success;

    if (user->tid > DP_NON_QOS_TID) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: ppdu[%d] peer_id[%d] TID[%d] > NON_QOS_TID!",
                  __func__, ppdu_id, user->peer_id, user->tid);
        return QDF_STATUS_E_FAILURE;
    }

    if (mpdu_tried != num_mpdu) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
                  "%s: ppdu[%d] peer[%d] tid[%d] ba[%d] start[%d] mpdu_tri[%d] num_mpdu[%d] is_mcast[%d]",
                  __func__, ppdu_id, user->peer_id, user->tid,
                  ba_seq_no, start_seq, mpdu_tried,
                  num_mpdu, user->is_mcast);
        for (i = 0; i < CDP_BA_256_BIT_MAP_SIZE_DWORDS; i++) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_ERROR,
                      "ppdu_id[%d] ba_bitmap[0x%x] enqueue_bitmap[0x%x] failed_bitmap[0x%x]",
                      ppdu_id, user->ba_bitmap[i],
                      user->enq_bitmap[i],
                      user->failed_bitmap[i]);
            fail_num_mpdu +=
                get_number_of_1s(user->failed_bitmap[i]);
        }
    }

    if (fail_num_mpdu == num_mpdu && num_mpdu)
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d ppdu_id[%d] num_mpdu[%d, %d]",
                  __func__, __LINE__, ppdu_id, num_mpdu, fail_num_mpdu);

    return QDF_STATUS_SUCCESS;
}
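
/**
 * dp_tx_update_80211_hdr(): prepend the cached 802.11 + QoS-control +
 * LLC/SNAP headers to a restitched MPDU buffer
 * @pdev: dp_pdev
 * @peer: dp_peer whose cached tx_wifi_hdr/tx_qoscntl are used
 * @data: tx completion ppdu descriptor
 * @nbuf: MPDU head buffer to fill
 *
 * return: 0 on success, QDF_STATUS_E_NOMEM if headroom is insufficient
 */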
static uint32_t dp_tx_update_80211_hdr(struct dp_pdev *pdev,
                                       struct dp_peer *peer,
                                       void *data,
                                       qdf_nbuf_t nbuf)
{
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct ieee80211_frame *ptr_wh;
    struct ieee80211_qoscntl *ptr_qoscntl;
    uint32_t mpdu_buf_len;
    uint8_t *ptr_hdr;

    ppdu_desc = (struct cdp_tx_completion_ppdu *)data;
    ptr_wh = &peer->tx_capture.tx_wifi_hdr;
    ptr_qoscntl = &peer->tx_capture.tx_qoscntl;

    /*
     * update framectrl only for first ppdu_id
     * rest of mpdu will have same frame ctrl
     * mac address and duration
     */
    if (ppdu_desc->ppdu_id != peer->tx_capture.tx_wifi_ppdu_id) {
        ptr_wh->i_fc[1] = (ppdu_desc->frame_ctrl & 0xFF00) >> 8;
        ptr_wh->i_fc[0] = (ppdu_desc->frame_ctrl & 0xFF);
        ptr_wh->i_dur[1] = (ppdu_desc->tx_duration & 0xFF00) >> 8;
        ptr_wh->i_dur[0] = (ppdu_desc->tx_duration & 0xFF);
        ptr_qoscntl->i_qos[1] = (ppdu_desc->user[0].qos_ctrl &
                                 0xFF00) >> 8;
        ptr_qoscntl->i_qos[0] = (ppdu_desc->user[0].qos_ctrl & 0xFF);
        peer->tx_capture.tx_wifi_ppdu_id = ppdu_desc->ppdu_id;
    }

    mpdu_buf_len = sizeof(struct ieee80211_frame) + LLC_SNAP_HDR_LEN;
    mpdu_buf_len += sizeof(struct ieee80211_qoscntl);

    nbuf->protocol = qdf_htons(ETH_P_802_2);

    /* update ieee80211_frame header */
    if (!qdf_nbuf_push_head(nbuf, mpdu_buf_len)) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  FL("No headroom"));
        return QDF_STATUS_E_NOMEM;
    }

    ptr_hdr = (void *)qdf_nbuf_data(nbuf);
    qdf_mem_copy(ptr_hdr, ptr_wh, sizeof(struct ieee80211_frame));
    ptr_hdr = ptr_hdr + (sizeof(struct ieee80211_frame));

    /* update qoscntl header */
    qdf_mem_copy(ptr_hdr, ptr_qoscntl, sizeof(struct ieee80211_qoscntl));
    ptr_hdr = ptr_hdr + sizeof(struct ieee80211_qoscntl);

    /* update LLC */
    *ptr_hdr = LLC_SNAP_LSAP;
    *(ptr_hdr + 1) = LLC_SNAP_LSAP;
    *(ptr_hdr + 2) = LLC_UI;
    *(ptr_hdr + 3) = 0x00;
    *(ptr_hdr + 4) = 0x00;
    *(ptr_hdr + 5) = 0x00;
    /* TYPE: IPV4 ?? */
    *(ptr_hdr + 6) = (ETHERTYPE_IPV4 & 0xFF00) >> 8;
    *(ptr_hdr + 7) = (ETHERTYPE_IPV4 & 0xFF);

    qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - mpdu_buf_len);
    return 0;
}

#define MAX_MONITOR_HEADER (512)

/**
 * dp_tx_mon_restitch_mpdu_from_msdus(): Function to restitch msdus to mpdu
 * @pdev: dp_pdev
 * @peer: dp_peer
 * @ppdu_desc: ppdu completion descriptor
 * @head_msdu: head msdu queue
 *
 * return: status
 */
static uint32_t
dp_tx_mon_restitch_mpdu_from_msdus(struct dp_pdev *pdev,
                                   struct dp_peer *peer,
                                   struct cdp_tx_completion_ppdu *ppdu_desc,
                                   qdf_nbuf_queue_t *head_msdu)
{
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t first_nbuf = NULL;
    qdf_nbuf_t prev_nbuf = NULL;
    qdf_nbuf_t mpdu_nbuf = NULL;
    qdf_nbuf_queue_t *mpdu = NULL;
    uint32_t num_mpdu = 0;
    struct msdu_completion_info *ptr_msdu_info = NULL;
    uint8_t first_msdu = 0;
    uint8_t last_msdu = 0;
    uint32_t frag_list_sum_len = 0;
    uint8_t first_msdu_not_seen = 1;

    num_mpdu = ppdu_desc->num_mpdu;
    mpdu = &ppdu_desc->mpdu_q;
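
    /*
     * Walk the per-TID completion queue: on a first_msdu flag, start a
     * new MPDU head (802.11 header in a freshly allocated nbuf), chain
     * intermediate msdus into a frag list, and on last_msdu attach the
     * chain to the head and queue the completed MPDU to mpdu_q.
     * Out-of-order first/last flags abort and free the partial chain.
     */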
    curr_nbuf = qdf_nbuf_queue_remove(head_msdu);

    while (curr_nbuf) {
        ptr_msdu_info =
            (struct msdu_completion_info *)qdf_nbuf_data(curr_nbuf);
        first_msdu = ptr_msdu_info->first_msdu;
        last_msdu = ptr_msdu_info->last_msdu;

        /* pull msdu_completion_info added in pre header */
        /* pull ethernet header from header */
        qdf_nbuf_pull_head(curr_nbuf,
                           sizeof(struct msdu_completion_info) +
                           sizeof(qdf_ether_header_t));

        if (first_msdu && first_msdu_not_seen) {
            first_nbuf = curr_nbuf;
            frag_list_sum_len = 0;
            first_msdu_not_seen = 0;

            mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                       MAX_MONITOR_HEADER,
                                       MAX_MONITOR_HEADER,
                                       4, FALSE);
            if (!mpdu_nbuf) {
                QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                          QDF_TRACE_LEVEL_FATAL,
                          "MPDU head allocation failed !!!");
                goto free_ppdu_desc_mpdu_q;
            }

            dp_tx_update_80211_hdr(pdev, peer,
                                   ppdu_desc, mpdu_nbuf);

            /* update first buffer to previous buffer */
            prev_nbuf = curr_nbuf;
        } else if (first_msdu && !first_msdu_not_seen) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!!! NO LAST MSDU\n");
            /*
             * no last msdu in an mpdu:
             * WBM came out of order, free the pkt
             */
            qdf_nbuf_free(curr_nbuf);
            goto free_ppdu_desc_mpdu_q;
        } else if (!first_msdu && first_msdu_not_seen) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!!! NO FIRST MSDU\n");
            /*
             * no first msdu in an mpdu:
             * WBM came out of order, free the pkt
             */
            qdf_nbuf_free(curr_nbuf);
            goto free_ppdu_desc_mpdu_q;
        } else {
            /* update current buffer to previous buffer next */
            prev_nbuf->next = curr_nbuf;
            /* move the previous buffer to next buffer */
            prev_nbuf = prev_nbuf->next;
        }

        frag_list_sum_len += qdf_nbuf_len(curr_nbuf);

        if (last_msdu) {
            /*
             * first nbuf will hold list of msdus
             * stored in prev_nbuf
             */
            qdf_nbuf_append_ext_list(mpdu_nbuf,
                                     first_nbuf,
                                     frag_list_sum_len);
            /* add mpdu to mpdu queue */
            qdf_nbuf_queue_add(mpdu, mpdu_nbuf);
            first_nbuf = NULL;
            mpdu_nbuf = NULL;

            /* next msdu will start with first msdu */
            first_msdu_not_seen = 1;
            goto check_for_next_msdu;
        }

        /* get next msdu from the head_msdu */
        curr_nbuf = qdf_nbuf_queue_remove(head_msdu);
        if (!curr_nbuf) {
            /* msdu missed in list */
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!! WAITING for msdu but list empty !!!!");
        }
        continue;

check_for_next_msdu:
        if (qdf_nbuf_is_queue_empty(head_msdu))
            return 0;
        curr_nbuf = qdf_nbuf_queue_remove(head_msdu);
    }

    return 0;

free_ppdu_desc_mpdu_q:
    /* free already chained msdu pkt */
    while (first_nbuf) {
        curr_nbuf = first_nbuf;
        first_nbuf = first_nbuf->next;
        qdf_nbuf_free(curr_nbuf);
    }
    /* free allocated mpdu hdr */
    if (mpdu_nbuf)
        qdf_nbuf_free(mpdu_nbuf);
    /* free queued remaining msdu pkt per ppdu */
    qdf_nbuf_queue_free(head_msdu);
    /* free queued mpdu per ppdu */
    qdf_nbuf_queue_free(mpdu);
    return 0;
}

/**
 * dp_tx_msdu_dequeue(): Function to dequeue msdus from peer based tid
 * @peer: dp_peer
 * @ppdu_id: ppdu_id
 * @tid: tid
 * @num_msdu: number of msdus
 * @head: head queue
 * @start_tsf: start tsf from ppdu_desc
 * @end_tsf: end tsf from ppdu_desc
 *
 * return: 1 if all num_msdu msdus for ppdu_id were collected, 0 otherwise
 */
static
uint32_t dp_tx_msdu_dequeue(struct dp_peer *peer, uint32_t ppdu_id,
                            uint16_t tid, uint32_t num_msdu,
                            qdf_nbuf_queue_t *head,
                            uint32_t start_tsf, uint32_t end_tsf)
{
    struct dp_tx_tid *tx_tid = NULL;
    uint32_t msdu_ppdu_id;
    qdf_nbuf_t curr_msdu = NULL;
    qdf_nbuf_t prev_msdu = NULL;
    qdf_nbuf_t nbuf = NULL;
    struct msdu_completion_info *ptr_msdu_info = NULL;
    uint32_t wbm_tsf;
    uint32_t matched = 0;

    if (qdf_unlikely(!peer))
        return 0;

    tx_tid = &peer->tx_capture.tx_tid[tid];

    if (qdf_unlikely(!tx_tid))
        return 0;

    if (qdf_nbuf_is_queue_empty(&tx_tid->msdu_comp_q))
        return 0;

    /* lock here */
    qdf_spin_lock_bh(&tx_tid->tid_lock);

    curr_msdu = qdf_nbuf_queue_first(&tx_tid->msdu_comp_q);
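
    /*
     * Scan the per-TID completion queue: free msdus older than
     * start_tsf, unlink the ones whose ppdu_id matches into @head,
     * and stop early once a completion past end_tsf is seen.
     */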
    while (curr_msdu) {
        if (qdf_nbuf_queue_len(head) == num_msdu)
            break;

        ptr_msdu_info =
            (struct msdu_completion_info *)qdf_nbuf_data(curr_msdu);
        msdu_ppdu_id = ptr_msdu_info->ppdu_id;
        wbm_tsf = ptr_msdu_info->tsf;

        if (wbm_tsf < start_tsf) {
            /* remove the aged packet */
            nbuf = qdf_nbuf_queue_remove(
                &tx_tid->msdu_comp_q);
            qdf_nbuf_free(nbuf);
            curr_msdu = qdf_nbuf_queue_first(
                &tx_tid->msdu_comp_q);
            prev_msdu = NULL;
            continue;
        }

        if (msdu_ppdu_id == ppdu_id) {
            matched = 1;

            if (wbm_tsf > start_tsf && wbm_tsf < end_tsf) {
                /* packet found */
            } else if (wbm_tsf > end_tsf) {
                /*
                 * Do we need a delta in the above case,
                 * to search for a ppdu outside the ppdu
                 * tsf window?
                 */
                break;
            }

            if (qdf_likely(!prev_msdu)) {
                /* remove head */
                curr_msdu = qdf_nbuf_queue_remove(
                    &tx_tid->msdu_comp_q);
                /* add msdu to head queue */
                qdf_nbuf_queue_add(head, curr_msdu);
                /* get next msdu from msdu_comp_q */
                curr_msdu = qdf_nbuf_queue_first(
                    &tx_tid->msdu_comp_q);
                continue;
            } else {
                /* update prev_msdu next to current msdu next */
                prev_msdu->next = curr_msdu->next;
                /* set current msdu next as NULL */
                curr_msdu->next = NULL;
                /* decrement length */
                ((qdf_nbuf_queue_t *)(
                    &tx_tid->msdu_comp_q))->qlen--;
                /* add msdu to head queue */
                qdf_nbuf_queue_add(head, curr_msdu);
                /* advance to the msdu after the unlinked one */
                curr_msdu = prev_msdu->next;
                continue;
            }
        }

        prev_msdu = curr_msdu;
        curr_msdu = prev_msdu->next;
    }

    if (qdf_nbuf_queue_len(head) != num_msdu)
        matched = 0;

    qdf_spin_unlock_bh(&tx_tid->tid_lock);

    return matched;
}

/**
 * get_mpdu_clone_from_next_ppdu(): Function to clone missing mpdu from
 * next ppdu
 * @nbuf_ppdu_desc_list: nbuf list
 * @ppdu_desc_cnt: ppdu_desc_cnt
 * @missed_seq_no: sequence number missing from the current ppdu
 * @ppdu_id: ppdu_id
 *
 * return: cloned mpdu nbuf, or NULL if not found
 */
static
qdf_nbuf_t get_mpdu_clone_from_next_ppdu(qdf_nbuf_t nbuf_ppdu_desc_list[],
                                         uint32_t ppdu_desc_cnt,
                                         uint32_t missed_seq_no,
                                         uint32_t ppdu_id)
{
    uint32_t i = 0;
    uint32_t k = 0;
    uint32_t mpdu_tried = 0;
    uint32_t found = 0;
    uint32_t seq_no = 0;
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    qdf_nbuf_t mpdu = NULL;

    for (i = 1; i < ppdu_desc_cnt; i++) {
        ppdu_desc = (struct cdp_tx_completion_ppdu *)
            qdf_nbuf_data(nbuf_ppdu_desc_list[i]);
        mpdu_tried = ppdu_desc->user[0].mpdu_tried_ucast +
            ppdu_desc->user[0].mpdu_tried_mcast;

        /* check if seq number is between the range */
        if ((missed_seq_no >= ppdu_desc->user[0].start_seq) &&
            (missed_seq_no <= ppdu_desc->user[0].start_seq +
             mpdu_tried)) {
            seq_no = ppdu_desc->user[0].start_seq;
            if (ppdu_desc->user[0].failed_bitmap[k] &
                (1 << (missed_seq_no - seq_no))) {
                found = 1;
                break;
            }
        }
    }

    if (found == 0) {
        /* mpdu not found in sched cmd id */
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: missed seq_no[%d] ppdu_id[%d] [%d] not found!!!",
                  __func__, missed_seq_no, ppdu_id, ppdu_desc_cnt);
        return NULL;
    }

    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
              "%s: seq_no[%d] missed ppdu_id[%d] m[%d] found in ppdu_id[%d]!!",
              __func__,
              missed_seq_no, ppdu_id,
              (missed_seq_no - seq_no), ppdu_desc->ppdu_id);

    mpdu = qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
    if (!mpdu) {
        /* bitmap shows the sequence number was found, but queue empty */
        /* do we need to allocate an skb and send instead of NULL ? */
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: missed seq_no[%d] ppdu_id[%d] [%d] found but queue empty!!!",
                  __func__, missed_seq_no, ppdu_id, ppdu_desc_cnt);
        return NULL;
    }

    for (i = 0; i < (missed_seq_no - seq_no); i++) {
        mpdu = mpdu->next;
        if (!mpdu) {
            /*
             * bitmap shows the sequence number was found,
             * but the queue is short; do we need to allocate
             * an skb and send instead of NULL ?
             * add a counter here:
             */
            return NULL;
        }
    }

    return skb_copy_expand(mpdu, MAX_MONITOR_HEADER, 0, GFP_ATOMIC);
}

/**
 * dp_tx_update_user_mpdu_info(): Function to update mpdu info
 * from ppdu_desc
 * @ppdu_id: ppdu_id
 * @mpdu_info: cdp_tx_indication_mpdu_info
 * @user: cdp_tx_completion_ppdu_user
 *
 * return: void
 */
static void
dp_tx_update_user_mpdu_info(uint32_t ppdu_id,
                            struct cdp_tx_indication_mpdu_info *mpdu_info,
                            struct cdp_tx_completion_ppdu_user *user)
{
    mpdu_info->ppdu_id = ppdu_id;
    mpdu_info->frame_ctrl = user->frame_ctrl;
    mpdu_info->qos_ctrl = user->qos_ctrl;
    mpdu_info->tid = user->tid;
    mpdu_info->ltf_size = user->ltf_size;
    mpdu_info->he_re = user->he_re;
    mpdu_info->txbf = user->txbf;
    mpdu_info->bw = user->bw;
    mpdu_info->nss = user->nss;
    mpdu_info->mcs = user->mcs;
    mpdu_info->preamble = user->preamble;
    mpdu_info->gi = user->gi;
    mpdu_info->ack_rssi = user->ack_rssi[0];
    mpdu_info->tx_rate = user->tx_rate;
    mpdu_info->ldpc = user->ldpc;
    mpdu_info->ppdu_cookie = user->ppdu_cookie;
    qdf_mem_copy(mpdu_info->mac_address, user->mac_addr, 6);
    mpdu_info->ba_start_seq = user->ba_seq_no;
    qdf_mem_copy(mpdu_info->ba_bitmap, user->ba_bitmap,
                 CDP_BA_256_BIT_MAP_SIZE_DWORDS * sizeof(uint32_t));
}

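/**
 * dp_tx_update_sequence_number(): write the 12-bit sequence number into
 * the sequence-control field of the restitched frame header
 * @nbuf: mpdu buffer whose 802.11 header is at the data pointer
 * @seq_no: sequence number to set
 *
 * return: void
 */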
static inline
void dp_tx_update_sequence_number(qdf_nbuf_t nbuf, uint32_t seq_no)
{
    struct ieee80211_frame *ptr_wh = NULL;
    uint16_t wh_seq = 0;

    if (!nbuf)
        return;

    /* update sequence number in frame header */
    ptr_wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf);
    wh_seq = (seq_no & 0xFFF) << 4;
    qdf_mem_copy(ptr_wh->i_seq, &wh_seq, sizeof(uint16_t));
}

/**
 * dp_send_mpdu_info_to_stack(): Function to deliver mpdu info to the
 * upper layer
 * @pdev: DP pdev handle
 * @nbuf_ppdu_desc_list: ppdu_desc_list per sched cmd id
 * @ppdu_desc_cnt: number of ppdu descriptors in the list
 *
 * return: status
 */
static
QDF_STATUS dp_send_mpdu_info_to_stack(struct dp_pdev *pdev,
                                      qdf_nbuf_t nbuf_ppdu_desc_list[],
                                      uint32_t ppdu_desc_cnt)
{
    uint32_t ppdu_id;
    uint32_t desc_cnt;
    qdf_nbuf_t tmp_nbuf;

    for (desc_cnt = 0; desc_cnt < ppdu_desc_cnt; desc_cnt++) {
        struct cdp_tx_completion_ppdu *ppdu_desc;
        uint32_t mpdu_tried;
        uint32_t num_mpdu;
        uint32_t k;
        uint32_t i;
        uint32_t seq_no = 0;
        uint32_t len;
        qdf_nbuf_t mpdu_nbuf;

        ppdu_desc = (struct cdp_tx_completion_ppdu *)
            qdf_nbuf_data(nbuf_ppdu_desc_list[desc_cnt]);

        if (!ppdu_desc)
            continue;

        if (qdf_nbuf_is_queue_empty(&ppdu_desc->mpdu_q)) {
            tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
            nbuf_ppdu_desc_list[desc_cnt] = NULL;
            qdf_nbuf_free(tmp_nbuf);
            continue;
        }
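
        /*
         * Control-frame PPDUs are delivered as-is; for data PPDUs,
         * walk every tried sequence number and, where the local
         * mpdu_q has a gap, clone the missing MPDU from a later
         * PPDU in this sched batch before sending it up.
         */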
        ppdu_id = ppdu_desc->ppdu_id;

        if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
            struct cdp_tx_indication_info tx_capture_info;
            struct cdp_tx_indication_mpdu_info *mpdu_info;

            qdf_mem_set(&tx_capture_info,
                        sizeof(struct cdp_tx_indication_info),
                        0);
            mpdu_info = &tx_capture_info.mpdu_info;
            mpdu_info->channel = ppdu_desc->channel;
            mpdu_info->frame_type = ppdu_desc->frame_type;
            mpdu_info->ppdu_start_timestamp =
                ppdu_desc->ppdu_start_timestamp;
            mpdu_info->ppdu_end_timestamp =
                ppdu_desc->ppdu_end_timestamp;
            mpdu_info->tx_duration = ppdu_desc->tx_duration;
            mpdu_info->seq_no = seq_no;
            mpdu_info->num_msdu = ppdu_desc->num_msdu;

            /* update cdp_tx_indication_mpdu_info */
            dp_tx_update_user_mpdu_info(ppdu_desc->ppdu_id,
                                        &tx_capture_info.mpdu_info,
                                        &ppdu_desc->user[0]);
            tx_capture_info.mpdu_info.channel_num =
                pdev->operating_channel;
            tx_capture_info.mpdu_nbuf =
                qdf_nbuf_queue_remove(&ppdu_desc->mpdu_q);

            /*
             * send MPDU to osif layer
             * do we need to update mpdu_info before transmit?
             * get current mpdu_nbuf
             */
            dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                                 &tx_capture_info, HTT_INVALID_PEER,
                                 WDI_NO_VAL, pdev->pdev_id);

            if (tx_capture_info.mpdu_nbuf)
                qdf_nbuf_free(tx_capture_info.mpdu_nbuf);

            tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
            nbuf_ppdu_desc_list[desc_cnt] = NULL;
            qdf_nbuf_free(tmp_nbuf);
            continue;
        }

        ppdu_id = ppdu_desc->ppdu_id;
        /* find mpdu tried is same as success mpdu */
        mpdu_tried = ppdu_desc->user[0].mpdu_tried_ucast +
            ppdu_desc->user[0].mpdu_tried_mcast;
        num_mpdu = ppdu_desc->user[0].mpdu_success;
        /* get length */
        len = qdf_nbuf_queue_len(&ppdu_desc->mpdu_q);

        /* find list of missing sequence numbers */
        for (i = 0, k = 0; i < mpdu_tried; i++) {
            struct cdp_tx_indication_info tx_capture_info;
            struct cdp_tx_indication_mpdu_info *mpdu_info;

            qdf_mem_set(&tx_capture_info,
                        sizeof(struct cdp_tx_indication_info),
                        0);
            mpdu_info = &tx_capture_info.mpdu_info;

            /* missed seq number */
            seq_no = ppdu_desc->user[0].start_seq + i;

            if (!(ppdu_desc->user[0].failed_bitmap[k] & 1 << i)) {
                QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                          QDF_TRACE_LEVEL_ERROR,
                          "%s: finding missing seq no: %d in other ppdu list cnt[%d]",
                          __func__, seq_no, ppdu_desc_cnt);

                /* send rest of ppdu_desc list */
                mpdu_nbuf = get_mpdu_clone_from_next_ppdu(
                    nbuf_ppdu_desc_list + desc_cnt,
                    ppdu_desc_cnt - desc_cnt,
                    seq_no,
                    ppdu_desc->ppdu_id);

                /* check mpdu_nbuf NULL */
                if (!mpdu_nbuf)
                    continue;

                qdf_nbuf_queue_add(&ppdu_desc->mpdu_q,
                                   mpdu_nbuf);
            } else {
                /* any error case we need to handle */
            }

            /* k needs to be incremented once i crosses 32 */
            tx_capture_info.mpdu_nbuf =
                qdf_nbuf_queue_remove(&ppdu_desc->mpdu_q);

            if (!tx_capture_info.mpdu_nbuf)
                continue;

            mpdu_info->channel = ppdu_desc->channel;
            mpdu_info->frame_type = ppdu_desc->frame_type;
            mpdu_info->ppdu_start_timestamp =
                ppdu_desc->ppdu_start_timestamp;
            mpdu_info->ppdu_end_timestamp =
                ppdu_desc->ppdu_end_timestamp;
            mpdu_info->tx_duration = ppdu_desc->tx_duration;
            mpdu_info->seq_no = seq_no;
            mpdu_info->num_msdu = ppdu_desc->num_msdu;

            /* update cdp_tx_indication_mpdu_info */
            dp_tx_update_user_mpdu_info(ppdu_id,
                                        &tx_capture_info.mpdu_info,
                                        &ppdu_desc->user[0]);
            tx_capture_info.mpdu_info.channel_num =
                pdev->operating_channel;

            dp_tx_update_sequence_number(tx_capture_info.mpdu_nbuf,
                                         seq_no);

            /*
             * send MPDU to osif layer
             * do we need to update mpdu_info before transmit?
             * get current mpdu_nbuf
             */
            dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                                 &tx_capture_info,
                                 HTT_INVALID_PEER,
                                 WDI_NO_VAL, pdev->pdev_id);

            if (tx_capture_info.mpdu_nbuf)
                qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
        }

        qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
        tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
        nbuf_ppdu_desc_list[desc_cnt] = NULL;
        qdf_nbuf_free(tmp_nbuf);
    }

    return QDF_STATUS_SUCCESS;
}

/*
 * The number of data PPDUs scheduled in a burst is 10,
 * which doesn't include BAR and other non-data frames;
 * ~50 is the maximum scheduled ppdu count
 */
#define SCHED_MAX_PPDU_CNT 64

/**
 * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Return: none
 */
void dp_tx_ppdu_stats_process(void *context)
{
    uint32_t curr_sched_cmdid;
    uint32_t last_ppdu_id;
    uint32_t ppdu_cnt;
    uint32_t ppdu_desc_cnt = 0;
    uint32_t j;
    struct dp_pdev *pdev = (struct dp_pdev *)context;
    struct ppdu_info *ppdu_info, *tmp_ppdu_info;
    uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
    struct ppdu_info *sched_ppdu_list[SCHED_MAX_PPDU_CNT];
    qdf_nbuf_t nbuf_ppdu_desc_list[SCHED_MAX_PPDU_CNT];
    struct dp_pdev_tx_capture *ptr_tx_cap;
    uint32_t tx_capture = pdev->tx_capture_enabled;

    ptr_tx_cap = &pdev->tx_capture;

    /* Move the PPDU entries to defer list */
    qdf_spin_lock_bh(&ptr_tx_cap->ppdu_stats_lock);
    STAILQ_CONCAT(&ptr_tx_cap->ppdu_stats_defer_queue,
                  &ptr_tx_cap->ppdu_stats_queue);
    ptr_tx_cap->ppdu_stats_defer_queue_depth +=
        ptr_tx_cap->ppdu_stats_queue_depth;
    ptr_tx_cap->ppdu_stats_queue_depth = 0;
    qdf_spin_unlock_bh(&ptr_tx_cap->ppdu_stats_lock);

    while (!STAILQ_EMPTY(&ptr_tx_cap->ppdu_stats_defer_queue)) {
        ppdu_info =
            STAILQ_FIRST(&ptr_tx_cap->ppdu_stats_defer_queue);
        curr_sched_cmdid = ppdu_info->sched_cmdid;

        ppdu_cnt = 0;
        STAILQ_FOREACH_SAFE(ppdu_info,
                            &ptr_tx_cap->ppdu_stats_defer_queue,
                            ppdu_info_queue_elem, tmp_ppdu_info) {
            if (curr_sched_cmdid != ppdu_info->sched_cmdid)
                break;
            qdf_assert_always(ppdu_cnt < SCHED_MAX_PPDU_CNT);
            sched_ppdu_list[ppdu_cnt] = ppdu_info;
            ppdu_cnt++;
        }

        if (ppdu_info && (curr_sched_cmdid == ppdu_info->sched_cmdid) &&
            ptr_tx_cap->ppdu_stats_next_sched < now_ms)
            break;

        last_ppdu_id = sched_ppdu_list[ppdu_cnt - 1]->ppdu_id;
        STAILQ_REMOVE_HEAD_UNTIL(&ptr_tx_cap->ppdu_stats_defer_queue,
                                 sched_ppdu_list[ppdu_cnt - 1],
                                 ppdu_info_queue_elem);
        ptr_tx_cap->ppdu_stats_defer_queue_depth -= ppdu_cnt;

        ppdu_desc_cnt = 0;
        /* Process tx buffer list based on last_ppdu_id stored above */
        for (j = 0; j < ppdu_cnt; j++) {
            struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
            struct dp_peer *peer = NULL;
            qdf_nbuf_t nbuf;
            uint32_t retries = 0;
            uint32_t ret = 0;
            qdf_nbuf_queue_t head_msdu;
            uint32_t start_tsf = 0;
            uint32_t end_tsf = 0;
            uint16_t tid = 0;
            uint32_t num_msdu = 0;
            uint32_t qlen = 0;

            qdf_nbuf_queue_init(&head_msdu);

            ppdu_info = sched_ppdu_list[j];
            ppdu_desc = (struct cdp_tx_completion_ppdu *)
                qdf_nbuf_data(ppdu_info->nbuf);
            pdev->tx_ppdu_proc++;

            dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
            /*
             * While processing/correlating Tx buffers, we should
             * hold the entire PPDU list for the given sched_cmdid
             * instead of freeing below.
             */
            nbuf = ppdu_info->nbuf;
            qdf_mem_free(ppdu_info);

            qdf_assert_always(nbuf);

            ppdu_desc = (struct cdp_tx_completion_ppdu *)
                qdf_nbuf_data(nbuf);

            /* send WDI event */
            if (!tx_capture) {
                /*
                 * Deliver PPDU stats only for valid (acked)
                 * data frames if sniffer mode is not enabled.
                 * If sniffer mode is enabled,
                 * PPDU stats for all frames including
                 * mgmt/control frames should be delivered
                 * to upper layer
                 */
                if (pdev->tx_sniffer_enable ||
                    pdev->mcopy_mode) {
                    dp_wdi_event_handler(
                        WDI_EVENT_TX_PPDU_DESC,
                        pdev->soc,
                        nbuf,
                        HTT_INVALID_PEER,
                        WDI_NO_VAL,
                        pdev->pdev_id);
                } else {
                    if (ppdu_desc->num_mpdu != 0 &&
                        ppdu_desc->num_users != 0 &&
                        (ppdu_desc->frame_ctrl &
                         HTT_FRAMECTRL_DATATYPE)) {
                        dp_wdi_event_handler(
                            WDI_EVENT_TX_PPDU_DESC,
                            pdev->soc,
                            nbuf,
                            HTT_INVALID_PEER,
                            WDI_NO_VAL,
                            pdev->pdev_id);
                    } else {
                        qdf_nbuf_free(nbuf);
                    }
                }
                continue;
            }

            peer = dp_peer_find_by_id(pdev->soc,
                                      ppdu_desc->user[0].peer_id);
            /* peer can be NULL */
            if (!peer) {
                qdf_nbuf_free(nbuf);
                continue;
            }

            /*
             * check whether it is a bss peer;
             * if bss_peer, no need to process further
             */
            if (!peer->bss_peer &&
                tx_capture &&
                (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) &&
                (!ppdu_desc->user[0].completion_status)) {
                /* print the bit map */
                dp_tx_print_bitmap(pdev, ppdu_desc,
                                   0, ppdu_desc->ppdu_id);
                if (ppdu_desc->user[0].tid > DP_NON_QOS_TID) {
                    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                              QDF_TRACE_LEVEL_ERROR,
                              "%s: ppdu[%d] peer_id[%d] TID[%d] > NON_QOS_TID!",
                              __func__,
                              ppdu_desc->ppdu_id,
                              ppdu_desc->user[0].peer_id,
                              ppdu_desc->user[0].tid);
                    dp_peer_unref_del_find_by_id(peer);
                    qdf_nbuf_free(nbuf);
                    continue;
                }

dequeue_msdu_again:
                tid = ppdu_desc->user[0].tid;
                num_msdu = ppdu_desc->user[0].num_msdu;
                start_tsf = ppdu_desc->ppdu_start_timestamp;
                end_tsf = ppdu_desc->ppdu_end_timestamp;
                /*
                 * retrieve msdu buffers from the ppdu_id & tid
                 * based msdu queue and store them in a local queue.
                 * Sometimes WBM completions arrive later than the
                 * per-PPDU stats. Assumption: all packets are SU,
                 * and packets arrive in order.
                 */
                ret = dp_tx_msdu_dequeue(peer,
                                         ppdu_desc->ppdu_id,
                                         tid, num_msdu,
                                         &head_msdu, start_tsf,
                                         end_tsf);

                if (!ret && (++retries < 3)) {
                    /* wait for wbm to complete */
                    qdf_sleep(20);
                    goto dequeue_msdu_again;
                }

                if (qdf_nbuf_is_queue_empty(&head_msdu)) {
                    dp_peer_unref_del_find_by_id(peer);
                    /*
                     * head msdu is NULL:
                     * free nbuf as no processing required
                     */
                    qdf_nbuf_free(nbuf);
                    continue;
                }

                /*
                 * now head_msdu holds the msdu list for this
                 * particular ppdu_id; restitch mpdus from
                 * msdus and create an mpdu queue
                 */
                dp_tx_mon_restitch_mpdu_from_msdus(pdev,
                                                   peer,
                                                   ppdu_desc,
                                                   &head_msdu);

                /*
                 * sanity: free local head msdu queue
                 * do we need this ?
                 */
                qdf_nbuf_queue_free(&head_msdu);

                qlen = qdf_nbuf_queue_len(&ppdu_desc->mpdu_q);

                if (!qlen) {
                    qdf_nbuf_free(nbuf);
                    dp_peer_unref_del_find_by_id(peer);
                    continue;
                }

                nbuf_ppdu_desc_list[ppdu_desc_cnt++] = nbuf;

                /* print ppdu_desc info for debugging purpose */
                QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                          QDF_TRACE_LEVEL_INFO,
                          "%s: ppdu[%d], p_id[%d], tid[%d], n_mpdu[%d %d] n_msdu[%d] retr[%d] qlen[%d] s_tsf[%u] dur[%u] seq[%d] [%d %d]",
                          __func__, ppdu_desc->ppdu_id,
                          ppdu_desc->user[0].peer_id,
                          ppdu_desc->user[0].tid,
                          ppdu_desc->num_mpdu,
                          ppdu_desc->user[0].mpdu_success,
                          ppdu_desc->num_msdu, retries,
                          qlen,
                          ppdu_desc->ppdu_start_timestamp,
                          ppdu_desc->tx_duration,
                          ppdu_desc->user[0].start_seq,
                          ppdu_cnt,
                          ppdu_desc_cnt);

                nbuf->next =
                    qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
            } else if (ppdu_desc->frame_type ==
                       CDP_PPDU_FTYPE_CTRL &&
                       tx_capture) {
                nbuf->next =
                    qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
                nbuf_ppdu_desc_list[ppdu_desc_cnt++] = nbuf;
            } else {
                qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
                qdf_nbuf_free(nbuf);
            }

            dp_peer_unref_del_find_by_id(peer);
        }

        /*
         * At this point we have mpdus queued per ppdu_desc;
         * based on packet capture flags, send mpdu info to upper stack
         */
        if (ppdu_desc_cnt) {
            dp_send_mpdu_info_to_stack(pdev,
                                       nbuf_ppdu_desc_list,
                                       ppdu_desc_cnt);
        }
    }
}

/**
 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
 * to upper layer
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * return: void
 */
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
                          struct ppdu_info *ppdu_info)
{
    uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

    TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
    pdev->list_depth--;

    ppdu_desc = (struct cdp_tx_completion_ppdu *)
        qdf_nbuf_data(ppdu_info->nbuf);

    qdf_spin_lock_bh(&pdev->tx_capture.ppdu_stats_lock);

    if (qdf_unlikely(!pdev->tx_capture_enabled &&
                     (pdev->tx_capture.ppdu_stats_queue_depth +
                      pdev->tx_capture.ppdu_stats_defer_queue_depth) >
                     DP_TX_PPDU_PROC_MAX_DEPTH)) {
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
        pdev->tx_capture.ppdu_dropped++;
    } else {
        STAILQ_INSERT_TAIL(&pdev->tx_capture.ppdu_stats_queue,
                           ppdu_info, ppdu_info_queue_elem);
        pdev->tx_capture.ppdu_stats_queue_depth++;
    }

    qdf_spin_unlock_bh(&pdev->tx_capture.ppdu_stats_lock);

    if ((pdev->tx_capture.ppdu_stats_queue_depth >
         DP_TX_PPDU_PROC_THRESHOLD) ||
        (pdev->tx_capture.ppdu_stats_next_sched <= now_ms)) {
        qdf_queue_work(0, pdev->tx_capture.ppdu_stats_workqueue,
                       &pdev->tx_capture.ppdu_stats_work);
        pdev->tx_capture.ppdu_stats_next_sched =
            now_ms + DP_TX_PPDU_PROC_TIMEOUT;
    }
}
#endif