dp_tx_capture.c

/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <htt.h>
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_rx_mon.h"
#include "htt_ppdu_stats.h"
#include "dp_htt.h"
#include "qdf_mem.h" /* qdf_mem_malloc, free */
#include "cdp_txrx_cmn_struct.h"
#include <enet.h>
#include "dp_tx_capture.h"

#define MAX_MONITOR_HEADER (512)
#define MAX_DUMMY_FRM_BODY (128)

/* Macros to handle sequence number bitmaps */

/* Size (in bits) of a segment of sequence number bitmap */
#define SEQ_SEG_SZ_BITS(_seqarr) (sizeof(_seqarr[0]) << 3)

/* Array index of a segment of sequence number bitmap */
#define SEQ_SEG_INDEX(_seqarr, _seqno) ((_seqno) / SEQ_SEG_SZ_BITS(_seqarr))

/* Bit mask of a seqno within a segment of sequence bitmap */
#define SEQ_SEG_MSK(_seqseg, _index) \
    (1 << ((_index) & ((sizeof(_seqseg) << 3) - 1)))

/* Check seqno bit in a segment of sequence bitmap */
#define SEQ_SEG_BIT(_seqseg, _index) \
    ((_seqseg) & SEQ_SEG_MSK((_seqseg), _index))

/* Segment of sequence bitmap containing a given sequence number */
#define SEQ_SEG(_seqarr, _seqno) \
    (_seqarr[(_seqno) / (sizeof(_seqarr[0]) << 3)])

/* Check seqno bit in the sequence bitmap */
#define SEQ_BIT(_seqarr, _seqno) \
    SEQ_SEG_BIT(SEQ_SEG(_seqarr, (_seqno)), (_seqno))
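
/*
 * Worked example of the SEQ_* macros above, assuming 32-bit bitmap
 * segments: for seqno offset 35, SEQ_SEG_INDEX() yields 35 / 32 = 1 and
 * SEQ_SEG_MSK() yields 1 << (35 & 31) = 1 << 3, so SEQ_BIT(bitmap, 35)
 * tests bit 3 of bitmap[1].
 */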

#ifdef WLAN_TX_PKT_CAPTURE_ENH

/*
 * dp_tx_capture_htt_frame_counter: increment counter for htt_frame_type
 * @pdev: DP pdev handle
 * @htt_frame_type: htt frame type received from fw
 *
 * return: void
 */
void dp_tx_capture_htt_frame_counter(struct dp_pdev *pdev,
                                     uint32_t htt_frame_type)
{
    if (htt_frame_type >= TX_CAP_HTT_MAX_FTYPE)
        return;

    pdev->tx_capture.htt_frame_type[htt_frame_type]++;
}

/*
 * dp_print_pdev_tx_capture_stats: print tx capture stats
 * @pdev: DP PDEV handle
 *
 * return: void
 */
void dp_print_pdev_tx_capture_stats(struct dp_pdev *pdev)
{
    struct dp_pdev_tx_capture *ptr_tx_cap;
    uint8_t i = 0, j = 0;

    ptr_tx_cap = &(pdev->tx_capture);

    DP_PRINT_STATS("tx capture stats\n");
    for (i = 0; i < TXCAP_MAX_TYPE; i++) {
        for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
            if (ptr_tx_cap->ctl_mgmt_q[i][j].qlen)
                DP_PRINT_STATS(" ctl_mgmt_q[%d][%d] = queue_len[%d]\n",
                               i, j, ptr_tx_cap->ctl_mgmt_q[i][j].qlen);
        }
    }

    for (i = 0; i < TX_CAP_HTT_MAX_FTYPE; i++) {
        if (!ptr_tx_cap->htt_frame_type[i])
            continue;
        DP_PRINT_STATS(" sgen htt frame type[%d] = %d",
                       i, ptr_tx_cap->htt_frame_type[i]);
    }
}

/**
 * dp_peer_or_pdev_tx_cap_enabled - Returns status of tx_cap_enabled
 * based on global per-pdev setting or per-peer setting
 * @pdev: Datapath pdev handle
 * @peer: Datapath peer
 *
 * Return: true if feature is enabled on a per-pdev basis or if
 * enabled for the given peer when per-peer mode is set, false otherwise
 */
inline bool
dp_peer_or_pdev_tx_cap_enabled(struct dp_pdev *pdev,
                               struct dp_peer *peer)
{
    if ((pdev->tx_capture_enabled ==
         CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS) ||
        ((pdev->tx_capture_enabled ==
          CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) &&
         peer->tx_cap_enabled))
        return true;

    return false;
}

/*
 * dp_peer_tid_queue_init() - Initialize ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
void dp_peer_tid_queue_init(struct dp_peer *peer)
{
    int tid;
    struct dp_tx_tid *tx_tid;

    for (tid = 0; tid < DP_MAX_TIDS; tid++) {
        tx_tid = &peer->tx_capture.tx_tid[tid];
        tx_tid->tid = tid;
        qdf_nbuf_queue_init(&tx_tid->msdu_comp_q);
        qdf_nbuf_queue_init(&tx_tid->pending_ppdu_q);
        tx_tid->max_ppdu_id = 0;

        /* spinlock create */
        qdf_spinlock_create(&tx_tid->tid_lock);
    }
}

/*
 * dp_peer_tid_queue_cleanup() - remove ppdu stats queue per TID
 * @peer: Datapath peer
 *
 */
void dp_peer_tid_queue_cleanup(struct dp_peer *peer)
{
    int tid;
    struct dp_tx_tid *tx_tid;

    for (tid = 0; tid < DP_MAX_TIDS; tid++) {
        tx_tid = &peer->tx_capture.tx_tid[tid];

        qdf_spin_lock_bh(&tx_tid->tid_lock);
        qdf_nbuf_queue_free(&tx_tid->msdu_comp_q);
        qdf_spin_unlock_bh(&tx_tid->tid_lock);

        /* spinlock destroy */
        qdf_spinlock_destroy(&tx_tid->tid_lock);
        tx_tid->max_ppdu_id = 0;
    }
}

/*
 * dp_peer_update_80211_hdr: update 80211 hdr
 * @vdev: DP VDEV
 * @peer: DP PEER
 *
 * return: void
 */
void dp_peer_update_80211_hdr(struct dp_vdev *vdev, struct dp_peer *peer)
{
    struct ieee80211_frame *ptr_wh;

    ptr_wh = &peer->tx_capture.tx_wifi_hdr;

    /* i_addr1 - Receiver mac address */
    /* i_addr2 - Transmitter mac address */
    /* i_addr3 - Destination mac address */
    qdf_mem_copy(ptr_wh->i_addr1,
                 peer->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
    qdf_mem_copy(ptr_wh->i_addr3,
                 peer->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
    qdf_mem_copy(ptr_wh->i_addr2,
                 vdev->mac_addr.raw,
                 QDF_MAC_ADDR_SIZE);
}

/*
 * dp_deliver_mgmt_frm: Process and deliver management frames for tx capture
 * @pdev: DP PDEV handle
 * @nbuf: buffer containing the htt_ppdu_stats_tx_mgmtctrl_payload_tlv
 *
 * return: void
 */
void dp_deliver_mgmt_frm(struct dp_pdev *pdev, qdf_nbuf_t nbuf)
{
    uint32_t ppdu_id;

    if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
        dp_wdi_event_handler(WDI_EVENT_TX_MGMT_CTRL, pdev->soc,
                             nbuf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
        return;
    }
    if (pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS ||
        pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) {
        /* queue the mgmt frame per type/subtype for later delivery */
        struct ieee80211_frame *wh;
        uint8_t type, subtype;

        ppdu_id = *(uint32_t *)qdf_nbuf_data(nbuf);
        wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) +
                                        sizeof(uint32_t));
        type = (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) >>
            IEEE80211_FC0_TYPE_SHIFT;
        subtype = (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >>
            IEEE80211_FC0_SUBTYPE_SHIFT;

        qdf_spin_lock_bh(
            &pdev->tx_capture.ctl_mgmt_lock[type][subtype]);
        qdf_nbuf_queue_add(&pdev->tx_capture.ctl_mgmt_q[type][subtype],
                           nbuf);
        qdf_spin_unlock_bh(
            &pdev->tx_capture.ctl_mgmt_lock[type][subtype]);

        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_DEBUG,
                  "dlvr mgmt frm(0x%08x): fc 0x%x %x, dur 0x%x%x\n",
                  ppdu_id, wh->i_fc[1], wh->i_fc[0],
                  wh->i_dur[1], wh->i_dur[0]);
    }
}

/**
 * dp_tx_ppdu_stats_attach - Initialize Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
void dp_tx_ppdu_stats_attach(struct dp_pdev *pdev)
{
    int i, j;

    /* Work queue setup for HTT stats and tx capture handling */
    qdf_create_work(0, &pdev->tx_capture.ppdu_stats_work,
                    dp_tx_ppdu_stats_process,
                    pdev);
    pdev->tx_capture.ppdu_stats_workqueue =
        qdf_alloc_unbound_workqueue("ppdu_stats_work_queue");
    STAILQ_INIT(&pdev->tx_capture.ppdu_stats_queue);
    STAILQ_INIT(&pdev->tx_capture.ppdu_stats_defer_queue);
    qdf_spinlock_create(&pdev->tx_capture.ppdu_stats_lock);
    pdev->tx_capture.ppdu_stats_queue_depth = 0;
    pdev->tx_capture.ppdu_stats_next_sched = 0;
    pdev->tx_capture.ppdu_stats_defer_queue_depth = 0;
    pdev->tx_capture.ppdu_dropped = 0;
    for (i = 0; i < TXCAP_MAX_TYPE; i++) {
        for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
            qdf_nbuf_queue_init(
                &pdev->tx_capture.ctl_mgmt_q[i][j]);
            qdf_spinlock_create(
                &pdev->tx_capture.ctl_mgmt_lock[i][j]);
        }
    }
}

/**
 * dp_tx_ppdu_stats_detach - Cleanup Tx PPDU stats and enhanced capture
 * @pdev: DP PDEV
 *
 * Return: none
 */
void dp_tx_ppdu_stats_detach(struct dp_pdev *pdev)
{
    struct ppdu_info *ppdu_info, *tmp_ppdu_info = NULL;
    int i, j;

    if (!pdev || !pdev->tx_capture.ppdu_stats_workqueue)
        return;

    qdf_flush_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);
    qdf_destroy_workqueue(0, pdev->tx_capture.ppdu_stats_workqueue);

    qdf_spinlock_destroy(&pdev->tx_capture.ppdu_stats_lock);

    STAILQ_FOREACH_SAFE(ppdu_info,
                        &pdev->tx_capture.ppdu_stats_queue,
                        ppdu_info_queue_elem, tmp_ppdu_info) {
        STAILQ_REMOVE(&pdev->tx_capture.ppdu_stats_queue,
                      ppdu_info, ppdu_info, ppdu_info_queue_elem);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }

    STAILQ_FOREACH_SAFE(ppdu_info,
                        &pdev->tx_capture.ppdu_stats_defer_queue,
                        ppdu_info_queue_elem, tmp_ppdu_info) {
        STAILQ_REMOVE(&pdev->tx_capture.ppdu_stats_defer_queue,
                      ppdu_info, ppdu_info, ppdu_info_queue_elem);
        qdf_nbuf_free(ppdu_info->nbuf);
        qdf_mem_free(ppdu_info);
    }

    for (i = 0; i < TXCAP_MAX_TYPE; i++) {
        for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
            qdf_spin_lock_bh(
                &pdev->tx_capture.ctl_mgmt_lock[i][j]);
            qdf_nbuf_queue_free(
                &pdev->tx_capture.ctl_mgmt_q[i][j]);
            qdf_spin_unlock_bh(
                &pdev->tx_capture.ctl_mgmt_lock[i][j]);
            qdf_spinlock_destroy(
                &pdev->tx_capture.ctl_mgmt_lock[i][j]);
        }
    }
}

/**
 * dp_update_msdu_to_list(): Function to queue msdu from wbm
 * @soc: DP soc handle
 * @pdev: dp_pdev
 * @peer: dp_peer
 * @ts: hal tx completion status
 * @netbuf: msdu
 *
 * return: status
 */
QDF_STATUS
dp_update_msdu_to_list(struct dp_soc *soc,
                       struct dp_pdev *pdev,
                       struct dp_peer *peer,
                       struct hal_tx_completion_status *ts,
                       qdf_nbuf_t netbuf)
{
    struct dp_tx_tid *tx_tid;
    struct msdu_completion_info *msdu_comp_info;

    if (!peer) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d peer NULL !", __func__, __LINE__);
        return QDF_STATUS_E_FAILURE;
    }

    if (ts->tid > DP_NON_QOS_TID) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d peer_id %d, tid %d > NON_QOS_TID!",
                  __func__, __LINE__, ts->peer_id, ts->tid);
        return QDF_STATUS_E_FAILURE;
    }

    tx_tid = &peer->tx_capture.tx_tid[ts->tid];

    if (!tx_tid) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: %d tid[%d] NULL !", __func__, __LINE__, ts->tid);
        return QDF_STATUS_E_FAILURE;
    }

    qdf_nbuf_unmap(soc->osdev, netbuf, QDF_DMA_TO_DEVICE);

    if (!qdf_nbuf_push_head(netbuf, sizeof(struct msdu_completion_info))) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  FL("No headroom"));
        return QDF_STATUS_E_NOMEM;
    }

    msdu_comp_info = (struct msdu_completion_info *)qdf_nbuf_data(netbuf);

    /* copy msdu_completion_info to control buffer */
    msdu_comp_info->ppdu_id = ts->ppdu_id;
    msdu_comp_info->peer_id = ts->peer_id;
    msdu_comp_info->tid = ts->tid;
    msdu_comp_info->first_msdu = ts->first_msdu;
    msdu_comp_info->last_msdu = ts->last_msdu;
    msdu_comp_info->msdu_part_of_amsdu = ts->msdu_part_of_amsdu;
    msdu_comp_info->transmit_cnt = ts->transmit_cnt;
    msdu_comp_info->tsf = ts->tsf;
    msdu_comp_info->status = ts->status;

    /* update max ppdu_id */
    tx_tid->max_ppdu_id = ts->ppdu_id;
    pdev->tx_capture.last_msdu_id = ts->ppdu_id;

    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
              "msdu_completion: ppdu_id[%d] peer_id[%d] tid[%d] rel_src[%d] status[%d] tsf[%u] A[%d] CNT[%d]",
              ts->ppdu_id, ts->peer_id, ts->tid, ts->release_src,
              ts->status, ts->tsf, ts->msdu_part_of_amsdu,
              ts->transmit_cnt);

    /* lock here */
    qdf_spin_lock_bh(&tx_tid->tid_lock);

    /* add nbuf to tail queue per peer tid */
    qdf_nbuf_queue_add(&tx_tid->msdu_comp_q, netbuf);

    qdf_spin_unlock_bh(&tx_tid->tid_lock);

    return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_add_to_comp_queue() - add completion msdu to queue
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: DP peer
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_add_to_comp_queue(struct dp_soc *soc,
                                   struct dp_tx_desc_s *desc,
                                   struct hal_tx_completion_status *ts,
                                   struct dp_peer *peer)
{
    int ret = QDF_STATUS_E_FAILURE;

    if ((desc->pdev->tx_capture_enabled != CDP_TX_ENH_CAPTURE_DISABLED) &&
        ((ts->status == HAL_TX_TQM_RR_FRAME_ACKED) ||
         (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) ||
         ((ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) && ts->transmit_cnt))) {
        ret = dp_update_msdu_to_list(soc, desc->pdev,
                                     peer, ts, desc->nbuf);
    }

    return ret;
}

/**
 * dp_process_ppdu_stats_update_failed_bitmap(): update failed bitmap
 * @pdev: dp_pdev
 * @data: tx completion ppdu desc
 * @ppdu_id: ppdu id
 * @size: size of bitmap
 *
 * return: void
 */
void dp_process_ppdu_stats_update_failed_bitmap(struct dp_pdev *pdev,
                                                void *data,
                                                uint32_t ppdu_id,
                                                uint32_t size)
{
    struct cdp_tx_completion_ppdu_user *user;
    uint32_t mpdu_tried;
    uint32_t ba_seq_no;
    uint32_t start_seq;
    uint32_t num_mpdu;
    uint32_t diff;
    uint32_t carry = 0;
    uint32_t bitmask = 0;
    uint32_t i;
    uint32_t k;
    uint32_t ba_bitmap = 0;
    int last_set_bit;

    user = (struct cdp_tx_completion_ppdu_user *)data;

    /* get number of mpdu from ppdu_desc */
    mpdu_tried = user->mpdu_tried_mcast + user->mpdu_tried_ucast;

    ba_seq_no = user->ba_seq_no;
    start_seq = user->start_seq;
    num_mpdu = user->num_mpdu;

    /* assumption: number of mpdu will be less than 32 */
    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
              "ppdu_id[%d] ba_seq_no[%d] start_seq_no[%d] mpdu_tried[%d]",
              ppdu_id, ba_seq_no, start_seq, mpdu_tried);

    for (i = 0; i < size; i++) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
                  "ppdu_id[%d] ba_bitmap[%x] enqueue_bitmap[%x]",
                  ppdu_id, user->ba_bitmap[i], user->enq_bitmap[i]);
    }

    /* Handle sequence no. wraparound */
    if (start_seq <= ba_seq_no) {
        diff = ba_seq_no - start_seq;
        /* Sequence delta of more than 2048 is considered wraparound
         * and we extend start_seq to be more than ba_seq just to
         * adjust failed_bitmap
         */
        if (qdf_unlikely(diff > (IEEE80211_SEQ_MAX / 2))) {
            diff = (start_seq - ba_seq_no) &
                (IEEE80211_SEQ_MAX - 1);
            start_seq = ba_seq_no + diff;
        }
    } else {
        diff = start_seq - ba_seq_no;
        /* Sequence delta of more than 2048 is considered wraparound
         * and we extend ba_seq to be more than start_seq just to
         * adjust failed_bitmap
         */
        if (qdf_unlikely(diff > (IEEE80211_SEQ_MAX / 2))) {
            diff = (ba_seq_no - start_seq) &
                (IEEE80211_SEQ_MAX - 1);
            ba_seq_no = start_seq + diff;
        }
    }
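
    /*
     * Worked example of the wraparound handling above, assuming
     * IEEE80211_SEQ_MAX is 4096: with start_seq = 4090 and ba_seq_no = 5
     * the raw delta is 4085 (> 2048), so the 12-bit wraparound distance
     * (5 - 4090) & 4095 = 11 is used instead and ba_seq_no is rebased to
     * start_seq + 11 before the bitmaps are aligned below.
     */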
    /* Adjust failed_bitmap to start from same seq_no as enq_bitmap */
    last_set_bit = 0;
    if (start_seq <= ba_seq_no) {
        bitmask = (1 << diff) - 1;
        for (i = 0; i < size; i++) {
            ba_bitmap = user->ba_bitmap[i];
            user->failed_bitmap[i] = (ba_bitmap << diff);
            user->failed_bitmap[i] |= (bitmask & carry);
            carry = ((ba_bitmap & (bitmask << (32 - diff))) >>
                     (32 - diff));
            user->failed_bitmap[i] = user->enq_bitmap[i] &
                user->failed_bitmap[i];
            if (user->enq_bitmap[i]) {
                last_set_bit = i * 32 +
                    qdf_fls(user->enq_bitmap[i]) - 1;
            }
        }
    } else {
        /* array index */
        k = diff >> 5;
        diff = diff & 0x1F;
        bitmask = (1 << diff) - 1;
        for (i = 0; i < size; i++, k++) {
            ba_bitmap = user->ba_bitmap[k];
            user->failed_bitmap[i] = ba_bitmap >> diff;
            /* get next ba_bitmap */
            ba_bitmap = user->ba_bitmap[k + 1];
            carry = (ba_bitmap & bitmask);
            user->failed_bitmap[i] |=
                ((carry & bitmask) << (32 - diff));
            user->failed_bitmap[i] = user->enq_bitmap[i] &
                user->failed_bitmap[i];
            if (user->enq_bitmap[i]) {
                last_set_bit = i * 32 +
                    qdf_fls(user->enq_bitmap[i]) - 1;
            }
        }
    }
    user->last_enq_seq = user->start_seq + last_set_bit;
    user->ba_size = user->last_enq_seq - user->start_seq + 1;
}

/*
 * dp_soc_set_txrx_ring_map_single() - set tx ring map to the single
 * TX ring mapping
 * @soc: DP soc handle
 *
 * Return: void
 */
static void dp_soc_set_txrx_ring_map_single(struct dp_soc *soc)
{
    uint32_t i;

    for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
        soc->tx_ring_map[i] =
            dp_cpu_ring_map[DP_SINGLE_TX_RING_MAP][i];
    }
}

/*
 * dp_iterate_free_peer_msdu_q() - API to free msdu queue
 * @pdev_hdl: DP_PDEV handle
 *
 * Return: void
 */
static void dp_iterate_free_peer_msdu_q(void *pdev_hdl)
{
    struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
    struct dp_soc *soc = pdev->soc;
    struct dp_vdev *vdev = NULL;
    struct dp_peer *peer = NULL;

    qdf_spin_lock_bh(&soc->peer_ref_mutex);
    qdf_spin_lock_bh(&pdev->vdev_list_lock);
    DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
        DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
            int tid;
            struct dp_tx_tid *tx_tid;

            for (tid = 0; tid < DP_MAX_TIDS; tid++) {
                qdf_nbuf_t ppdu_nbuf = NULL;
                struct cdp_tx_completion_ppdu *ppdu_desc =
                    NULL;
                int i;

                tx_tid = &peer->tx_capture.tx_tid[tid];

                /* spinlock hold */
                qdf_spin_lock_bh(&tx_tid->tid_lock);
                qdf_nbuf_queue_free(&tx_tid->msdu_comp_q);
                qdf_spin_unlock_bh(&tx_tid->tid_lock);

                while ((ppdu_nbuf = qdf_nbuf_queue_remove(
                        &tx_tid->pending_ppdu_q))) {
                    ppdu_desc =
                        (struct cdp_tx_completion_ppdu *)
                        qdf_nbuf_data(ppdu_nbuf);
                    if (!ppdu_desc->mpdus) {
                        qdf_nbuf_free(ppdu_nbuf);
                        continue;
                    }
                    for (i = 0; i <
                         ppdu_desc->user[0].ba_size; i++) {
                        if (!ppdu_desc->mpdus[i])
                            continue;
                        qdf_nbuf_free(
                            ppdu_desc->mpdus[i]);
                    }
                    qdf_mem_free(ppdu_desc->mpdus);
                    ppdu_desc->mpdus = NULL;
                    qdf_nbuf_free(ppdu_nbuf);
                }
            }
        }
    }
    qdf_spin_unlock_bh(&pdev->vdev_list_lock);
    qdf_spin_unlock_bh(&soc->peer_ref_mutex);
}

/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
    struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
    int i, j;

    qdf_spin_lock(&pdev->tx_capture.config_lock);
    pdev->tx_capture_enabled = val;
    if (pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENABLE_ALL_PEERS ||
        pdev->tx_capture_enabled == CDP_TX_ENH_CAPTURE_ENDIS_PER_PEER) {
        dp_soc_set_txrx_ring_map_single(pdev->soc);
        if (!pdev->pktlog_ppdu_stats)
            dp_h2t_cfg_stats_msg_send(pdev,
                                      DP_PPDU_STATS_CFG_SNIFFER,
                                      pdev->pdev_id);
    } else {
        dp_soc_set_txrx_ring_map(pdev->soc);
        dp_h2t_cfg_stats_msg_send(pdev,
                                  DP_PPDU_STATS_CFG_ENH_STATS,
                                  pdev->pdev_id);
        dp_iterate_free_peer_msdu_q(pdev);
        for (i = 0; i < TXCAP_MAX_TYPE; i++) {
            for (j = 0; j < TXCAP_MAX_SUBTYPE; j++) {
                qdf_spin_lock_bh(
                    &pdev->tx_capture.ctl_mgmt_lock[i][j]);
                qdf_nbuf_queue_free(
                    &pdev->tx_capture.ctl_mgmt_q[i][j]);
                qdf_spin_unlock_bh(
                    &pdev->tx_capture.ctl_mgmt_lock[i][j]);
            }
        }
    }
    qdf_spin_unlock(&pdev->tx_capture.config_lock);
    return QDF_STATUS_SUCCESS;
}

/**
 * get_number_of_1s(): Function to get number of 1s
 * @value: value in which to count the set bits
 *
 * return: number of 1s
 */
static
inline uint32_t get_number_of_1s(uint32_t value)
{
    uint32_t shift[] = {1, 2, 4, 8, 16};
    uint32_t magic_number[] = { 0x55555555, 0x33333333, 0x0F0F0F0F,
                                0x00FF00FF, 0x0000FFFF};
    uint8_t k = 0;

    for (; k <= 4; k++) {
        value = (value & magic_number[k]) +
            ((value >> shift[k]) & magic_number[k]);
    }

    return value;
}
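
/*
 * The loop above is the classic parallel (SWAR) population count: each
 * pass adds adjacent groups of bits in place, doubling the group width
 * from 1 to 16 bits. For example, value = 0xF0 (0b11110000) folds down
 * to 4 after the five passes, i.e. four bits set.
 */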

/**
 * dp_tx_print_bitmap(): Function to print bitmap
 * @pdev: dp_pdev
 * @ppdu_desc: ppdu completion descriptor
 * @user_index: user index
 * @ppdu_id: ppdu id
 *
 * return: status
 */
static
QDF_STATUS dp_tx_print_bitmap(struct dp_pdev *pdev,
                              struct cdp_tx_completion_ppdu *ppdu_desc,
                              uint32_t user_index,
                              uint32_t ppdu_id)
{
    struct cdp_tx_completion_ppdu_user *user;
    uint8_t i;
    uint32_t mpdu_tried;
    uint32_t ba_seq_no;
    uint32_t start_seq;
    uint32_t num_mpdu;
    uint32_t fail_num_mpdu = 0;

    user = &ppdu_desc->user[user_index];

    /* get number of mpdu from ppdu_desc */
    mpdu_tried = user->mpdu_tried_mcast + user->mpdu_tried_ucast;

    ba_seq_no = user->ba_seq_no;
    start_seq = user->start_seq;
    num_mpdu = user->mpdu_success;

    if (user->tid > DP_NON_QOS_TID) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: ppdu[%d] peer_id[%d] TID[%d] > NON_QOS_TID!",
                  __func__, ppdu_id, user->peer_id, user->tid);
        return QDF_STATUS_E_FAILURE;
    }

    if (mpdu_tried != num_mpdu) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_INFO,
                  "%s: ppdu[%d] peer[%d] tid[%d] ba[%d] start[%d] mpdu_tri[%d] num_mpdu[%d] is_mcast[%d]",
                  __func__, ppdu_id, user->peer_id, user->tid,
                  ba_seq_no, start_seq, mpdu_tried,
                  num_mpdu, user->is_mcast);
        for (i = 0; i < CDP_BA_256_BIT_MAP_SIZE_DWORDS; i++) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_INFO,
                      "ppdu_id[%d] ba_bitmap[0x%x] enqueue_bitmap[0x%x] failed_bitmap[0x%x]",
                      ppdu_id, user->ba_bitmap[i],
                      user->enq_bitmap[i],
                      user->failed_bitmap[i]);
            fail_num_mpdu +=
                get_number_of_1s(user->failed_bitmap[i]);
        }
    }

    if (fail_num_mpdu == num_mpdu && num_mpdu)
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_DEBUG,
                  "%s: %d ppdu_id[%d] num_mpdu[%d, %d]",
                  __func__, __LINE__, ppdu_id, num_mpdu, fail_num_mpdu);

    return QDF_STATUS_SUCCESS;
}

static uint32_t dp_tx_update_80211_hdr(struct dp_pdev *pdev,
                                       struct dp_peer *peer,
                                       void *data,
                                       qdf_nbuf_t nbuf,
                                       uint16_t ether_type,
                                       uint8_t *src_addr)
{
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct ieee80211_frame *ptr_wh;
    struct ieee80211_qoscntl *ptr_qoscntl;
    uint32_t mpdu_buf_len;
    uint8_t *ptr_hdr;
    uint16_t eth_type = qdf_htons(ether_type);

    ppdu_desc = (struct cdp_tx_completion_ppdu *)data;
    ptr_wh = &peer->tx_capture.tx_wifi_hdr;
    ptr_qoscntl = &peer->tx_capture.tx_qoscntl;

    /*
     * update framectrl only for first ppdu_id
     * rest of mpdu will have same frame ctrl
     * mac address and duration
     */
    if (ppdu_desc->ppdu_id != peer->tx_capture.tx_wifi_ppdu_id) {
        ptr_wh->i_fc[1] = (ppdu_desc->frame_ctrl & 0xFF00) >> 8;
        ptr_wh->i_fc[0] = (ppdu_desc->frame_ctrl & 0xFF);
        ptr_wh->i_dur[1] = (ppdu_desc->tx_duration & 0xFF00) >> 8;
        ptr_wh->i_dur[0] = (ppdu_desc->tx_duration & 0xFF);
        ptr_qoscntl->i_qos[1] = (ppdu_desc->user[0].qos_ctrl &
                                 0xFF00) >> 8;
        ptr_qoscntl->i_qos[0] = (ppdu_desc->user[0].qos_ctrl & 0xFF);

        /* Update Addr 3 (SA) with SA derived from ether packet */
        qdf_mem_copy(ptr_wh->i_addr3, src_addr, QDF_MAC_ADDR_SIZE);

        peer->tx_capture.tx_wifi_ppdu_id = ppdu_desc->ppdu_id;
    }

    mpdu_buf_len = sizeof(struct ieee80211_frame) + LLC_SNAP_HDR_LEN;
    if (qdf_likely(ppdu_desc->user[0].tid != DP_NON_QOS_TID))
        mpdu_buf_len += sizeof(struct ieee80211_qoscntl);

    nbuf->protocol = qdf_htons(ETH_P_802_2);

    /* update ieee80211_frame header */
    if (!qdf_nbuf_push_head(nbuf, mpdu_buf_len)) {
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  FL("No headroom"));
        return QDF_STATUS_E_NOMEM;
    }

    ptr_hdr = (void *)qdf_nbuf_data(nbuf);
    qdf_mem_copy(ptr_hdr, ptr_wh, sizeof(struct ieee80211_frame));
    ptr_hdr = ptr_hdr + (sizeof(struct ieee80211_frame));

    /* update qoscntl header */
    if (qdf_likely(ppdu_desc->user[0].tid != DP_NON_QOS_TID)) {
        qdf_mem_copy(ptr_hdr, ptr_qoscntl,
                     sizeof(struct ieee80211_qoscntl));
        ptr_hdr = ptr_hdr + sizeof(struct ieee80211_qoscntl);
    }

    /* update LLC */
    *ptr_hdr = LLC_SNAP_LSAP;
    *(ptr_hdr + 1) = LLC_SNAP_LSAP;
    *(ptr_hdr + 2) = LLC_UI;
    *(ptr_hdr + 3) = 0x00;
    *(ptr_hdr + 4) = 0x00;
    *(ptr_hdr + 5) = 0x00;
    *(ptr_hdr + 6) = (eth_type & 0xFF00) >> 8;
    *(ptr_hdr + 7) = (eth_type & 0xFF);

    qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) - mpdu_buf_len);
    return 0;
}
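
/*
 * Sketch of the MPDU header laid out above (QoS control is present only
 * for QoS TIDs; LLC_SNAP_LSAP is 0xAA and LLC_UI is 0x03):
 *
 *   [ieee80211_frame][ieee80211_qoscntl][AA AA 03 00 00 00 <ether_type>]
 *
 * The MSDU payloads are attached behind this header by
 * dp_tx_mon_restitch_mpdu() below.
 */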

/**
 * dp_tx_mon_restitch_mpdu(): Function to restitch msdu to mpdu
 * @pdev: dp_pdev
 * @peer: dp_peer
 * @ppdu_desc: ppdu completion descriptor
 * @head_msdu: head msdu queue
 * @mpdu_q: output mpdu queue
 *
 * return: status
 */
static uint32_t
dp_tx_mon_restitch_mpdu(struct dp_pdev *pdev, struct dp_peer *peer,
                        struct cdp_tx_completion_ppdu *ppdu_desc,
                        qdf_nbuf_queue_t *head_msdu,
                        qdf_nbuf_queue_t *mpdu_q)
{
    qdf_nbuf_t curr_nbuf = NULL;
    qdf_nbuf_t first_nbuf = NULL;
    qdf_nbuf_t prev_nbuf = NULL;
    qdf_nbuf_t mpdu_nbuf = NULL;
    struct msdu_completion_info *ptr_msdu_info = NULL;
    uint8_t first_msdu = 0;
    uint8_t last_msdu = 0;
    uint32_t frag_list_sum_len = 0;
    uint8_t first_msdu_not_seen = 1;
    uint16_t ether_type = 0;
    qdf_ether_header_t *eh = NULL;

    curr_nbuf = qdf_nbuf_queue_remove(head_msdu);

    while (curr_nbuf) {
        ptr_msdu_info =
            (struct msdu_completion_info *)qdf_nbuf_data(curr_nbuf);
        first_msdu = ptr_msdu_info->first_msdu;
        last_msdu = ptr_msdu_info->last_msdu;
        eh = (qdf_ether_header_t *)(curr_nbuf->data +
                                    sizeof(struct msdu_completion_info));
        ether_type = eh->ether_type;

        /* pull msdu_completion_info added in pre header */
        qdf_nbuf_pull_head(curr_nbuf,
                           sizeof(struct msdu_completion_info));

        if (first_msdu && first_msdu_not_seen) {
            first_nbuf = curr_nbuf;
            frag_list_sum_len = 0;
            first_msdu_not_seen = 0;

            /* pull ethernet header from first MSDU alone */
            qdf_nbuf_pull_head(curr_nbuf,
                               sizeof(qdf_ether_header_t));
            mpdu_nbuf = qdf_nbuf_alloc(pdev->soc->osdev,
                                       MAX_MONITOR_HEADER,
                                       MAX_MONITOR_HEADER,
                                       4, FALSE);

            if (!mpdu_nbuf) {
                QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                          QDF_TRACE_LEVEL_FATAL,
                          "MPDU head allocation failed !!!");
                goto free_ppdu_desc_mpdu_q;
            }

            dp_tx_update_80211_hdr(pdev, peer,
                                   ppdu_desc, mpdu_nbuf,
                                   ether_type, eh->ether_shost);

            /* update first buffer to previous buffer */
            prev_nbuf = curr_nbuf;
        } else if (first_msdu && !first_msdu_not_seen) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!!! NO LAST MSDU\n");
            /*
             * no last msdu in a mpdu
             * handle this case
             */
            qdf_nbuf_free(curr_nbuf);
            /*
             * No last msdu found because WBM comes out
             * of order, free the pkt
             */
            goto free_ppdu_desc_mpdu_q;
        } else if (!first_msdu && first_msdu_not_seen) {
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!!! NO FIRST MSDU\n");
            /*
             * no first msdu in a mpdu
             * handle this case
             */
            qdf_nbuf_free(curr_nbuf);
            /*
             * no first msdu found because WBM comes out
             * of order, free the pkt
             */
            goto free_ppdu_desc_mpdu_q;
        } else {
            /* update current buffer to previous buffer next */
            prev_nbuf->next = curr_nbuf;
            /* move the previous buffer to next buffer */
            prev_nbuf = prev_nbuf->next;
        }

        frag_list_sum_len += qdf_nbuf_len(curr_nbuf);

        if (last_msdu) {
            /*
             * first nbuf will hold list of msdu
             * stored in prev_nbuf
             */
            qdf_nbuf_append_ext_list(mpdu_nbuf,
                                     first_nbuf,
                                     frag_list_sum_len);

            /* add mpdu to mpdu queue */
            qdf_nbuf_queue_add(mpdu_q, mpdu_nbuf);
            first_nbuf = NULL;
            mpdu_nbuf = NULL;

            /* next msdu will start with first msdu */
            first_msdu_not_seen = 1;
            goto check_for_next_msdu;
        }

        /* get next msdu from the head_msdu */
        curr_nbuf = qdf_nbuf_queue_remove(head_msdu);

        if (!curr_nbuf) {
            /* msdu missed in list */
            QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                      QDF_TRACE_LEVEL_FATAL,
                      "!!!! WAITING for msdu but list empty !!!!");
        }

        continue;

check_for_next_msdu:
        if (qdf_nbuf_is_queue_empty(head_msdu))
            return 0;
        curr_nbuf = qdf_nbuf_queue_remove(head_msdu);
    }

    return 0;

free_ppdu_desc_mpdu_q:
    /* free already chained msdu pkt */
    while (first_nbuf) {
        curr_nbuf = first_nbuf;
        first_nbuf = first_nbuf->next;
        qdf_nbuf_free(curr_nbuf);
    }

    /* free allocated mpdu hdr */
    if (mpdu_nbuf)
        qdf_nbuf_free(mpdu_nbuf);

    /* free queued remaining msdu pkt per ppdu */
    qdf_nbuf_queue_free(head_msdu);

    /* free queued mpdu per ppdu */
    qdf_nbuf_queue_free(mpdu_q);

    return 0;
}
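
/*
 * Note on the restitch layout above: each MPDU produced is a freshly
 * allocated header nbuf (802.11 + QoS + LLC/SNAP) whose frag list,
 * attached via qdf_nbuf_append_ext_list(), carries the chained MSDU
 * completion buffers, so payloads are linked rather than copied.
 */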

/**
 * dp_tx_msdu_dequeue(): Function to dequeue msdu from peer based tid
 * @peer: dp_peer
 * @ppdu_id: ppdu_id
 * @tid: tid
 * @num_msdu: number of msdu
 * @head: head queue
 * @head_xretries: queue for msdus removed due to excessive retries
 * @start_tsf: start tsf from ppdu_desc
 * @end_tsf: end tsf from ppdu_desc
 *
 * return: 1 if the PPDU is matched or should be skipped, 0 otherwise
 */
static
uint32_t dp_tx_msdu_dequeue(struct dp_peer *peer, uint32_t ppdu_id,
                            uint16_t tid, uint32_t num_msdu,
                            qdf_nbuf_queue_t *head,
                            qdf_nbuf_queue_t *head_xretries,
                            uint32_t start_tsf, uint32_t end_tsf)
{
    struct dp_tx_tid *tx_tid = NULL;
    uint32_t msdu_ppdu_id;
    qdf_nbuf_t curr_msdu = NULL;
    qdf_nbuf_t prev_msdu = NULL;
    struct msdu_completion_info *ptr_msdu_info = NULL;
    uint32_t wbm_tsf;
    uint32_t matched = 0;

    if (qdf_unlikely(!peer))
        return 0;

    tx_tid = &peer->tx_capture.tx_tid[tid];

    if (qdf_unlikely(!tx_tid))
        return 0;

    if (qdf_nbuf_is_queue_empty(&tx_tid->msdu_comp_q))
        return 0;

    /* lock here */
    qdf_spin_lock_bh(&tx_tid->tid_lock);

    curr_msdu = qdf_nbuf_queue_first(&tx_tid->msdu_comp_q);

    while (curr_msdu) {
        if (qdf_nbuf_queue_len(head) == num_msdu) {
            matched = 1;
            break;
        }
        ptr_msdu_info =
            (struct msdu_completion_info *)qdf_nbuf_data(curr_msdu);
        msdu_ppdu_id = ptr_msdu_info->ppdu_id;
        wbm_tsf = ptr_msdu_info->tsf;

        if ((ptr_msdu_info->status == HAL_TX_TQM_RR_REM_CMD_TX) ||
            (ptr_msdu_info->status == HAL_TX_TQM_RR_REM_CMD_AGED)) {
            /* Frames removed due to excessive retries */
            qdf_nbuf_queue_remove(&tx_tid->msdu_comp_q);
            qdf_nbuf_queue_add(head_xretries, curr_msdu);
            curr_msdu = qdf_nbuf_queue_first(
                &tx_tid->msdu_comp_q);
            prev_msdu = NULL;
            continue;
        }

        if (wbm_tsf > end_tsf) {
            /* PPDU being matched is older than MSDU at head of
             * completion queue. Return matched=1 to skip PPDU
             */
            matched = 1;
            break;
        }

        if (wbm_tsf && (wbm_tsf < start_tsf)) {
            /* remove the aged packet */
            qdf_nbuf_queue_remove(&tx_tid->msdu_comp_q);
            qdf_nbuf_free(curr_msdu);

            curr_msdu = qdf_nbuf_queue_first(
                &tx_tid->msdu_comp_q);
            prev_msdu = NULL;
            continue;
        }

        if (msdu_ppdu_id == ppdu_id) {
            if (qdf_likely(!prev_msdu)) {
                /* remove head */
                qdf_nbuf_queue_remove(&tx_tid->msdu_comp_q);

                /* add msdu to head queue */
                qdf_nbuf_queue_add(head, curr_msdu);

                /* get next msdu from msdu_comp_q */
                curr_msdu = qdf_nbuf_queue_first(
                    &tx_tid->msdu_comp_q);
                continue;
            } else {
                /* update prev_msdu next to current msdu next */
                prev_msdu->next = curr_msdu->next;
                /* set current msdu next as NULL */
                curr_msdu->next = NULL;
                /* decrement length */
                ((qdf_nbuf_queue_t *)(
                    &tx_tid->msdu_comp_q))->qlen--;

                /* add msdu to head queue */
                qdf_nbuf_queue_add(head, curr_msdu);
                /* set previous msdu to current msdu */
                curr_msdu = prev_msdu->next;
                continue;
            }
        }

        prev_msdu = curr_msdu;
        curr_msdu = prev_msdu->next;
    }

    qdf_spin_unlock_bh(&tx_tid->tid_lock);

    return matched;
}
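
/*
 * Note on the TSF window used above: [start_tsf, end_tsf] brackets the
 * PPDU's completion times. MSDUs whose wbm_tsf falls below the window
 * are stale and freed; a wbm_tsf above the window means the queue head
 * already belongs to a newer PPDU, so the current PPDU is reported as
 * matched and skipped.
 */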

/**
 * get_mpdu_clone_from_next_ppdu(): Function to clone missing mpdu from
 * next ppdu
 * @nbuf_ppdu_desc_list: nbuf list
 * @ppdu_desc_cnt: ppdu_desc_cnt
 * @missed_seq_no: sequence number of the missing mpdu
 * @peer_id: peer id
 * @ppdu_id: ppdu_id
 *
 * return: cloned mpdu nbuf, or NULL if not found
 */
static
qdf_nbuf_t get_mpdu_clone_from_next_ppdu(qdf_nbuf_t nbuf_ppdu_desc_list[],
                                         uint32_t ppdu_desc_cnt,
                                         uint16_t missed_seq_no,
                                         uint16_t peer_id, uint32_t ppdu_id)
{
    uint32_t i = 0;
    uint32_t found = 0;
    uint32_t seq_no = 0;
    struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
    qdf_nbuf_t mpdu = NULL;

    for (i = 1; i < ppdu_desc_cnt; i++) {
        ppdu_desc = (struct cdp_tx_completion_ppdu *)
            qdf_nbuf_data(nbuf_ppdu_desc_list[i]);

        /* check if seq number is between the range */
        if ((peer_id == ppdu_desc->user[0].peer_id) &&
            ((missed_seq_no >= ppdu_desc->user[0].start_seq) &&
             (missed_seq_no <= ppdu_desc->user[0].last_enq_seq))) {
            seq_no = ppdu_desc->user[0].start_seq;
            if (SEQ_BIT(ppdu_desc->user[0].failed_bitmap,
                        (missed_seq_no - seq_no))) {
                found = 1;
                break;
            }
        }
    }

    if (found == 0) {
        /* mpdu not found in sched cmd id */
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_DEBUG,
                  "%s: missed seq_no[%d] ppdu_id[%d] [%d] not found!!!",
                  __func__, missed_seq_no, ppdu_id, ppdu_desc_cnt);
        return NULL;
    }

    QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_DEBUG,
              "%s: seq_no[%d] missed ppdu_id[%d] m[%d] found in ppdu_id[%d]!!",
              __func__,
              missed_seq_no, ppdu_id,
              (missed_seq_no - seq_no), ppdu_desc->ppdu_id);

    mpdu = qdf_nbuf_queue_first(&ppdu_desc->mpdu_q);
    if (!mpdu) {
        /* bitmap shows it found sequence number, but
         * MPDU not found in PPDU
         */
        QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE, QDF_TRACE_LEVEL_ERROR,
                  "%s: missed seq_no[%d] ppdu_id[%d] [%d] found but queue empty!!!",
                  __func__, missed_seq_no, ppdu_id, ppdu_desc_cnt);
        return NULL;
    }

    for (i = 0; i < (missed_seq_no - seq_no); i++) {
        mpdu = mpdu->next;
        if (!mpdu) {
            /*
             * bitmap shows it found sequence number,
             * but queue empty, do we need to allocate
             * skb and send instead of NULL ?
             * add counter here:
             */
            return NULL;
        }
    }

    return qdf_nbuf_copy_expand(mpdu, MAX_MONITOR_HEADER, 0);
}

/**
 * dp_tx_update_user_mpdu_info(): Function to update mpdu info
 * from ppdu_desc
 * @ppdu_id: ppdu_id
 * @mpdu_info: cdp_tx_indication_mpdu_info
 * @user: cdp_tx_completion_ppdu_user
 *
 * return: void
 */
static void
dp_tx_update_user_mpdu_info(uint32_t ppdu_id,
                            struct cdp_tx_indication_mpdu_info *mpdu_info,
                            struct cdp_tx_completion_ppdu_user *user)
{
    mpdu_info->ppdu_id = ppdu_id;
    mpdu_info->frame_ctrl = user->frame_ctrl;
    mpdu_info->qos_ctrl = user->qos_ctrl;
    mpdu_info->tid = user->tid;
    mpdu_info->ltf_size = user->ltf_size;
    mpdu_info->he_re = user->he_re;
    mpdu_info->txbf = user->txbf;
    mpdu_info->bw = user->bw;
    mpdu_info->nss = user->nss;
    mpdu_info->mcs = user->mcs;
    mpdu_info->preamble = user->preamble;
    mpdu_info->gi = user->gi;
    mpdu_info->ack_rssi = user->ack_rssi[0];
    mpdu_info->tx_rate = user->tx_rate;
    mpdu_info->ldpc = user->ldpc;
    mpdu_info->ppdu_cookie = user->ppdu_cookie;

    qdf_mem_copy(mpdu_info->mac_address, user->mac_addr, 6);

    mpdu_info->ba_start_seq = user->ba_seq_no;

    qdf_mem_copy(mpdu_info->ba_bitmap, user->ba_bitmap,
                 CDP_BA_256_BIT_MAP_SIZE_DWORDS * sizeof(uint32_t));
}

static inline
void dp_tx_update_sequence_number(qdf_nbuf_t nbuf, uint32_t seq_no)
{
    struct ieee80211_frame *ptr_wh = NULL;
    uint16_t wh_seq = 0;

    if (!nbuf)
        return;

    /* update sequence number in frame header */
    ptr_wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf);

    wh_seq = (seq_no & 0xFFF) << 4;
    qdf_mem_copy(ptr_wh->i_seq, &wh_seq, sizeof(uint16_t));
}
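
/*
 * In the 802.11 Sequence Control field the low 4 bits carry the fragment
 * number and the upper 12 bits the sequence number, hence the
 * (seq_no & 0xFFF) << 4 shift in the helper above.
 */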

/**
 * dp_send_mpdu_info_to_stack(): Function to deliver mpdu info to upper layer
 * @pdev: DP pdev handle
 * @ppdu_desc: tx completion ppdu descriptor
 *
 * return: void
 */
static
void dp_send_mpdu_info_to_stack(struct dp_pdev *pdev,
                                struct cdp_tx_completion_ppdu
                                *ppdu_desc)
{
    struct cdp_tx_indication_info tx_capture_info;
    struct cdp_tx_indication_mpdu_info *mpdu_info;
    int i;
    uint32_t seq_no, start_seq;
    uint32_t ppdu_id = ppdu_desc->ppdu_id;

    qdf_mem_set(&tx_capture_info,
                sizeof(struct cdp_tx_indication_info),
                0);

    mpdu_info = &tx_capture_info.mpdu_info;

    mpdu_info->channel = ppdu_desc->channel;
    mpdu_info->frame_type = ppdu_desc->frame_type;
    mpdu_info->ppdu_start_timestamp =
        ppdu_desc->ppdu_start_timestamp;
    mpdu_info->ppdu_end_timestamp =
        ppdu_desc->ppdu_end_timestamp;
    mpdu_info->tx_duration = ppdu_desc->tx_duration;
    mpdu_info->num_msdu = ppdu_desc->num_msdu;

    /* update cdp_tx_indication_mpdu_info */
    dp_tx_update_user_mpdu_info(ppdu_id,
                                &tx_capture_info.mpdu_info,
                                &ppdu_desc->user[0]);
    tx_capture_info.mpdu_info.channel_num =
        pdev->operating_channel;

    start_seq = ppdu_desc->user[0].start_seq;
    for (i = 0; i < ppdu_desc->user[0].ba_size; i++) {
        if (qdf_likely(ppdu_desc->user[0].tid !=
            DP_NON_QOS_TID) &&
            !(SEQ_BIT(ppdu_desc->user[0].enq_bitmap, i))) {
            continue;
        }
        seq_no = start_seq + i;
        if (!ppdu_desc->mpdus[i])
            continue;

        tx_capture_info.mpdu_nbuf = ppdu_desc->mpdus[i];
        ppdu_desc->mpdus[i] = NULL;
        mpdu_info->seq_no = seq_no;
        dp_tx_update_sequence_number(tx_capture_info.mpdu_nbuf, seq_no);

        /*
         * send MPDU to osif layer
         * do we need to update mpdu_info before transmit
         * get current mpdu_nbuf
         */
        dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
                             &tx_capture_info,
                             HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);

        if (tx_capture_info.mpdu_nbuf)
            qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
    }
}

static void
dp_tx_mon_proc_xretries(struct dp_pdev *pdev, struct dp_peer *peer,
                        uint16_t tid)
{
    struct dp_tx_tid *tx_tid = &peer->tx_capture.tx_tid[tid];
    struct cdp_tx_completion_ppdu *ppdu_desc;
    struct cdp_tx_completion_ppdu *xretry_ppdu;
    qdf_nbuf_t ppdu_nbuf;
    qdf_nbuf_t mpdu_nbuf;
    uint32_t mpdu_tried = 0;
    int i;
    uint32_t seq_no;

    xretry_ppdu = &tx_tid->xretry_ppdu;

    if (qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q)) {
        qdf_nbuf_queue_free(&xretry_ppdu->mpdu_q);
        return;
    }

    if (qdf_nbuf_is_queue_empty(&xretry_ppdu->mpdu_q))
        return;

    ppdu_nbuf = qdf_nbuf_queue_first(&tx_tid->pending_ppdu_q);
    while (ppdu_nbuf) {
        struct msdu_completion_info *ptr_msdu_info = NULL;

        ppdu_desc = (struct cdp_tx_completion_ppdu *)
            qdf_nbuf_data(ppdu_nbuf);
        if (ppdu_desc->pending_retries) {
            uint32_t start_seq = ppdu_desc->user[0].start_seq;

            mpdu_tried = ppdu_desc->user[0].mpdu_tried_ucast +
                ppdu_desc->user[0].mpdu_tried_mcast;
            mpdu_nbuf = qdf_nbuf_queue_first(&xretry_ppdu->mpdu_q);
            for (i = 0; (mpdu_tried > 0) && mpdu_nbuf; i++) {
                if (!(SEQ_BIT(ppdu_desc->user[0].enq_bitmap,
                    i)))
                    continue;
                mpdu_tried--;
                /* missed seq number */
                seq_no = start_seq + i;

                if (SEQ_BIT(ppdu_desc->user[0].failed_bitmap, i))
                    continue;
                QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
                          QDF_TRACE_LEVEL_INFO,
                          "%s: fill seqno %d from xretries",
                          __func__, seq_no);

                ptr_msdu_info = (struct msdu_completion_info *)
                    (qdf_nbuf_data(qdf_nbuf_get_ext_list(
                        mpdu_nbuf)) -
                    (sizeof(struct msdu_completion_info) +
                    sizeof(qdf_ether_header_t)));
                ptr_msdu_info->transmit_cnt--;
                SEQ_SEG(ppdu_desc->user[0].failed_bitmap, i) |=
                    SEQ_SEG_MSK(ppdu_desc->user[0].failed_bitmap[0],
                                i);
                ppdu_desc->pending_retries--;
                if (ptr_msdu_info->transmit_cnt == 0) {
                    ppdu_desc->mpdus[seq_no - start_seq] =
                        mpdu_nbuf;
                    qdf_nbuf_queue_remove(
                        &xretry_ppdu->mpdu_q);
                    mpdu_nbuf = qdf_nbuf_queue_first(
                        &xretry_ppdu->mpdu_q);
                } else {
                    ppdu_desc->mpdus[seq_no - start_seq] =
                        qdf_nbuf_copy_expand(mpdu_nbuf,
                                             MAX_MONITOR_HEADER,
                                             0);
                    mpdu_nbuf =
                        qdf_nbuf_queue_next(mpdu_nbuf);
                }
            }
        }
        if ((ppdu_desc->pending_retries == 0) && (ppdu_nbuf ==
            qdf_nbuf_queue_first(&tx_tid->pending_ppdu_q))) {
            qdf_nbuf_queue_remove(&tx_tid->pending_ppdu_q);
            /* Deliver PPDU */
            dp_send_mpdu_info_to_stack(pdev, ppdu_desc);
            qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
            qdf_mem_free(ppdu_desc->mpdus);
            ppdu_desc->mpdus = NULL;
            qdf_nbuf_free(ppdu_nbuf);
            ppdu_nbuf = qdf_nbuf_queue_first(
                &tx_tid->pending_ppdu_q);
        } else {
            ppdu_nbuf = qdf_nbuf_queue_next(ppdu_nbuf);
        }
    }
    qdf_nbuf_queue_free(&xretry_ppdu->mpdu_q);
}
  1248. #define MAX_PENDING_PPDUS 32
  1249. static void
  1250. dp_tx_mon_proc_pending_ppdus(struct dp_pdev *pdev, struct dp_tx_tid *tx_tid,
  1251. qdf_nbuf_t nbuf_ppdu_desc_list[], uint32_t
  1252. ppdu_desc_cnt, qdf_nbuf_queue_t *head_ppdu,
  1253. uint32_t peer_id)
  1254. {
  1255. struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
  1256. struct cdp_tx_completion_ppdu *cur_ppdu_desc = NULL;
  1257. qdf_nbuf_t pend_ppdu;
  1258. uint32_t ppdu_cnt;
  1259. uint32_t failed_seq;
  1260. uint32_t cur_index, cur_start_seq, cur_last_seq;
  1261. int i, k;
  1262. bool last_pend_ppdu = false;
  1263. qdf_nbuf_t tmp_nbuf;
  1264. pend_ppdu = qdf_nbuf_queue_first(&tx_tid->pending_ppdu_q);
  1265. if (!pend_ppdu) {
  1266. for (ppdu_cnt = 0; ppdu_cnt < ppdu_desc_cnt; ppdu_cnt++) {
  1267. if (!nbuf_ppdu_desc_list[ppdu_cnt])
  1268. continue;
  1269. ppdu_desc = (struct cdp_tx_completion_ppdu *)
  1270. qdf_nbuf_data(
  1271. nbuf_ppdu_desc_list[ppdu_cnt]);
  1272. if (!ppdu_desc || (peer_id !=
  1273. ppdu_desc->user[0].peer_id) || (tx_tid->tid !=
  1274. ppdu_desc->user[0].tid))
  1275. continue;
  1276. if ((ppdu_desc->pending_retries == 0) &&
  1277. qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q) &&
  1278. qdf_nbuf_is_queue_empty(head_ppdu)) {
  1279. dp_send_mpdu_info_to_stack(pdev, ppdu_desc);
  1280. qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
  1281. qdf_mem_free(ppdu_desc->mpdus);
  1282. ppdu_desc->mpdus = NULL;
  1283. tmp_nbuf = nbuf_ppdu_desc_list[ppdu_cnt];
  1284. nbuf_ppdu_desc_list[ppdu_cnt] = NULL;
  1285. qdf_nbuf_free(tmp_nbuf);
  1286. } else {
  1287. qdf_nbuf_queue_add(head_ppdu,
  1288. nbuf_ppdu_desc_list[ppdu_cnt]);
  1289. nbuf_ppdu_desc_list[ppdu_cnt] = NULL;
  1290. }
  1291. }
  1292. return;
  1293. }
  1294. while (pend_ppdu) {
  1295. qdf_nbuf_t mpdu_nbuf;
  1296. /* Find missing mpdus from current schedule list */
  1297. ppdu_cnt = 0;
  1298. if (!nbuf_ppdu_desc_list[ppdu_cnt]) {
  1299. ppdu_cnt++;
  1300. if (ppdu_cnt < ppdu_desc_cnt)
  1301. continue;
  1302. break;
  1303. }
  1304. ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(
  1305. pend_ppdu);
  1306. cur_ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(
  1307. nbuf_ppdu_desc_list[ppdu_cnt]);
  1308. if (pend_ppdu == qdf_nbuf_queue_last(
  1309. &tx_tid->pending_ppdu_q)) {
  1310. last_pend_ppdu = true;
  1311. qdf_nbuf_queue_add(head_ppdu,
  1312. nbuf_ppdu_desc_list[ppdu_cnt]);
  1313. nbuf_ppdu_desc_list[ppdu_cnt] = NULL;
  1314. }
  1315. cur_index = 0;
  1316. cur_start_seq = cur_ppdu_desc->user[0].start_seq;
  1317. cur_last_seq = cur_ppdu_desc->user[0].last_enq_seq;
  1318. if (qdf_unlikely(ppdu_desc->user[0].ba_size >
  1319. CDP_BA_256_BIT_MAP_SIZE_DWORDS *
  1320. SEQ_SEG_SZ_BITS(ppdu_desc->user[0].failed_bitmap))) {
  1321. qdf_assert_always(0);
  1322. return;
  1323. }
		for (i = 0; (i < ppdu_desc->user[0].ba_size) && cur_ppdu_desc;
		     i++) {
			if (!(i & (SEQ_SEG_SZ_BITS(
			    ppdu_desc->user[0].failed_bitmap) - 1))) {
				k = SEQ_SEG_INDEX(
					ppdu_desc->user[0].failed_bitmap, i);
				failed_seq =
					ppdu_desc->user[0].failed_bitmap[k] ^
					ppdu_desc->user[0].enq_bitmap[k];
			}

			/* Skip to next bitmap segment if there are no
			 * more holes in current segment
			 */
			if (!failed_seq) {
				i = ((k + 1) *
				    SEQ_SEG_SZ_BITS(ppdu_desc->user[0].failed_bitmap))
				    - 1;
				continue;
			}
			if (!(SEQ_SEG_BIT(failed_seq, i)))
				continue;
			failed_seq ^= SEQ_SEG_MSK(failed_seq, i);

			mpdu_nbuf = cur_ppdu_desc->mpdus[cur_index];
			if (mpdu_nbuf) {
				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_INFO,
					  "%s: fill seqno %d (%d) from swretries",
					  __func__,
					  ppdu_desc->user[0].start_seq + i,
					  ppdu_desc->ppdu_id);
				ppdu_desc->mpdus[i] =
					qdf_nbuf_copy_expand(mpdu_nbuf,
							     MAX_MONITOR_HEADER,
							     0);
				ppdu_desc->user[0].failed_bitmap[k] |=
					SEQ_SEG_MSK(ppdu_desc->user[0].failed_bitmap[k],
						    i);
				ppdu_desc->pending_retries--;
			}
			cur_index++;
			/* Skip through empty slots in current PPDU */
			while (!(SEQ_BIT(cur_ppdu_desc->user[0].enq_bitmap,
			       cur_index))) {
				struct cdp_tx_completion_ppdu *next_ppdu = NULL;

				cur_index++;
				if (cur_index <= (cur_last_seq -
				    cur_start_seq))
					continue;
				cur_ppdu_desc = NULL;
				/* Check if subsequent PPDUs in this schedule
				 * have higher sequence numbers enqueued
				 */
				while (ppdu_cnt < (ppdu_desc_cnt - 1)) {
					ppdu_cnt++;
					if (!nbuf_ppdu_desc_list[ppdu_cnt])
						continue;
					next_ppdu =
						(struct cdp_tx_completion_ppdu *)
						qdf_nbuf_data(
						nbuf_ppdu_desc_list[ppdu_cnt]);
					if (!next_ppdu || (peer_id !=
					    next_ppdu->user[0].peer_id))
						continue;
					if (last_pend_ppdu) {
						qdf_nbuf_queue_add(head_ppdu,
						    nbuf_ppdu_desc_list[ppdu_cnt]);
						nbuf_ppdu_desc_list[ppdu_cnt] =
							NULL;
					}
					if (next_ppdu->user[0].last_enq_seq >
					    cur_last_seq) {
						cur_ppdu_desc = next_ppdu;
						break;
					}
				}
				if (!cur_ppdu_desc)
					break;
				/* Start from seq. no following cur_last_seq
				 * since everything before is already populated
				 * from previous PPDU
				 */
				cur_start_seq =
					cur_ppdu_desc->user[0].start_seq;
				cur_index = (cur_last_seq >= cur_start_seq) ?
					cur_last_seq - cur_start_seq + 1 : 0;
				cur_last_seq =
					cur_ppdu_desc->user[0].last_enq_seq;
			}
		}
		if ((pend_ppdu ==
		    qdf_nbuf_queue_first(&tx_tid->pending_ppdu_q)) &&
		    (ppdu_desc->pending_retries == 0)) {
			qdf_nbuf_queue_remove(&tx_tid->pending_ppdu_q);
			dp_send_mpdu_info_to_stack(pdev, ppdu_desc);
			qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
			qdf_mem_free(ppdu_desc->mpdus);
			ppdu_desc->mpdus = NULL;
			qdf_nbuf_free(pend_ppdu);
			pend_ppdu = qdf_nbuf_queue_first(
				&tx_tid->pending_ppdu_q);
		} else {
			pend_ppdu = qdf_nbuf_queue_next(pend_ppdu);
		}
	}
}

/**
 * dp_check_ppdu_and_deliver(): Check PPDUs for any holes and deliver
 * to upper layer if complete
 * @pdev: DP pdev handle
 * @nbuf_ppdu_desc_list: ppdu_desc list per sched cmd id
 * @ppdu_desc_cnt: number of ppdu_desc entries in the list
 *
 * return: void
 */
static void
dp_check_ppdu_and_deliver(struct dp_pdev *pdev,
			  qdf_nbuf_t nbuf_ppdu_desc_list[],
			  uint32_t ppdu_desc_cnt)
{
	uint32_t ppdu_id;
	uint32_t desc_cnt;
	qdf_nbuf_t tmp_nbuf;
	struct dp_tx_tid *tx_tid = NULL;
	int i;
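
	/* First pass: for each descriptor in this schedule, build the
	 * per-BA-window MPDU array, plugging holes with clones retried
	 * in later PPDUs of the same schedule where available.
	 */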
	for (desc_cnt = 0; desc_cnt < ppdu_desc_cnt; desc_cnt++) {
		struct cdp_tx_completion_ppdu *ppdu_desc;
		uint32_t num_mpdu;
		uint16_t start_seq, seq_no = 0;
		int i;
		uint32_t len;
		qdf_nbuf_t mpdu_nbuf;
		struct dp_peer *peer;

		if (!nbuf_ppdu_desc_list[desc_cnt])
			continue;

		ppdu_desc = (struct cdp_tx_completion_ppdu *)
			qdf_nbuf_data(nbuf_ppdu_desc_list[desc_cnt]);
		ppdu_id = ppdu_desc->ppdu_id;
		if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_CTRL) {
			struct cdp_tx_indication_info tx_capture_info;
			struct cdp_tx_indication_mpdu_info *mpdu_info;
			qdf_nbuf_t mgmt_ctl_nbuf;
			uint8_t type, subtype;

			qdf_mem_set(&tx_capture_info,
				    sizeof(struct cdp_tx_indication_info),
				    0);
			mpdu_info = &tx_capture_info.mpdu_info;
			mpdu_info->channel = ppdu_desc->channel;
			mpdu_info->frame_type = ppdu_desc->frame_type;
			mpdu_info->ppdu_start_timestamp =
				ppdu_desc->ppdu_start_timestamp;
			mpdu_info->ppdu_end_timestamp =
				ppdu_desc->ppdu_end_timestamp;
			mpdu_info->tx_duration = ppdu_desc->tx_duration;
			mpdu_info->seq_no = seq_no;
			mpdu_info->num_msdu = ppdu_desc->num_msdu;

			/* update cdp_tx_indication_mpdu_info */
			dp_tx_update_user_mpdu_info(ppdu_id,
						    &tx_capture_info.mpdu_info,
						    &ppdu_desc->user[0]);
			tx_capture_info.mpdu_info.channel_num =
				pdev->operating_channel;

			type = (ppdu_desc->frame_ctrl &
				IEEE80211_FC0_TYPE_MASK) >>
				IEEE80211_FC0_TYPE_SHIFT;
			subtype = (ppdu_desc->frame_ctrl &
				   IEEE80211_FC0_SUBTYPE_MASK) >>
				   IEEE80211_FC0_SUBTYPE_SHIFT;

			qdf_spin_lock_bh(
				&pdev->tx_capture.ctl_mgmt_lock[type][subtype]);
			mgmt_ctl_nbuf = qdf_nbuf_queue_remove(
				&pdev->tx_capture.ctl_mgmt_q[type][subtype]);
			qdf_spin_unlock_bh(
				&pdev->tx_capture.ctl_mgmt_lock[type][subtype]);

			if (mgmt_ctl_nbuf) {
				struct ieee80211_frame *wh;
				uint16_t duration_le, seq_le;

				tx_capture_info.mpdu_nbuf =
					qdf_nbuf_alloc(pdev->soc->osdev,
						       MAX_MONITOR_HEADER,
						       MAX_MONITOR_HEADER,
						       4, FALSE);
				if (!tx_capture_info.mpdu_nbuf) {
					qdf_nbuf_free(mgmt_ctl_nbuf);
					goto free_ppdu_desc;
				}
				/* pull ppdu_id from the packet */
				tx_capture_info.mpdu_info.ppdu_id =
					*(uint32_t *)qdf_nbuf_data(mgmt_ctl_nbuf);
				qdf_nbuf_pull_head(mgmt_ctl_nbuf,
						   sizeof(uint32_t));
				wh = (struct ieee80211_frame *)
					qdf_nbuf_data(mgmt_ctl_nbuf);

				if (subtype != IEEE80211_FC0_SUBTYPE_BEACON) {
					duration_le = qdf_cpu_to_le16(
						ppdu_desc->tx_duration);
					wh->i_dur[1] =
						(duration_le & 0xFF00) >> 8;
					wh->i_dur[0] = duration_le & 0xFF;
					seq_le = qdf_cpu_to_le16(
						ppdu_desc->user[0].start_seq <<
						IEEE80211_SEQ_SEQ_SHIFT);
					wh->i_seq[1] = (seq_le & 0xFF00) >> 8;
					wh->i_seq[0] = seq_le & 0xFF;
				}

				qdf_nbuf_append_ext_list(
					tx_capture_info.mpdu_nbuf,
					mgmt_ctl_nbuf,
					qdf_nbuf_len(mgmt_ctl_nbuf));

				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_DEBUG,
					  "ctrl/mgmt frm(0x%08x): fc 0x%x 0x%x\n",
					  tx_capture_info.mpdu_info.ppdu_id,
					  wh->i_fc[1], wh->i_fc[0]);
				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_DEBUG,
					  "desc->ppdu_id 0x%08x\n", ppdu_id);
			} else if ((ppdu_desc->frame_ctrl &
				   IEEE80211_FC0_TYPE_MASK) ==
				   IEEE80211_FC0_TYPE_CTL) {
				struct ieee80211_frame_min_one *wh_min;
				uint16_t frame_ctrl_le, duration_le;

				tx_capture_info.mpdu_nbuf =
					qdf_nbuf_alloc(pdev->soc->osdev,
						       MAX_MONITOR_HEADER +
						       MAX_DUMMY_FRM_BODY,
						       MAX_MONITOR_HEADER,
						       4, FALSE);
				if (!tx_capture_info.mpdu_nbuf)
					goto free_ppdu_desc;
				wh_min = (struct ieee80211_frame_min_one *)
					qdf_nbuf_data(
					tx_capture_info.mpdu_nbuf);
				qdf_mem_zero(wh_min, MAX_DUMMY_FRM_BODY);
				frame_ctrl_le =
					qdf_cpu_to_le16(ppdu_desc->frame_ctrl);
				duration_le =
					qdf_cpu_to_le16(ppdu_desc->tx_duration);
				wh_min->i_fc[1] = (frame_ctrl_le & 0xFF00) >> 8;
				wh_min->i_fc[0] = (frame_ctrl_le & 0xFF);
				wh_min->i_dur[1] = (duration_le & 0xFF00) >> 8;
				wh_min->i_dur[0] = (duration_le & 0xFF);
				qdf_mem_copy(wh_min->i_addr1,
					     mpdu_info->mac_address,
					     QDF_MAC_ADDR_SIZE);
				qdf_nbuf_set_pktlen(tx_capture_info.mpdu_nbuf,
						    sizeof(*wh_min));
				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_DEBUG,
					  "frm(0x%08x): fc %x %x, dur 0x%x%x\n",
					  ppdu_id, wh_min->i_fc[1],
					  wh_min->i_fc[0],
					  wh_min->i_dur[1], wh_min->i_dur[0]);
			}

			/*
			 * send MPDU to osif layer
			 */
			dp_wdi_event_handler(WDI_EVENT_TX_DATA, pdev->soc,
					     &tx_capture_info, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);

			if (tx_capture_info.mpdu_nbuf)
				qdf_nbuf_free(tx_capture_info.mpdu_nbuf);
free_ppdu_desc:
			tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
			nbuf_ppdu_desc_list[desc_cnt] = NULL;
			qdf_nbuf_free(tmp_nbuf);
			continue;
		}
		if (qdf_nbuf_is_queue_empty(&ppdu_desc->mpdu_q)) {
			tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
			nbuf_ppdu_desc_list[desc_cnt] = NULL;
			qdf_nbuf_free(tmp_nbuf);
			continue;
		}

		peer = dp_peer_find_by_id(pdev->soc,
					  ppdu_desc->user[0].peer_id);
		if (!peer) {
			tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
			nbuf_ppdu_desc_list[desc_cnt] = NULL;
			qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
			qdf_nbuf_free(tmp_nbuf);
			continue;
		}

		tx_tid = &peer->tx_capture.tx_tid[ppdu_desc->user[0].tid];
		ppdu_id = ppdu_desc->ppdu_id;

		/* find mpdu tried is same as success mpdu */
		num_mpdu = ppdu_desc->user[0].mpdu_success;

		/* get length */
		len = qdf_nbuf_queue_len(&ppdu_desc->mpdu_q);

		/* ba_size is updated in BA bitmap TLVs, which are not received
		 * in case of non-QoS TID.
		 */
		if (qdf_unlikely(ppdu_desc->user[0].tid == DP_NON_QOS_TID)) {
			ppdu_desc->user[0].ba_size = 1;
			ppdu_desc->user[0].last_enq_seq =
				ppdu_desc->user[0].start_seq;
		}

		/* find list of missing sequence */
		ppdu_desc->mpdus = qdf_mem_malloc(sizeof(qdf_nbuf_t) *
						  ppdu_desc->user[0].ba_size);
		if (qdf_unlikely(!ppdu_desc->mpdus)) {
			QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
				  QDF_TRACE_LEVEL_FATAL,
				  "%s: ppdu_desc->mpdus allocation failed",
				  __func__);
			tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
			nbuf_ppdu_desc_list[desc_cnt] = NULL;
			qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
			qdf_nbuf_free(tmp_nbuf);
			continue;
		}

		if (qdf_unlikely(ppdu_desc->user[0].ba_size >
		    CDP_BA_256_BIT_MAP_SIZE_DWORDS *
		    SEQ_SEG_SZ_BITS(ppdu_desc->user[0].failed_bitmap))) {
			qdf_assert_always(0);
			return;
		}
		/* Fill seq holes within current schedule list */
		start_seq = ppdu_desc->user[0].start_seq;
		for (i = 0; i < ppdu_desc->user[0].ba_size; i++) {
			if (qdf_likely(ppdu_desc->user[0].tid !=
			    DP_NON_QOS_TID) &&
			    !(SEQ_BIT(ppdu_desc->user[0].enq_bitmap, i)))
				continue;
			/* missed seq number */
			seq_no = start_seq + i;
			/* Fill failed MPDUs in AMPDU if they're available in
			 * subsequent PPDUs in current burst schedule. This
			 * is not applicable for non-QoS TIDs (no AMPDUs)
			 */
			if (qdf_likely(ppdu_desc->user[0].tid !=
			    DP_NON_QOS_TID) &&
			    !(SEQ_BIT(ppdu_desc->user[0].failed_bitmap, i))) {
				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_DEBUG,
					  "%s: find seq %d in next ppdu %d",
					  __func__, seq_no,
					  ppdu_desc_cnt);
				mpdu_nbuf = get_mpdu_clone_from_next_ppdu(
						nbuf_ppdu_desc_list +
						desc_cnt,
						ppdu_desc_cnt -
						desc_cnt, seq_no,
						ppdu_desc->user[0].peer_id,
						ppdu_id);
				/* check mpdu_nbuf NULL */
				if (!mpdu_nbuf) {
					ppdu_desc->pending_retries++;
					continue;
				}
				ppdu_desc->mpdus[seq_no - start_seq] =
					mpdu_nbuf;
				SEQ_SEG(ppdu_desc->user[0].failed_bitmap, i) |=
					SEQ_SEG_MSK(ppdu_desc->user[0].failed_bitmap[0],
						    i);
			} else {
				/* any error case we need to handle */
				ppdu_desc->mpdus[seq_no - start_seq] =
					qdf_nbuf_queue_remove(
						&ppdu_desc->mpdu_q);
			}
		}

		dp_peer_unref_del_find_by_id(peer);
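		/* Deliver in order: this PPDU can go up immediately only if
		 * all of its holes were filled (pending_retries == 0) and
		 * nothing older is waiting on this TID's pending queue;
		 * otherwise it stays on the list for the pending-PPDU pass
		 * below.
		 */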
		if ((ppdu_desc->pending_retries == 0) &&
		    qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q)) {
			dp_send_mpdu_info_to_stack(pdev, ppdu_desc);
			qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
			qdf_mem_free(ppdu_desc->mpdus);
			ppdu_desc->mpdus = NULL;
			tmp_nbuf = nbuf_ppdu_desc_list[desc_cnt];
			nbuf_ppdu_desc_list[desc_cnt] = NULL;
			qdf_nbuf_free(tmp_nbuf);
		}
	}
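
	/* Second pass: for every descriptor still on the list, run the
	 * pending-PPDU processing per peer/TID, flush whatever became
	 * deliverable in order, and park the rest on pending_ppdu_q.
	 */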
	for (i = 0; i < ppdu_desc_cnt; i++) {
		uint32_t pending_ppdus;
		struct cdp_tx_completion_ppdu *cur_ppdu_desc;
		struct dp_peer *peer;
		qdf_nbuf_queue_t head_ppdu;

		if (!nbuf_ppdu_desc_list[i])
			continue;
		cur_ppdu_desc = (struct cdp_tx_completion_ppdu *)qdf_nbuf_data(
			nbuf_ppdu_desc_list[i]);
		if (!cur_ppdu_desc)
			continue;

		peer = dp_peer_find_by_id(pdev->soc,
					  cur_ppdu_desc->user[0].peer_id);
		if (!peer) {
			tmp_nbuf = nbuf_ppdu_desc_list[i];
			nbuf_ppdu_desc_list[i] = NULL;
			qdf_nbuf_queue_free(&cur_ppdu_desc->mpdu_q);
			qdf_mem_free(cur_ppdu_desc->mpdus);
			qdf_nbuf_free(tmp_nbuf);
			continue;
		}

		tx_tid = &peer->tx_capture.tx_tid[cur_ppdu_desc->user[0].tid];
		qdf_nbuf_queue_init(&head_ppdu);
		dp_tx_mon_proc_pending_ppdus(pdev, tx_tid,
					     nbuf_ppdu_desc_list + i,
					     ppdu_desc_cnt - i, &head_ppdu,
					     cur_ppdu_desc->user[0].peer_id);

		if (qdf_nbuf_is_queue_empty(&tx_tid->pending_ppdu_q)) {
			while ((tmp_nbuf = qdf_nbuf_queue_first(&head_ppdu))) {
				cur_ppdu_desc =
					(struct cdp_tx_completion_ppdu *)
					qdf_nbuf_data(tmp_nbuf);
				if (cur_ppdu_desc->pending_retries)
					break;
				dp_send_mpdu_info_to_stack(pdev, cur_ppdu_desc);
				qdf_nbuf_queue_free(&cur_ppdu_desc->mpdu_q);
				qdf_mem_free(cur_ppdu_desc->mpdus);
				cur_ppdu_desc->mpdus = NULL;
				qdf_nbuf_queue_remove(&head_ppdu);
				qdf_nbuf_free(tmp_nbuf);
			}
		}
		qdf_nbuf_queue_append(&tx_tid->pending_ppdu_q, &head_ppdu);

		dp_tx_mon_proc_xretries(pdev, peer, tx_tid->tid);

		dp_peer_unref_del_find_by_id(peer);
		pending_ppdus = qdf_nbuf_queue_len(&tx_tid->pending_ppdu_q);
		if (pending_ppdus > MAX_PENDING_PPDUS) {
			QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
				  QDF_TRACE_LEVEL_FATAL,
				  "pending ppdus (%d, %d) : %d\n",
				  cur_ppdu_desc->user[0].peer_id,
				  tx_tid->tid, pending_ppdus);
		}
	}
}

/**
 * dp_tx_ppdu_stats_process - Deferred PPDU stats handler
 * @context: Opaque work context (PDEV)
 *
 * Return: none
 */
void dp_tx_ppdu_stats_process(void *context)
{
	uint32_t curr_sched_cmdid;
	uint32_t last_ppdu_id;
	uint32_t ppdu_cnt;
	uint32_t ppdu_desc_cnt = 0;
	struct dp_pdev *pdev = (struct dp_pdev *)context;
	struct ppdu_info *ppdu_info, *tmp_ppdu_info = NULL;
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	struct ppdu_info *sched_ppdu_info = NULL;

	STAILQ_HEAD(, ppdu_info) sched_ppdu_queue;

	struct ppdu_info *sched_ppdu_list_last_ptr;
	qdf_nbuf_t *nbuf_ppdu_desc_list;
	qdf_nbuf_t tmp_nbuf;
	struct dp_pdev_tx_capture *ptr_tx_cap = &pdev->tx_capture;
	qdf_nbuf_queue_t head_xretries;

	STAILQ_INIT(&sched_ppdu_queue);

	/* Move the PPDU entries to defer list */
	qdf_spin_lock_bh(&ptr_tx_cap->ppdu_stats_lock);
	STAILQ_CONCAT(&ptr_tx_cap->ppdu_stats_defer_queue,
		      &ptr_tx_cap->ppdu_stats_queue);
	ptr_tx_cap->ppdu_stats_defer_queue_depth +=
		ptr_tx_cap->ppdu_stats_queue_depth;
	ptr_tx_cap->ppdu_stats_queue_depth = 0;
	qdf_spin_unlock_bh(&ptr_tx_cap->ppdu_stats_lock);
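
	/* Drain the defer queue one scheduler command at a time: all PPDUs
	 * that share a sched_cmdid were transmitted in one burst and must
	 * be correlated together before delivery.
	 */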
	while (!STAILQ_EMPTY(&ptr_tx_cap->ppdu_stats_defer_queue)) {
		ppdu_info =
			STAILQ_FIRST(&ptr_tx_cap->ppdu_stats_defer_queue);
		curr_sched_cmdid = ppdu_info->sched_cmdid;

		ppdu_cnt = 0;
		STAILQ_FOREACH_SAFE(ppdu_info,
				    &ptr_tx_cap->ppdu_stats_defer_queue,
				    ppdu_info_queue_elem, tmp_ppdu_info) {
			if (curr_sched_cmdid != ppdu_info->sched_cmdid)
				break;
			sched_ppdu_list_last_ptr = ppdu_info;
			ppdu_cnt++;
		}
		if (ppdu_info && (curr_sched_cmdid == ppdu_info->sched_cmdid) &&
		    ptr_tx_cap->ppdu_stats_next_sched < now_ms)
			break;

		last_ppdu_id = sched_ppdu_list_last_ptr->ppdu_id;

		STAILQ_FIRST(&sched_ppdu_queue) =
			STAILQ_FIRST(&ptr_tx_cap->ppdu_stats_defer_queue);
		STAILQ_REMOVE_HEAD_UNTIL(&ptr_tx_cap->ppdu_stats_defer_queue,
					 sched_ppdu_list_last_ptr,
					 ppdu_info_queue_elem);
		STAILQ_NEXT(sched_ppdu_list_last_ptr,
			    ppdu_info_queue_elem) = NULL;

		ptr_tx_cap->ppdu_stats_defer_queue_depth -= ppdu_cnt;

		nbuf_ppdu_desc_list =
			(qdf_nbuf_t *)qdf_mem_malloc(sizeof(qdf_nbuf_t) *
						     ppdu_cnt);
		/*
		 * if there is no memory allocated we need to free sched ppdu
		 * list, no ppdu stats will be updated.
		 */
		if (!nbuf_ppdu_desc_list) {
			STAILQ_FOREACH_SAFE(sched_ppdu_info,
					    &sched_ppdu_queue,
					    ppdu_info_queue_elem,
					    tmp_ppdu_info) {
				ppdu_info = sched_ppdu_info;
				tmp_nbuf = ppdu_info->nbuf;
				qdf_mem_free(ppdu_info);
				qdf_nbuf_free(tmp_nbuf);
			}
			continue;
		}
		qdf_spin_lock(&ptr_tx_cap->config_lock);
		ppdu_desc_cnt = 0;
		STAILQ_FOREACH_SAFE(sched_ppdu_info,
				    &sched_ppdu_queue,
				    ppdu_info_queue_elem, tmp_ppdu_info) {
			struct cdp_tx_completion_ppdu *ppdu_desc = NULL;
			struct dp_peer *peer = NULL;
			qdf_nbuf_t nbuf;
			uint32_t retries = 0;
			uint32_t ret = 0;
			qdf_nbuf_queue_t head_msdu;
			uint32_t start_tsf = 0;
			uint32_t end_tsf = 0;
			uint16_t tid = 0;
			uint32_t num_msdu = 0;
			uint32_t qlen = 0;

			qdf_nbuf_queue_init(&head_msdu);
			qdf_nbuf_queue_init(&head_xretries);

			ppdu_info = sched_ppdu_info;
			ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(ppdu_info->nbuf);
			pdev->tx_ppdu_proc++;
			dp_ppdu_desc_user_stats_update(pdev, ppdu_info);
			/*
			 * While processing/correlating Tx buffers, we should
			 * hold the entire PPDU list for the given sched_cmdid
			 * instead of freeing below.
			 */
			nbuf = ppdu_info->nbuf;
			qdf_mem_free(ppdu_info);

			qdf_assert_always(nbuf);

			ppdu_desc = (struct cdp_tx_completion_ppdu *)
				qdf_nbuf_data(nbuf);
			/* send WDI event */
			if (pdev->tx_capture_enabled ==
			    CDP_TX_ENH_CAPTURE_DISABLED) {
				/**
				 * Deliver PPDU stats only for valid (acked)
				 * data frames if sniffer mode is not enabled.
				 * If sniffer mode is enabled,
				 * PPDU stats for all frames including
				 * mgmt/control frames should be delivered
				 * to upper layer.
				 */
				if (pdev->tx_sniffer_enable ||
				    pdev->mcopy_mode) {
					dp_wdi_event_handler(
						WDI_EVENT_TX_PPDU_DESC,
						pdev->soc,
						nbuf,
						HTT_INVALID_PEER,
						WDI_NO_VAL,
						pdev->pdev_id);
				} else {
					if (ppdu_desc->num_mpdu != 0 &&
					    ppdu_desc->num_users != 0 &&
					    (ppdu_desc->frame_ctrl &
					     HTT_FRAMECTRL_DATATYPE)) {
						dp_wdi_event_handler(
							WDI_EVENT_TX_PPDU_DESC,
							pdev->soc,
							nbuf,
							HTT_INVALID_PEER,
							WDI_NO_VAL,
							pdev->pdev_id);
					} else {
						qdf_nbuf_free(nbuf);
					}
				}
				continue;
			}
			peer = dp_peer_find_by_id(pdev->soc,
						  ppdu_desc->user[0].peer_id);
			/**
			 * peer can be NULL
			 */
			if (!peer) {
				qdf_nbuf_free(nbuf);
				continue;
			}

			if (ppdu_desc->frame_type == CDP_PPDU_FTYPE_DATA) {
				/**
				 * check whether it is a bss peer;
				 * if bss_peer, no need to process further.
				 * Also check whether the tx_capture feature
				 * is enabled for this peer or globally for
				 * all peers.
				 */
				if (peer->bss_peer ||
				    !dp_peer_or_pdev_tx_cap_enabled(pdev,
				    peer)) {
					dp_peer_unref_del_find_by_id(peer);
					qdf_nbuf_free(nbuf);
					continue;
				}

				/* print the bit map */
				dp_tx_print_bitmap(pdev, ppdu_desc,
						   0, ppdu_desc->ppdu_id);
				if (ppdu_desc->user[0].tid > DP_NON_QOS_TID) {
					QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
						  QDF_TRACE_LEVEL_ERROR,
						  "%s: ppdu[%d] peer_id[%d] TID[%d] > NON_QOS_TID!",
						  __func__,
						  ppdu_desc->ppdu_id,
						  ppdu_desc->user[0].peer_id,
						  ppdu_desc->user[0].tid);
					dp_peer_unref_del_find_by_id(peer);
					qdf_nbuf_free(nbuf);
					continue;
				}

				/* Non-QoS frames are indicated with TID 0
				 * in the WBM completion path, and hence we
				 * should use TID 0 to reap MSDUs from the
				 * completion path.
				 */
				if (qdf_unlikely(ppdu_desc->user[0].tid ==
				    DP_NON_QOS_TID))
					tid = 0;
				else
					tid = ppdu_desc->user[0].tid;
dequeue_msdu_again:
				num_msdu = ppdu_desc->user[0].num_msdu;
				start_tsf = ppdu_desc->ppdu_start_timestamp;
				end_tsf = ppdu_desc->ppdu_end_timestamp;
				/*
				 * retrieve the msdu buffers from the ppdu_id-
				 * and tid-based msdu queue and store them in a
				 * local queue. Sometimes wbm completions come
				 * later than the per-ppdu stats. Assumption:
				 * all packets are SU and arrive in order.
				 */
				ret = dp_tx_msdu_dequeue(peer,
							 ppdu_desc->ppdu_id,
							 tid, num_msdu,
							 &head_msdu,
							 &head_xretries,
							 start_tsf, end_tsf);

				if (!ret && (++retries < 2)) {
					/* wait for wbm to complete */
					qdf_mdelay(2);
					goto dequeue_msdu_again;
				}

				if (!qdf_nbuf_is_queue_empty(&head_xretries)) {
					struct dp_tx_tid *tx_tid =
						&peer->tx_capture.tx_tid[tid];
					struct cdp_tx_completion_ppdu
						*xretry_ppdu =
						&tx_tid->xretry_ppdu;

					xretry_ppdu->ppdu_id =
						peer->tx_capture.tx_wifi_ppdu_id;
					/* Restitch MPDUs from xretry MSDUs */
					dp_tx_mon_restitch_mpdu(pdev, peer,
								xretry_ppdu,
								&head_xretries,
								&xretry_ppdu->mpdu_q);
				}
				if (!qdf_nbuf_is_queue_empty(&head_msdu)) {
					/*
					 * now head_msdu holds the msdu list
					 * for this particular ppdu_id;
					 * restitch mpdus from msdus and
					 * create an mpdu queue
					 */
					dp_tx_mon_restitch_mpdu(pdev, peer,
								ppdu_desc,
								&head_msdu,
								&ppdu_desc->mpdu_q);
					/*
					 * sanity: free the local head_msdu
					 * queue
					 */
					qdf_nbuf_queue_free(&head_msdu);

					qlen = qdf_nbuf_queue_len(
						&ppdu_desc->mpdu_q);
					if (!qlen) {
						qdf_nbuf_free(nbuf);
						dp_peer_unref_del_find_by_id(
							peer);
						continue;
					}
				}
				nbuf_ppdu_desc_list[ppdu_desc_cnt++] = nbuf;

				/* print ppdu_desc info for debugging purpose */
				QDF_TRACE(QDF_MODULE_ID_TX_CAPTURE,
					  QDF_TRACE_LEVEL_INFO,
					  "%s: ppdu[%d], p_id[%d], tid[%d], n_mpdu[%d %d] n_msdu[%d] retr[%d] qlen[%d] s_tsf[%u] dur[%u] seq[%d] [%d %d]",
					  __func__, ppdu_desc->ppdu_id,
					  ppdu_desc->user[0].peer_id,
					  ppdu_desc->user[0].tid,
					  ppdu_desc->num_mpdu,
					  ppdu_desc->user[0].mpdu_success,
					  ppdu_desc->num_msdu, retries,
					  qlen,
					  ppdu_desc->ppdu_start_timestamp,
					  ppdu_desc->tx_duration,
					  ppdu_desc->user[0].start_seq,
					  ppdu_cnt,
					  ppdu_desc_cnt);
			} else if (ppdu_desc->frame_type ==
				   CDP_PPDU_FTYPE_CTRL) {
				nbuf_ppdu_desc_list[ppdu_desc_cnt++] = nbuf;
			} else {
				qdf_nbuf_queue_free(&ppdu_desc->mpdu_q);
				qdf_nbuf_free(nbuf);
			}

			dp_peer_unref_del_find_by_id(peer);
		}
		/*
		 * At this point we have mpdus queued per ppdu_desc;
		 * based on packet capture flags, send mpdu info to upper stack
		 */
		if (ppdu_desc_cnt) {
			dp_check_ppdu_and_deliver(pdev, nbuf_ppdu_desc_list,
						  ppdu_desc_cnt);
		}

		qdf_spin_unlock(&ptr_tx_cap->config_lock);
		qdf_mem_free(nbuf_ppdu_desc_list);
	}
}

/**
 * dp_ppdu_desc_deliver(): Function to deliver Tx PPDU status descriptor
 * to upper layer
 * @pdev: DP pdev handle
 * @ppdu_info: per PPDU TLV descriptor
 *
 * return: void
 */
void dp_ppdu_desc_deliver(struct dp_pdev *pdev,
			  struct ppdu_info *ppdu_info)
{
	uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
	struct cdp_tx_completion_ppdu *ppdu_desc = NULL;

	TAILQ_REMOVE(&pdev->ppdu_info_list, ppdu_info, ppdu_info_list_elem);
	pdev->list_depth--;

	ppdu_desc = (struct cdp_tx_completion_ppdu *)
		qdf_nbuf_data(ppdu_info->nbuf);

	qdf_spin_lock_bh(&pdev->tx_capture.ppdu_stats_lock);

	if (qdf_unlikely(!pdev->tx_capture_enabled &&
	    (pdev->tx_capture.ppdu_stats_queue_depth +
	    pdev->tx_capture.ppdu_stats_defer_queue_depth) >
	    DP_TX_PPDU_PROC_MAX_DEPTH)) {
		qdf_nbuf_free(ppdu_info->nbuf);
		qdf_mem_free(ppdu_info);
		pdev->tx_capture.ppdu_dropped++;
	} else {
		STAILQ_INSERT_TAIL(&pdev->tx_capture.ppdu_stats_queue,
				   ppdu_info, ppdu_info_queue_elem);
		pdev->tx_capture.ppdu_stats_queue_depth++;
	}
	qdf_spin_unlock_bh(&pdev->tx_capture.ppdu_stats_lock);
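
	/* Kick the deferred worker either when enough PPDUs have piled up
	 * or when the periodic deadline has passed, and re-arm the deadline.
	 */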
	if ((pdev->tx_capture.ppdu_stats_queue_depth >
	    DP_TX_PPDU_PROC_THRESHOLD) ||
	    (pdev->tx_capture.ppdu_stats_next_sched <= now_ms)) {
		qdf_queue_work(0, pdev->tx_capture.ppdu_stats_workqueue,
			       &pdev->tx_capture.ppdu_stats_work);
		pdev->tx_capture.ppdu_stats_next_sched =
			now_ms + DP_TX_PPDU_PROC_TIMEOUT;
	}
}

#endif