/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc, qdf_mem_free */
#include "htt.h"

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#define dp_rx_mon_status_alert(params...) \
    QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX_MON_STATUS, params)
#define dp_rx_mon_status_err(params...) \
    QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_MON_STATUS, params)
#define dp_rx_mon_status_warn(params...) \
    QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX_MON_STATUS, params)
#define dp_rx_mon_status_info(params...) \
    __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, \
                   QDF_MODULE_ID_DP_RX_MON_STATUS, ## params)
#define dp_rx_mon_status_debug(params...) \
    QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX_MON_STATUS, params)
static inline QDF_STATUS
dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
                                   uint32_t mac_id,
                                   struct dp_srng *dp_rxdma_srng,
                                   struct rx_desc_pool *rx_desc_pool,
                                   uint32_t num_req_buffers,
                                   union dp_rx_desc_list_elem_t **desc_list,
                                   union dp_rx_desc_list_elem_t **tail,
                                   uint8_t owner);

static inline void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
                                 struct hal_rx_ppdu_info *ppdu_info,
                                 struct cdp_rx_indication_ppdu *cdp_rx_ppdu);
/**
 * dp_rx_mon_handle_status_buf_done() - Handle status buf DMA not done
 *
 * @pdev: DP pdev handle
 * @mon_status_srng: Monitor status SRNG
 *
 * As per the MAC team's suggestion, if the HP + 2 entry's DMA done bit
 * is set, skip the HP + 1 entry and start processing in the next
 * interrupt. If the HP + 2 entry's DMA done bit is not set, keep
 * polling the HP + 1 entry until its DMA done TLV is set.
 *
 * Return: enum dp_mon_reap_status
 */
enum dp_mon_reap_status
dp_rx_mon_handle_status_buf_done(struct dp_pdev *pdev,
                                 void *mon_status_srng)
{
    struct dp_soc *soc = pdev->soc;
    hal_soc_handle_t hal_soc;
    void *ring_entry;
    uint32_t rx_buf_cookie;
    qdf_nbuf_t status_nbuf;
    struct dp_rx_desc *rx_desc;
    void *rx_tlv;
    QDF_STATUS buf_status;

    hal_soc = soc->hal_soc;

    ring_entry = hal_srng_src_peek_n_get_next_next(hal_soc,
                                                   mon_status_srng);
    if (!ring_entry) {
        dp_rx_mon_status_debug("%pK: Monitor status ring entry is NULL for SRNG: %pK",
                               soc, mon_status_srng);
        return DP_MON_STATUS_NO_DMA;
    }

    rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_entry);
    rx_desc = dp_rx_cookie_2_va_mon_status(soc, rx_buf_cookie);

    qdf_assert(rx_desc);

    status_nbuf = rx_desc->nbuf;

    qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
                          QDF_DMA_FROM_DEVICE);

    rx_tlv = qdf_nbuf_data(status_nbuf);
    buf_status = hal_get_rx_status_done(rx_tlv);

    /* If the status buffer DMA is not done:
     * 1. As per the MAC team's suggestion, if the HP + 2 entry's DMA
     *    done bit is set, replenish the HP + 1 entry and start
     *    processing in the next interrupt.
     * 2. If the HP + 2 entry's DMA done bit is not set, hold on to the
     *    mon destination ring.
     */
    if (buf_status != QDF_STATUS_SUCCESS) {
        dp_err_rl("Monitor status ring: DMA is not done for nbuf: %pK",
                  status_nbuf);
        pdev->rx_mon_stats.tlv_tag_status_err++;
        return DP_MON_STATUS_NO_DMA;
    }

    pdev->rx_mon_stats.status_buf_done_war++;

    return DP_MON_STATUS_REPLENISH;
}
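/*
 * Illustrative sketch (not code from this file): a status-ring reaper
 * is expected to act on the value returned above roughly as follows,
 * where reap_one_entry() is a hypothetical stand-in for the caller's
 * own reap step:
 *
 *     status = dp_rx_mon_handle_status_buf_done(pdev, mon_status_srng);
 *     if (status == DP_MON_STATUS_REPLENISH)
 *         reap_one_entry(pdev);    // HP + 2 done: skip/replenish HP + 1
 *     else if (status == DP_MON_STATUS_NO_DMA)
 *         ;                        // poll HP + 1 again on the next pass
 */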
#ifndef QCA_SUPPORT_FULL_MON
/**
 * dp_rx_mon_process() - Core brain processing for monitor mode
 *
 * This API processes the monitor destination ring followed by the
 * monitor status ring.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ).
 *
 * @soc: datapath soc context
 * @int_ctx: interrupt context
 * @mac_id: mac_id on which the interrupt is received
 * @quota: Number of status ring entries that can be serviced in one shot
 *
 * Return: Number of reaped status ring entries
 */
static inline uint32_t
dp_rx_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
                  uint32_t mac_id, uint32_t quota)
{
    return quota;
}
#endif

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
                         struct hal_rx_ppdu_info *ppdu_info)
{
    return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
                              qdf_nbuf_t status_nbuf,
                              struct hal_rx_ppdu_info *ppdu_info,
                              bool *nbuf_used)
{
}
#endif

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_send_ack_frame_to_stack(struct dp_soc *soc,
                           struct dp_pdev *pdev,
                           struct hal_rx_ppdu_info *ppdu_info)
{
    return QDF_STATUS_SUCCESS;
}
#endif
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_rx_populate_rx_rssi_chain() - Populate per-chain, per-BW RSSI
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 */
static inline void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
                             struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    uint8_t chain, bw;
    int8_t rssi;

    for (chain = 0; chain < SS_COUNT; chain++) {
        for (bw = 0; bw < MAX_BW; bw++) {
            rssi = ppdu_info->rx_status.rssi_chain[chain][bw];
            if (rssi != DP_RSSI_INVAL)
                cdp_rx_ppdu->rssi_chain[chain][bw] = rssi;
            else
                cdp_rx_ppdu->rssi_chain[chain][bw] = 0;
        }
    }
}

/*
 * dp_rx_populate_su_evm_details() - Populate SU EVM info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */
static inline void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
                              struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    uint8_t pilot_evm;
    uint8_t nss_count;
    uint8_t pilot_count;

    nss_count = ppdu_info->evm_info.nss_count;
    pilot_count = ppdu_info->evm_info.pilot_count;

    if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
        qdf_err("pilot evm count is more than expected");
        return;
    }
    cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
    cdp_rx_ppdu->evm_info.nss_count = nss_count;

    /* Populate EVM for pilot_evm = nss_count * pilot_count entries */
    for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
        cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
            ppdu_info->evm_info.pilot_evm[pilot_evm];
    }
}
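/*
 * Worked example for the copy loop above (the layout is an assumption
 * read off the flat indexing, not stated elsewhere in this file): with
 * nss_count = 2 and pilot_count = 4, pilot_evm[0..7] is copied as one
 * flat nss_count * pilot_count array, i.e. the EVM samples of all four
 * pilots for stream 0 followed by the four for stream 1.
 */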
/**
 * dp_rx_inc_rusize_cnt() - increment pdev stats based on RU size
 * @pdev: pdev ctx
 * @rx_user_status: mon rx user status
 *
 * Return: true if the frame is a data frame, false otherwise
 */
static inline bool
dp_rx_inc_rusize_cnt(struct dp_pdev *pdev,
                     struct mon_rx_user_status *rx_user_status)
{
    uint32_t ru_size;
    bool is_data;

    ru_size = rx_user_status->ofdma_ru_size;

    if (dp_is_subtype_data(rx_user_status->frame_control)) {
        DP_STATS_INC(pdev,
                     ul_ofdma.data_rx_ru_size[ru_size], 1);
        is_data = true;
    } else {
        DP_STATS_INC(pdev,
                     ul_ofdma.nondata_rx_ru_size[ru_size], 1);
        is_data = false;
    }

    return is_data;
}
/**
 * dp_rx_populate_cdp_indication_ppdu_user() - Populate per-user cdp indication
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu_user(struct dp_pdev *pdev,
                                        struct hal_rx_ppdu_info *ppdu_info,
                                        struct cdp_rx_indication_ppdu
                                        *cdp_rx_ppdu)
{
    struct dp_peer *peer;
    struct dp_soc *soc = pdev->soc;
    struct dp_ast_entry *ast_entry;
    uint32_t ast_index;
    int i;
    struct mon_rx_user_status *rx_user_status;
    struct mon_rx_user_info *rx_user_info;
    struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
    int ru_size;
    bool is_data = false;
    uint32_t num_users;

    num_users = ppdu_info->com_info.num_users;
    for (i = 0; i < num_users; i++) {
        if (i > OFDMA_NUM_USERS)
            return;

        rx_user_status = &ppdu_info->rx_user_status[i];
        rx_user_info = &ppdu_info->rx_user_info[i];
        rx_stats_peruser = &cdp_rx_ppdu->user[i];

        ast_index = rx_user_status->ast_index;
        if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        ast_entry = soc->ast_table[ast_index];
        if (!ast_entry || ast_entry->peer_id == HTT_INVALID_PEER) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
                                     DP_MOD_ID_RX_PPDU_STATS);
        if (!peer) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        rx_stats_peruser->is_bss_peer = peer->bss_peer;

        rx_stats_peruser->first_data_seq_ctrl =
            rx_user_status->first_data_seq_ctrl;
        rx_stats_peruser->frame_control_info_valid =
            rx_user_status->frame_control_info_valid;
        rx_stats_peruser->frame_control =
            rx_user_status->frame_control;
        rx_stats_peruser->qos_control_info_valid =
            rx_user_info->qos_control_info_valid;
        rx_stats_peruser->qos_control =
            rx_user_info->qos_control;
        rx_stats_peruser->tcp_msdu_count =
            rx_user_status->tcp_msdu_count;
        rx_stats_peruser->udp_msdu_count =
            rx_user_status->udp_msdu_count;
        rx_stats_peruser->other_msdu_count =
            rx_user_status->other_msdu_count;
        rx_stats_peruser->num_msdu =
            rx_stats_peruser->tcp_msdu_count +
            rx_stats_peruser->udp_msdu_count +
            rx_stats_peruser->other_msdu_count;
        rx_stats_peruser->preamble_type =
            rx_user_status->preamble_type;
        rx_stats_peruser->mpdu_cnt_fcs_ok =
            rx_user_status->mpdu_cnt_fcs_ok;
        rx_stats_peruser->mpdu_cnt_fcs_err =
            rx_user_status->mpdu_cnt_fcs_err;
        qdf_mem_copy(&rx_stats_peruser->mpdu_fcs_ok_bitmap,
                     &rx_user_status->mpdu_fcs_ok_bitmap,
                     HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
                     sizeof(rx_user_status->mpdu_fcs_ok_bitmap[0]));
        rx_stats_peruser->mpdu_ok_byte_count =
            rx_user_status->mpdu_ok_byte_count;
        rx_stats_peruser->mpdu_err_byte_count =
            rx_user_status->mpdu_err_byte_count;

        cdp_rx_ppdu->num_mpdu += rx_user_status->mpdu_cnt_fcs_ok;
        cdp_rx_ppdu->num_msdu += rx_stats_peruser->num_msdu;
        rx_stats_peruser->retries =
            CDP_FC_IS_RETRY_SET(rx_stats_peruser->frame_control) ?
            rx_stats_peruser->mpdu_cnt_fcs_ok : 0;

        if (rx_stats_peruser->mpdu_cnt_fcs_ok > 1)
            rx_stats_peruser->is_ampdu = 1;
        else
            rx_stats_peruser->is_ampdu = 0;

        rx_stats_peruser->tid = ppdu_info->rx_status.tid;

        qdf_mem_copy(rx_stats_peruser->mac_addr,
                     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
        rx_stats_peruser->peer_id = peer->peer_id;
        cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
        rx_stats_peruser->vdev_id = peer->vdev->vdev_id;
        rx_stats_peruser->mu_ul_info_valid = 0;
        dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);

        if (cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_OFDMA ||
            cdp_rx_ppdu->u.ppdu_type == HAL_RX_TYPE_MU_MIMO) {
            if (rx_user_status->mu_ul_info_valid) {
                rx_stats_peruser->nss = rx_user_status->nss;
                rx_stats_peruser->mcs = rx_user_status->mcs;
                rx_stats_peruser->mu_ul_info_valid =
                    rx_user_status->mu_ul_info_valid;
                rx_stats_peruser->ofdma_ru_start_index =
                    rx_user_status->ofdma_ru_start_index;
                rx_stats_peruser->ofdma_ru_width =
                    rx_user_status->ofdma_ru_width;
                rx_stats_peruser->user_index = i;
                ru_size = rx_user_status->ofdma_ru_size;
                /*
                 * max RU size will be equal to
                 * HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2
                 */
                if (ru_size >= OFDMA_NUM_RU_SIZE) {
                    dp_err("invalid ru_size %d\n", ru_size);
                    return;
                }
                is_data = dp_rx_inc_rusize_cnt(pdev,
                                               rx_user_status);
            }
            if (is_data) {
                /* counter to get number of MU OFDMA */
                pdev->stats.ul_ofdma.data_rx_ppdu++;
                pdev->stats.ul_ofdma.data_users[num_users]++;
            }
        }
    }
}
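/*
 * Note on the lookup pattern above: each monitor-status user is tied
 * back to a peer through its AST index (rx_user_status->ast_index ->
 * soc->ast_table[] -> peer_id -> dp_peer_get_ref_by_id()), and any
 * failure along that chain marks only that user with HTT_INVALID_PEER
 * instead of aborting the whole PPDU. The same pattern recurs below in
 * dp_rx_populate_cdp_indication_ppdu() and
 * dp_rx_mon_handle_cfr_mu_info().
 */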
/**
 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
                                   struct hal_rx_ppdu_info *ppdu_info,
                                   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    struct dp_peer *peer;
    struct dp_soc *soc = pdev->soc;
    struct dp_ast_entry *ast_entry;
    uint32_t ast_index;
    uint32_t i;

    cdp_rx_ppdu->first_data_seq_ctrl =
        ppdu_info->rx_status.first_data_seq_ctrl;
    cdp_rx_ppdu->frame_ctrl =
        ppdu_info->rx_status.frame_control;
    cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
    cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
    cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
    cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
    /* num mpdu is consolidated and added together in num user loop */
    cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
    /* num msdu is consolidated and added together in num user loop */
    cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
                             cdp_rx_ppdu->udp_msdu_count +
                             cdp_rx_ppdu->other_msdu_count);

    cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
        ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

    if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
        cdp_rx_ppdu->is_ampdu = 1;
    else
        cdp_rx_ppdu->is_ampdu = 0;
    cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;

    ast_index = ppdu_info->rx_status.ast_index;
    if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
        cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
        cdp_rx_ppdu->num_users = 0;
        goto end;
    }

    ast_entry = soc->ast_table[ast_index];
    if (!ast_entry || ast_entry->peer_id == HTT_INVALID_PEER) {
        cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
        cdp_rx_ppdu->num_users = 0;
        goto end;
    }

    peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
                                 DP_MOD_ID_RX_PPDU_STATS);
    if (!peer) {
        cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
        cdp_rx_ppdu->num_users = 0;
        goto end;
    }

    qdf_mem_copy(cdp_rx_ppdu->mac_addr,
                 peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
    cdp_rx_ppdu->peer_id = peer->peer_id;
    cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;

    cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
    cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
    cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
    cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
    cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
    cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
    if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
        (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
        cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
    else
        cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
    cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
    cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
    cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
                               QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
    cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
    cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
    cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
    cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
    cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
    cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
    /* ltf_size from rx_status overrides the he_data5 derivation above */
    cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;
    dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu);
    dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
    cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
    cdp_rx_ppdu->nf = ppdu_info->rx_status.chan_noise_floor;

    for (i = 0; i < MAX_CHAIN; i++)
        cdp_rx_ppdu->per_chain_rssi[i] = ppdu_info->rx_status.rssi[i];

    cdp_rx_ppdu->is_mcast_bcast = ppdu_info->nac_info.mcast_bcast;

    cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

    /* num mpdu/msdu get re-accumulated per user in the loop below */
    cdp_rx_ppdu->num_mpdu = 0;
    cdp_rx_ppdu->num_msdu = 0;

    dp_rx_populate_cdp_indication_ppdu_user(pdev, ppdu_info, cdp_rx_ppdu);

    dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);

    return;
end:
    dp_rx_populate_cfr_non_assoc_sta(pdev, ppdu_info, cdp_rx_ppdu);
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
                                   struct hal_rx_ppdu_info *ppdu_info,
                                   struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
}
#endif
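/*
 * Note: in the FEATURE_PERPKT_INFO implementation above, the end:
 * label is reached when the AST/peer lookup fails; the PPDU is still
 * handed to dp_rx_populate_cfr_non_assoc_sta() so that CFR captures
 * from unassociated stations are not dropped.
 */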
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_rx_rate_stats_update() - Update per-peer rate statistics
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 * @user: user index for MU PPDUs
 *
 * Return: None
 */
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
                                           struct cdp_rx_indication_ppdu *ppdu,
                                           uint32_t user)
{
    uint32_t ratekbps = 0;
    uint32_t ppdu_rx_rate = 0;
    uint32_t nss = 0;
    uint8_t mcs = 0;
    uint32_t rix;
    uint16_t ratecode;
    struct cdp_rx_stats_ppdu_user *ppdu_user = NULL;

    if (!peer || !ppdu)
        return;

    ppdu_user = &ppdu->user[user];

    if (ppdu->u.ppdu_type != HAL_RX_TYPE_SU) {
        if (ppdu_user->nss == 0)
            nss = 0;
        else
            nss = ppdu_user->nss - 1;
        mcs = ppdu_user->mcs;
    } else {
        if (ppdu->u.nss == 0)
            nss = 0;
        else
            nss = ppdu->u.nss - 1;
        mcs = ppdu->u.mcs;
    }

    ratekbps = dp_getrateindex(ppdu->u.gi,
                               mcs,
                               nss,
                               ppdu->u.preamble,
                               ppdu->u.bw,
                               &rix,
                               &ratecode);

    if (!ratekbps) {
        ppdu->rix = 0;
        ppdu->rx_ratekbps = 0;
        ppdu->rx_ratecode = 0;
        ppdu_user->rx_ratekbps = 0;
        return;
    }

    ppdu->rix = rix;
    DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
    dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
    ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
    DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
    ppdu->rx_ratekbps = ratekbps;
    ppdu->rx_ratecode = ratecode;
    ppdu_user->rx_ratekbps = ratekbps;

    if (peer->vdev)
        peer->vdev->stats.rx.last_rx_rate = ratekbps;
}
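/*
 * Note on the averaging above: dp_ath_rate_lpf() keeps
 * peer->stats.rx.avg_rx_rate as a running low-pass-filtered rate fed
 * with each PPDU's ratekbps from dp_getrateindex(), and
 * dp_ath_rate_out() converts the filtered value back to a plain kbps
 * figure for rnd_avg_rx_rate. The filter constants live in the
 * rate-table helpers, not in this file.
 */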
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @pdev: Datapath pdev handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
static void dp_rx_stats_update(struct dp_pdev *pdev,
                               struct cdp_rx_indication_ppdu *ppdu)
{
    struct dp_soc *soc = NULL;
    uint8_t mcs, preamble, ac = 0, nss, ppdu_type;
    uint16_t num_msdu;
    uint8_t pkt_bw_offset;
    struct dp_peer *peer;
    struct cdp_rx_stats_ppdu_user *ppdu_user;
    uint32_t i;
    enum cdp_mu_packet_type mu_pkt_type;

    if (pdev)
        soc = pdev->soc;
    else
        return;

    if (!soc || soc->process_rx_status)
        return;

    preamble = ppdu->u.preamble;
    ppdu_type = ppdu->u.ppdu_type;

    for (i = 0; i < ppdu->num_users && i < CDP_MU_MAX_USERS; i++) {
        peer = NULL;
        ppdu_user = &ppdu->user[i];
        peer = dp_peer_get_ref_by_id(soc, ppdu_user->peer_id,
                                     DP_MOD_ID_RX_PPDU_STATS);

        if (!peer)
            peer = pdev->invalid_peer;

        if ((preamble == DOT11_A) || (preamble == DOT11_B))
            ppdu->u.nss = 1;

        if (ppdu_type == HAL_RX_TYPE_SU) {
            mcs = ppdu->u.mcs;
            nss = ppdu->u.nss;
        } else {
            mcs = ppdu_user->mcs;
            nss = ppdu_user->nss;
        }

        num_msdu = ppdu_user->num_msdu;
        switch (ppdu->u.bw) {
        case CMN_BW_20MHZ:
            pkt_bw_offset = PKT_BW_GAIN_20MHZ;
            break;
        case CMN_BW_40MHZ:
            pkt_bw_offset = PKT_BW_GAIN_40MHZ;
            break;
        case CMN_BW_80MHZ:
            pkt_bw_offset = PKT_BW_GAIN_80MHZ;
            break;
        case CMN_BW_160MHZ:
            pkt_bw_offset = PKT_BW_GAIN_160MHZ;
            break;
        default:
            pkt_bw_offset = 0;
            dp_rx_mon_status_debug("%pK: Invalid BW index = %d",
                                   soc, ppdu->u.bw);
        }

        DP_STATS_UPD(peer, rx.snr, (ppdu->rssi + pkt_bw_offset));

        if (peer->stats.rx.avg_snr == CDP_INVALID_SNR)
            peer->stats.rx.avg_snr =
                CDP_SNR_IN(peer->stats.rx.snr);
        else
            CDP_SNR_UPDATE_AVG(peer->stats.rx.avg_snr,
                               peer->stats.rx.snr);

        if (ppdu_type == HAL_RX_TYPE_SU) {
            if (nss) {
                DP_STATS_INC(peer, rx.nss[nss - 1], num_msdu);
                DP_STATS_INC(peer, rx.ppdu_nss[nss - 1], 1);
            }
            DP_STATS_INC(peer, rx.mpdu_cnt_fcs_ok,
                         ppdu_user->mpdu_cnt_fcs_ok);
            DP_STATS_INC(peer, rx.mpdu_cnt_fcs_err,
                         ppdu_user->mpdu_cnt_fcs_err);
        }

        if (ppdu_type >= HAL_RX_TYPE_MU_MIMO &&
            ppdu_type <= HAL_RX_TYPE_MU_OFDMA) {
            if (ppdu_type == HAL_RX_TYPE_MU_MIMO)
                mu_pkt_type = RX_TYPE_MU_MIMO;
            else
                mu_pkt_type = RX_TYPE_MU_OFDMA;

            if (nss) {
                DP_STATS_INC(peer, rx.nss[nss - 1], num_msdu);
                DP_STATS_INC(peer,
                             rx.rx_mu[mu_pkt_type].ppdu_nss[nss - 1],
                             1);
            }

            DP_STATS_INC(peer,
                         rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_ok,
                         ppdu_user->mpdu_cnt_fcs_ok);
            DP_STATS_INC(peer,
                         rx.rx_mu[mu_pkt_type].mpdu_cnt_fcs_err,
                         ppdu_user->mpdu_cnt_fcs_err);
        }

        DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
        DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
        DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type],
                     num_msdu);
        DP_STATS_INC(peer, rx.ppdu_cnt[ppdu->u.ppdu_type], 1);
        DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu,
                      ppdu_user->is_ampdu);
        DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu,
                      !(ppdu_user->is_ampdu));
        DP_STATS_UPD(peer, rx.rx_rate, mcs);
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
                      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
                      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
                      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
                      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
                      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
                      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
                      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
                      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
                      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
        DP_STATS_INCC(peer,
                      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
                      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));
        DP_STATS_INCC(peer,
                      rx.su_ax_ppdu_cnt.mcs_count[MAX_MCS - 1], 1,
                      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX) &&
                       (ppdu_type == HAL_RX_TYPE_SU)));
        DP_STATS_INCC(peer,
                      rx.su_ax_ppdu_cnt.mcs_count[mcs], 1,
                      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX) &&
                       (ppdu_type == HAL_RX_TYPE_SU)));
        DP_STATS_INCC(peer,
                      rx.rx_mu[RX_TYPE_MU_OFDMA].ppdu.mcs_count[MAX_MCS - 1],
                      1, ((mcs >= (MAX_MCS - 1)) &&
                          (preamble == DOT11_AX) &&
                          (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
        DP_STATS_INCC(peer,
                      rx.rx_mu[RX_TYPE_MU_OFDMA].ppdu.mcs_count[mcs],
                      1, ((mcs < (MAX_MCS - 1)) &&
                          (preamble == DOT11_AX) &&
                          (ppdu_type == HAL_RX_TYPE_MU_OFDMA)));
        DP_STATS_INCC(peer,
                      rx.rx_mu[RX_TYPE_MU_MIMO].ppdu.mcs_count[MAX_MCS - 1],
                      1, ((mcs >= (MAX_MCS - 1)) &&
                          (preamble == DOT11_AX) &&
                          (ppdu_type == HAL_RX_TYPE_MU_MIMO)));
        DP_STATS_INCC(peer,
                      rx.rx_mu[RX_TYPE_MU_MIMO].ppdu.mcs_count[mcs],
                      1, ((mcs < (MAX_MCS - 1)) &&
                          (preamble == DOT11_AX) &&
                          (ppdu_type == HAL_RX_TYPE_MU_MIMO)));

        /*
         * If the TID is invalid, this could be a non-QoS frame, so do
         * not update any AC counters.
         */
        ac = TID_TO_WME_AC(ppdu_user->tid);
        if (ppdu->tid != HAL_TID_INVALID)
            DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);

        dp_peer_stats_notify(pdev, peer);
        DP_STATS_UPD(peer, rx.last_snr, ppdu->rssi);

        dp_peer_qos_stats_notify(pdev, ppdu_user);

        if (peer == pdev->invalid_peer)
            continue;

        if (dp_is_subtype_data(ppdu->frame_ctrl))
            dp_rx_rate_stats_update(peer, ppdu, i);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
        dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
                             &peer->stats, ppdu->peer_id,
                             UPDATE_PEER_STATS, pdev->pdev_id);
#endif
        dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
    }
}
#endif
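/*
 * Worked example for the MCS bucketing in dp_rx_stats_update() above:
 * each DP_STATS_INCC pair clamps out-of-range MCS values into the last
 * bucket, so an 802.11ac PPDU reported with mcs >= MAX_MCS_11AC lands
 * in rx.pkt_type[DOT11_AC].mcs_count[MAX_MCS - 1], while an in-range
 * mcs increments rx.pkt_type[DOT11_AC].mcs_count[mcs] directly.
 */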
/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @nbuf: QDF nbuf
 * @fcs_ok_mpdu_cnt: fcs passed mpdu index
 * @deliver_frame: flag to deliver wdi event
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
                        struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
                        uint8_t fcs_ok_mpdu_cnt, bool deliver_frame)
{
    uint16_t size = 0;
    struct ieee80211_frame *wh;
    uint32_t *nbuf_data;

    if (!ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload)
        return QDF_STATUS_SUCCESS;

    /* For M_COPY mode only one msdu per ppdu is sent to upper layer */
    if (pdev->mcopy_mode == M_COPY) {
        if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
            return QDF_STATUS_SUCCESS;
    }

    /* Adding 4 bytes to skip the 4-byte phy_ppdu_id */
    wh = (struct ieee80211_frame *)
        (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload + 4);

    size = (ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].first_msdu_payload -
            qdf_nbuf_data(nbuf));

    if (qdf_nbuf_pull_head(nbuf, size) == NULL)
        return QDF_STATUS_SUCCESS;

    if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
         IEEE80211_FC0_TYPE_MGT) ||
        ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
         IEEE80211_FC0_TYPE_CTL)) {
        return QDF_STATUS_SUCCESS;
    }

    nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
    *nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
    /* only retain RX MSDU payload in the skb */
    qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
                       ppdu_info->ppdu_msdu_info[fcs_ok_mpdu_cnt].payload_len);
    if (deliver_frame) {
        pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
        dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
                             nbuf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
    }
    return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
                        struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf,
                        uint8_t fcs_ok_cnt, bool deliver_frame)
{
    return QDF_STATUS_SUCCESS;
}
#endif
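/*
 * Return-code contract used throughout the mcopy path: callers of
 * dp_rx_handle_mcopy_mode() free the nbuf themselves on
 * QDF_STATUS_SUCCESS, and must leave it alone on QDF_STATUS_E_ALREADY
 * because the buffer has already been handed to the WDI_EVENT_RX_DATA
 * subscriber.
 */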
/**
 * dp_rx_mcopy_handle_last_mpdu() - cache and deliver the last MPDU header in
 * a status buffer if the MPDU end tlv is received in a different buffer
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @status_nbuf: QDF nbuf
 *
 * Return: void
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
                             struct hal_rx_ppdu_info *ppdu_info,
                             qdf_nbuf_t status_nbuf)
{
    QDF_STATUS mcopy_status;
    qdf_nbuf_t nbuf_clone = NULL;

    /* If the MPDU end tlv and RX header were received in different
     * buffers, process the RX header based on fcs status.
     */
    if (pdev->mcopy_status_nbuf) {
        /* For M_COPY mode only one msdu per ppdu is sent to upper layer */
        if (pdev->mcopy_mode == M_COPY) {
            if (pdev->m_copy_id.rx_ppdu_id ==
                ppdu_info->com_info.ppdu_id)
                goto end1;
        }

        if (ppdu_info->is_fcs_passed) {
            nbuf_clone = qdf_nbuf_clone(pdev->mcopy_status_nbuf);
            if (!nbuf_clone) {
                QDF_TRACE(QDF_MODULE_ID_TXRX,
                          QDF_TRACE_LEVEL_ERROR,
                          "Failed to clone nbuf");
                goto end1;
            }

            pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
            dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
                                 nbuf_clone,
                                 HTT_INVALID_PEER,
                                 WDI_NO_VAL, pdev->pdev_id);
            ppdu_info->is_fcs_passed = false;
        }
end1:
        qdf_nbuf_free(pdev->mcopy_status_nbuf);
        pdev->mcopy_status_nbuf = NULL;
    }

    /* If the MPDU end tlv and RX header are received in different
     * buffers, preserve the RX header as the fcs status will be
     * received in the MPDU end tlv in the next buffer. So, cache the
     * buffer to be processed in the next iteration.
     */
    if ((ppdu_info->fcs_ok_cnt + ppdu_info->fcs_err_cnt) !=
        ppdu_info->com_info.mpdu_cnt) {
        pdev->mcopy_status_nbuf = qdf_nbuf_clone(status_nbuf);
        if (pdev->mcopy_status_nbuf) {
            mcopy_status = dp_rx_handle_mcopy_mode(
                    soc, pdev,
                    ppdu_info,
                    pdev->mcopy_status_nbuf,
                    ppdu_info->fcs_ok_cnt,
                    false);
            if (mcopy_status == QDF_STATUS_SUCCESS) {
                qdf_nbuf_free(pdev->mcopy_status_nbuf);
                pdev->mcopy_status_nbuf = NULL;
            }
        }
    }
}
#else
static inline void
dp_rx_mcopy_handle_last_mpdu(struct dp_soc *soc, struct dp_pdev *pdev,
                             struct hal_rx_ppdu_info *ppdu_info,
                             qdf_nbuf_t status_nbuf)
{
}
#endif
/**
 * dp_rx_mcopy_process_ppdu_info() - update mcopy ppdu info
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @tlv_status: processed TLV status
 *
 * Return: void
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
                              struct hal_rx_ppdu_info *ppdu_info,
                              uint32_t tlv_status)
{
    if (!pdev->mcopy_mode)
        return;

    /* The fcs status is received in the MPDU end tlv. If the RX header
     * and its MPDU end tlv are received in different status buffers,
     * ppdu_info->is_fcs_passed is used to process that header.
     * If the end tlv is received in the next status buffer, then
     * com_info.mpdu_cnt will be 0 at the time the MPDU end tlv arrives
     * and the is_fcs_passed flag is updated based on
     * ppdu_info->fcs_err.
     */
    if (tlv_status != HAL_TLV_STATUS_MPDU_END)
        return;

    if (!ppdu_info->fcs_err) {
        if (ppdu_info->fcs_ok_cnt >
            HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER) {
            dp_err("No. of MPDUs(%d) per status buff exceeded",
                   ppdu_info->fcs_ok_cnt);
            return;
        }
        if (ppdu_info->com_info.mpdu_cnt)
            ppdu_info->fcs_ok_cnt++;
        else
            ppdu_info->is_fcs_passed = true;
    } else {
        if (ppdu_info->com_info.mpdu_cnt)
            ppdu_info->fcs_err_cnt++;
        else
            ppdu_info->is_fcs_passed = false;
    }
}
#else
static inline void
dp_rx_mcopy_process_ppdu_info(struct dp_pdev *pdev,
                              struct hal_rx_ppdu_info *ppdu_info,
                              uint32_t tlv_status)
{
}
#endif
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_rx_process_mcopy_mode() - Deliver FCS-passed MPDU headers for mcopy
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 * @tlv_status: processed TLV status
 * @status_nbuf: QDF nbuf
 *
 * Return: void
 */
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
                         struct hal_rx_ppdu_info *ppdu_info,
                         uint32_t tlv_status,
                         qdf_nbuf_t status_nbuf)
{
    QDF_STATUS mcopy_status;
    qdf_nbuf_t nbuf_clone = NULL;
    uint8_t fcs_ok_mpdu_cnt = 0;

    dp_rx_mcopy_handle_last_mpdu(soc, pdev, ppdu_info, status_nbuf);

    if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt))
        goto end;

    if (qdf_unlikely(!ppdu_info->fcs_ok_cnt))
        goto end;

    /* For M_COPY mode only one msdu per ppdu is sent to upper layer */
    if (pdev->mcopy_mode == M_COPY)
        ppdu_info->fcs_ok_cnt = 1;

    while (fcs_ok_mpdu_cnt < ppdu_info->fcs_ok_cnt) {
        nbuf_clone = qdf_nbuf_clone(status_nbuf);
        if (!nbuf_clone) {
            QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                      "Failed to clone nbuf");
            goto end;
        }

        mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
                                               ppdu_info,
                                               nbuf_clone,
                                               fcs_ok_mpdu_cnt,
                                               true);

        if (mcopy_status == QDF_STATUS_SUCCESS)
            qdf_nbuf_free(nbuf_clone);

        fcs_ok_mpdu_cnt++;
    }
end:
    qdf_nbuf_free(status_nbuf);
    ppdu_info->fcs_ok_cnt = 0;
    ppdu_info->fcs_err_cnt = 0;
    ppdu_info->com_info.mpdu_cnt = 0;
    qdf_mem_zero(&ppdu_info->ppdu_msdu_info,
                 HAL_RX_MAX_MPDU_H_PER_STATUS_BUFFER *
                 sizeof(struct hal_rx_msdu_payload_info));
}
#else
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
                         struct hal_rx_ppdu_info *ppdu_info,
                         uint32_t tlv_status,
                         qdf_nbuf_t status_nbuf)
{
}
#endif
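/*
 * Flow summary for dp_rx_process_mcopy_mode() above: it first flushes
 * any header cached by dp_rx_mcopy_handle_last_mpdu(), then clones the
 * status buffer once per FCS-passed MPDU (exactly once in M_COPY
 * mode), and finally frees the original status nbuf and resets the
 * per-PPDU mcopy bookkeeping.
 */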
/**
 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @ppdu_info: Structure for rx ppdu info
 * @nbuf: Qdf nbuf abstraction for linux skb
 *
 * Return: 0 on success, 1 on failure
 */
static inline int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
                             struct hal_rx_ppdu_info *ppdu_info,
                             qdf_nbuf_t nbuf)
{
    uint8_t size = 0;

    if (!pdev->monitor_vdev) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "[%s]:[%d] Monitor vdev is NULL !!",
                  __func__, __LINE__);
        return 1;
    }

    if (!ppdu_info->msdu_info.first_msdu_payload) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "[%s]:[%d] First msdu payload not present",
                  __func__, __LINE__);
        return 1;
    }

    /* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
    size = (ppdu_info->msdu_info.first_msdu_payload -
            qdf_nbuf_data(nbuf)) + 4;
    ppdu_info->msdu_info.first_msdu_payload = NULL;

    if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                  "[%s]:[%d] No header present",
                  __func__, __LINE__);
        return 1;
    }

    /* Only retain RX MSDU payload in the skb */
    qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
                       ppdu_info->msdu_info.payload_len);

    if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf,
                                  qdf_nbuf_headroom(nbuf))) {
        DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
        return 1;
    }

    pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
                                    nbuf, NULL);
    pdev->ppdu_info.rx_status.monitor_direct_used = 0;

    return 0;
}
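/*
 * Offset arithmetic used above, with an illustrative value: if
 * first_msdu_payload points 96 bytes into the status buffer,
 * qdf_nbuf_pull_head() strips 96 + 4 bytes (TLV headers plus the
 * 4-byte phy_ppdu_id) so the nbuf starts at the 802.11 header, and
 * qdf_nbuf_trim_tail() then cuts the buffer down to payload_len bytes.
 */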
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_rx_mon_handle_cfr_mu_info() - Gather the macaddr and ast_index of the
 * peer(s) in the PPDU received; this is used to correlate the CFR data
 * captured for a UL-MU-PPDU
 * @pdev: pdev ctx
 * @ppdu_info: pointer to ppdu info structure populated from ppdu status TLVs
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
                             struct hal_rx_ppdu_info *ppdu_info,
                             struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    struct dp_peer *peer;
    struct dp_soc *soc = pdev->soc;
    struct dp_ast_entry *ast_entry;
    struct mon_rx_user_status *rx_user_status;
    struct cdp_rx_stats_ppdu_user *rx_stats_peruser;
    uint32_t num_users;
    int user_id;
    uint32_t ast_index;

    qdf_spin_lock_bh(&soc->ast_lock);

    num_users = ppdu_info->com_info.num_users;
    for (user_id = 0; user_id < num_users; user_id++) {
        if (user_id > OFDMA_NUM_USERS) {
            qdf_spin_unlock_bh(&soc->ast_lock);
            return;
        }

        rx_user_status = &ppdu_info->rx_user_status[user_id];
        rx_stats_peruser = &cdp_rx_ppdu->user[user_id];
        ast_index = rx_user_status->ast_index;

        if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        ast_entry = soc->ast_table[ast_index];
        if (!ast_entry || ast_entry->peer_id == HTT_INVALID_PEER) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
                                     DP_MOD_ID_RX_PPDU_STATS);
        if (!peer) {
            rx_stats_peruser->peer_id = HTT_INVALID_PEER;
            continue;
        }

        qdf_mem_copy(rx_stats_peruser->mac_addr,
                     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
        dp_peer_unref_delete(peer, DP_MOD_ID_RX_PPDU_STATS);
    }

    qdf_spin_unlock_bh(&soc->ast_lock);
}
/*
 * dp_rx_mon_populate_cfr_ppdu_info() - Populate cdp ppdu info from hal ppdu
 * info
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
                                 struct hal_rx_ppdu_info *ppdu_info,
                                 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    int chain;

    cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
    cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
    cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
    cdp_rx_ppdu->num_users = ppdu_info->com_info.num_users;

    for (chain = 0; chain < MAX_CHAIN; chain++)
        cdp_rx_ppdu->per_chain_rssi[chain] =
            ppdu_info->rx_status.rssi[chain];

    dp_rx_mon_handle_cfr_mu_info(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_cfr_rcc_mode_status() - Return status of cfr rcc mode
 * @pdev: pdev ctx
 *
 * Return: True or False
 */
static inline bool
dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
{
    return pdev->cfr_rcc_mode;
}

/*
 * dp_rx_mon_populate_cfr_info() - Populate cdp ppdu info from hal cfr info
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
                            struct hal_rx_ppdu_info *ppdu_info,
                            struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    struct cdp_rx_ppdu_cfr_info *cfr_info;

    if (!dp_cfr_rcc_mode_status(pdev))
        return;

    cfr_info = &cdp_rx_ppdu->cfr_info;

    cfr_info->bb_captured_channel =
        ppdu_info->cfr_info.bb_captured_channel;
    cfr_info->bb_captured_timeout =
        ppdu_info->cfr_info.bb_captured_timeout;
    cfr_info->bb_captured_reason =
        ppdu_info->cfr_info.bb_captured_reason;
    cfr_info->rx_location_info_valid =
        ppdu_info->cfr_info.rx_location_info_valid;
    cfr_info->chan_capture_status =
        ppdu_info->cfr_info.chan_capture_status;
    cfr_info->rtt_che_buffer_pointer_high8 =
        ppdu_info->cfr_info.rtt_che_buffer_pointer_high8;
    cfr_info->rtt_che_buffer_pointer_low32 =
        ppdu_info->cfr_info.rtt_che_buffer_pointer_low32;
    cfr_info->rtt_cfo_measurement =
        (int16_t)ppdu_info->cfr_info.rtt_cfo_measurement;
    cfr_info->agc_gain_info0 =
        ppdu_info->cfr_info.agc_gain_info0;
    cfr_info->agc_gain_info1 =
        ppdu_info->cfr_info.agc_gain_info1;
    cfr_info->agc_gain_info2 =
        ppdu_info->cfr_info.agc_gain_info2;
    cfr_info->agc_gain_info3 =
        ppdu_info->cfr_info.agc_gain_info3;
    cfr_info->rx_start_ts =
        ppdu_info->cfr_info.rx_start_ts;
}
/**
 * dp_update_cfr_dbg_stats() - Increment RCC debug statistics
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: none
 */
static inline void
dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
                        struct hal_rx_ppdu_info *ppdu_info)
{
    struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

    DP_STATS_INC(pdev,
                 rcc.chan_capture_status[cfr->chan_capture_status], 1);
    if (cfr->rx_location_info_valid) {
        DP_STATS_INC(pdev, rcc.rx_loc_info_valid_cnt, 1);
        if (cfr->bb_captured_channel) {
            DP_STATS_INC(pdev, rcc.bb_captured_channel_cnt, 1);
            DP_STATS_INC(pdev,
                         rcc.reason_cnt[cfr->bb_captured_reason],
                         1);
        } else if (cfr->bb_captured_timeout) {
            DP_STATS_INC(pdev, rcc.bb_captured_timeout_cnt, 1);
            DP_STATS_INC(pdev,
                         rcc.reason_cnt[cfr->bb_captured_reason],
                         1);
        }
    }
}

/*
 * dp_rx_handle_cfr() - Gather cfr info from hal ppdu info
 * @soc: core txrx main context
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 *
 * Return: none
 */
static inline void
dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
                 struct hal_rx_ppdu_info *ppdu_info)
{
    qdf_nbuf_t ppdu_nbuf;
    struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

    dp_update_cfr_dbg_stats(pdev, ppdu_info);

    if (!ppdu_info->cfr_info.bb_captured_channel)
        return;

    ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
                               sizeof(struct cdp_rx_indication_ppdu),
                               0,
                               0,
                               FALSE);
    if (ppdu_nbuf) {
        cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

        dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
        dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
        qdf_nbuf_put_tail(ppdu_nbuf,
                          sizeof(struct cdp_rx_indication_ppdu));
        dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
                             ppdu_nbuf, HTT_INVALID_PEER,
                             WDI_NO_VAL, pdev->pdev_id);
    }
}

/**
 * dp_rx_populate_cfr_non_assoc_sta() - Populate cfr ppdu info for PPDUs from
 * non-associated stations
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: Rx PPDU indication structure
 *
 * Return: none
 */
static inline void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
                                 struct hal_rx_ppdu_info *ppdu_info,
                                 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
    if (!dp_cfr_rcc_mode_status(pdev))
        return;

    if (ppdu_info->cfr_info.bb_captured_channel)
        dp_rx_mon_populate_cfr_ppdu_info(pdev, ppdu_info, cdp_rx_ppdu);
}

/**
 * dp_bb_captured_chan_status() - Get the bb_captured_channel status
 * @pdev: pdev ctx
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: Success/Failure
 */
static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
                           struct hal_rx_ppdu_info *ppdu_info)
{
    QDF_STATUS status = QDF_STATUS_E_FAILURE;
    struct hal_rx_ppdu_cfr_info *cfr = &ppdu_info->cfr_info;

    if (dp_cfr_rcc_mode_status(pdev)) {
        if (cfr->bb_captured_channel)
            status = QDF_STATUS_SUCCESS;
    }

    return status;
}
#else
static inline void
dp_rx_mon_handle_cfr_mu_info(struct dp_pdev *pdev,
                             struct hal_rx_ppdu_info *ppdu_info,
                             struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
}

static inline void
dp_rx_mon_populate_cfr_ppdu_info(struct dp_pdev *pdev,
                                 struct hal_rx_ppdu_info *ppdu_info,
                                 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
}

static inline void
dp_rx_mon_populate_cfr_info(struct dp_pdev *pdev,
                            struct hal_rx_ppdu_info *ppdu_info,
                            struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
}

static inline void
dp_rx_handle_cfr(struct dp_soc *soc, struct dp_pdev *pdev,
                 struct hal_rx_ppdu_info *ppdu_info)
{
}

static inline void
dp_rx_populate_cfr_non_assoc_sta(struct dp_pdev *pdev,
                                 struct hal_rx_ppdu_info *ppdu_info,
                                 struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
}

static inline void
dp_update_cfr_dbg_stats(struct dp_pdev *pdev,
                        struct hal_rx_ppdu_info *ppdu_info)
{
}

static inline QDF_STATUS
dp_bb_captured_chan_status(struct dp_pdev *pdev,
                           struct hal_rx_ppdu_info *ppdu_info)
{
    return QDF_STATUS_E_NOSUPPORT;
}

static inline bool
dp_cfr_rcc_mode_status(struct dp_pdev *pdev)
{
    return false;
}
#endif
/**
 * dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 *
	 * In CFR RCC mode - PPDU status TLVs of error pkts are also needed
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;

	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/* No need to generate a WDI event when mcopy mode, CFR RCC mode and
	 * enhanced stats are all disabled.
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en &&
	    !dp_cfr_rcc_mode_status(pdev))
		return;

	if (dp_cfr_rcc_mode_status(pdev))
		dp_update_cfr_dbg_stats(pdev, ppdu_info);
	if (!ppdu_info->rx_status.frame_control_info_valid ||
	    (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)) {
		if (!(pdev->mcopy_mode ||
		      (dp_bb_captured_chan_status(pdev, ppdu_info) ==
		       QDF_STATUS_SUCCESS)))
			return;
	}

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

		dp_rx_mon_populate_cfr_info(pdev, ppdu_info, cdp_rx_ppdu);
		dp_rx_populate_cdp_indication_ppdu(pdev,
						   ppdu_info, cdp_rx_ppdu);
		if (!qdf_nbuf_put_tail(ppdu_nbuf,
				       sizeof(struct cdp_rx_indication_ppdu))) {
			/* Free the nbuf instead of leaking it on failure */
			qdf_nbuf_free(ppdu_nbuf);
			return;
		}

		dp_rx_stats_update(pdev, cdp_rx_ppdu);
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode || dp_cfr_rcc_mode_status(pdev)) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
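
/*
 * Delivery outcomes of dp_rx_handle_ppdu_stats() above, summarized:
 *
 *	peer_id valid (!= HTT_INVALID_PEER)   -> deliver the PPDU desc to WDI
 *						 subscribers under that peer_id
 *	peer_id invalid, mcopy or CFR RCC on  -> deliver anyway with
 *						 HTT_INVALID_PEER, since these
 *						 consumers do not need a peer
 *	peer_id invalid otherwise             -> free the nbuf; no consumer
 */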
/**
 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
 * filtering is enabled
 * @soc: core txrx main context
 * @ppdu_info: structure for rx ppdu info
 * @status_nbuf: qdf nbuf abstraction for linux skb
 * @pdev_id: mac_id/pdev_id, correspondingly, for MCL and WIN
 *
 * Return: none
 */
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t pdev_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;

	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						     DP_MOD_ID_RX_PPDU_STATS);
			if (peer) {
				if ((peer->peer_id != HTT_INVALID_PEER) &&
				    (peer->peer_based_pktlog_filter)) {
					dp_wdi_event_handler(
							WDI_EVENT_RX_DESC, soc,
							status_nbuf,
							peer->peer_id,
							WDI_NO_VAL, pdev_id);
				}
				dp_peer_unref_delete(peer,
						     DP_MOD_ID_RX_PPDU_STATS);
			}
		}
	}
}
#if defined(HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_M)
static inline void
dp_rx_ul_ofdma_ru_size_to_width(
	uint32_t ru_size,
	uint32_t *ru_width)
{
	uint32_t width;

	width = 0;
	switch (ru_size) {
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_26:
		width = 1;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_52:
		width = 2;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_106:
		width = 4;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_242:
		width = 9;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_484:
		width = 18;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996:
		width = 37;
		break;
	case HTT_UL_OFDMA_V0_RU_SIZE_RU_996x2:
		width = 74;
		break;
	default:
		dp_rx_mon_status_err("RU size to width convert err");
		break;
	}
	*ru_width = width;
}
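
/*
 * The widths produced above are expressed in units of 26-tone RUs:
 * RU_26 -> 1, RU_52 -> 2, RU_106 -> 4, RU_242 -> 9, RU_484 -> 18,
 * RU_996 -> 37 and RU_996x2 -> 74. Worked example: an uplink OFDMA user
 * assigned an RU_242 spans the equivalent of nine 26-tone units, so its
 * ofdma_ru_width is reported as 9.
 */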
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
	struct mon_rx_user_status *mon_rx_user_status;
	uint32_t num_users;
	uint32_t i;
	uint32_t mu_ul_user_v0_word0;
	uint32_t mu_ul_user_v0_word1;
	uint32_t ru_width;
	uint32_t ru_size;

	if (!(ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_OFDMA ||
	      ppdu_info->rx_status.reception_type == HAL_RX_TYPE_MU_MIMO))
		return;

	num_users = ppdu_info->com_info.num_users;
	if (num_users > HAL_MAX_UL_MU_USERS)
		num_users = HAL_MAX_UL_MU_USERS;

	for (i = 0; i < num_users; i++) {
		mon_rx_user_status = &ppdu_info->rx_user_status[i];
		mu_ul_user_v0_word0 =
			mon_rx_user_status->mu_ul_user_v0_word0;
		mu_ul_user_v0_word1 =
			mon_rx_user_status->mu_ul_user_v0_word1;

		if (HTT_UL_OFDMA_USER_INFO_V0_W0_VALID_GET(
			mu_ul_user_v0_word0) &&
		    !HTT_UL_OFDMA_USER_INFO_V0_W0_VER_GET(
			mu_ul_user_v0_word0)) {
			mon_rx_user_status->mcs =
				HTT_UL_OFDMA_USER_INFO_V0_W1_MCS_GET(
					mu_ul_user_v0_word1);
			mon_rx_user_status->nss =
				HTT_UL_OFDMA_USER_INFO_V0_W1_NSS_GET(
					mu_ul_user_v0_word1) + 1;

			mon_rx_user_status->mu_ul_info_valid = 1;
			mon_rx_user_status->ofdma_ru_start_index =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_START_GET(
					mu_ul_user_v0_word1);

			ru_size =
				HTT_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE_GET(
					mu_ul_user_v0_word1);
			dp_rx_ul_ofdma_ru_size_to_width(ru_size, &ru_width);
			mon_rx_user_status->ofdma_ru_width = ru_width;
			mon_rx_user_status->ofdma_ru_size = ru_size;
		}
	}
}
#else
static inline void
dp_rx_mon_handle_mu_ul_info(struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
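
/*
 * Worked example for the extraction above: if word0 has VALID = 1 and
 * VER = 0, and word1 carries MCS = 7, NSS = 1, RU_START = 0 and
 * RU_SIZE = RU_106, the user is recorded with mcs 7, nss 2 (the NSS
 * field is zero-based, hence the + 1), ofdma_ru_start_index 0 and
 * ofdma_ru_width 4 (per dp_rx_ul_ofdma_ru_size_to_width()).
 */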
/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status
 * buffers on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: amount of work which can be done
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
			     uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;

	if (!pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d", soc,
				       mac_id);
		return;
	}

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = pdev->rx_enh_capture_mode;
	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {
		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) ||
		    (pdev->mcopy_mode) || (dp_cfr_rcc_mode_status(pdev)) ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
						status_nbuf, ppdu_info,
						&nbuf_used);

				dp_rx_mcopy_process_ppdu_info(pdev,
							      ppdu_info,
							      tlv_status);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >=
					RX_MON_STATUS_BUF_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf,
							pdev->pdev_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, pdev->pdev_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added &&
		    pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev,
								 ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}
		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			dp_rx_mon_handle_mu_ul_info(ppdu_info);

			if (pdev->tx_capture_enabled
			    != CDP_TX_ENH_CAPTURE_DISABLED)
				dp_send_ack_frame_to_stack(soc, pdev,
							   ppdu_info);

			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);
			else if (dp_cfr_rcc_mode_status(pdev))
				dp_rx_handle_cfr(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;

			/*
			 * If chan_num was not fetched correctly from the ppdu
			 * RX TLV, use the value saved in the pdev.
			 */
			if (qdf_unlikely(pdev->ppdu_info.rx_status.chan_num == 0))
				pdev->ppdu_info.rx_status.chan_num =
					pdev->mon_chan_num;

			/*
			 * If chan_freq was not fetched correctly from the ppdu
			 * RX TLV, use the value saved in the pdev.
			 */
			if (qdf_unlikely(pdev->ppdu_info.rx_status.chan_freq == 0)) {
				pdev->ppdu_info.rx_status.chan_freq =
					pdev->mon_chan_freq;
			}

			if (!soc->full_mon_mode)
				dp_rx_mon_dest_process(soc, int_ctx, mac_id,
						       quota);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
}
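
/*
 * Per status buffer, the loop above does three things:
 *	1. Parses TLVs into pdev->ppdu_info until the buffer is exhausted
 *	   or a terminal tlv_status is returned.
 *	2. Hands the raw status nbuf to exactly one consumer (peer-based
 *	   pktlog, pktlog full/lite, smart mesh, mcopy or enhanced capture),
 *	   or frees it.
 *	3. On HAL_TLV_STATUS_PPDU_DONE, delivers the accumulated ppdu_info
 *	   (PPDU stats or CFR), patches chan_num/chan_freq if the TLVs did
 *	   not carry them, and kicks destination-ring processing.
 */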
/*
 * dp_rx_nbuf_prepare() - prepare RX nbuf
 * @soc: core txrx main context
 * @pdev: core txrx pdev context
 *
 * This function allocates and maps an nbuf for RX DMA usage, retrying on
 * failure until the retry count reaches the max threshold or the operation
 * succeeds.
 *
 * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
 */
static inline qdf_nbuf_t
dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint8_t *buf;
	int32_t nbuf_retry_count;
	QDF_STATUS ret;
	qdf_nbuf_t nbuf = NULL;

	for (nbuf_retry_count = 0; nbuf_retry_count <
	     QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
	     nbuf_retry_count++) {
		/* Allocate a new skb using alloc_skb */
		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
						  RX_MON_STATUS_BUF_RESERVATION,
						  RX_DATA_BUFFER_ALIGNMENT);
		if (!nbuf) {
			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
			continue;
		}

		buf = qdf_nbuf_data(nbuf);
		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);

		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						 QDF_DMA_FROM_DEVICE,
						 RX_MON_STATUS_BUF_SIZE);

		/* nbuf map failed */
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(pdev, replenish.map_err, 1);
			continue;
		}

		/* qdf_nbuf alloc and map succeeded */
		break;
	}

	/* qdf_nbuf alloc or map failed even after all retries */
	if (qdf_unlikely(nbuf_retry_count >=
			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
		return NULL;

	return nbuf;
}
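
/*
 * Note: the nbuf returned by dp_rx_nbuf_prepare() is already DMA-mapped;
 * both call sites in this file only fetch the physical address before
 * programming the ring entry, e.g.:
 *
 *	status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
 *	if (status_nbuf)
 *		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
 */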
/*
 * dp_rx_mon_status_srng_process() - Process the monitor status ring:
 * post each filled status ring buffer to the Rx status queue for later
 * processing once its status TLVs are complete, and allocate a new
 * buffer to the status ring in place of every buffer that is posted.
 * @soc: core txrx main context
 * @int_ctx: interrupt context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			      uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	enum dp_mon_reap_status reap_status;
	uint32_t work_done = 0;

	if (!pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       soc, mac_id);
		return work_done;
	}

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];
		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {
			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);
			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				dp_info_rl("tlv tag status error hp:%u, tp:%u",
					   hp, tp);

				/* The RxDMA status done bit might not be set
				 * even though tp was moved by HW. If the done
				 * status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done but the HP + 2 entry's DMA
				 *    done is set, skip the HP + 1 entry and
				 *    start processing in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done,
				 *    i.e. check the status of the same buffer
				 *    the next time
				 *    dp_rx_mon_status_srng_process() runs.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					continue;
				else if (reap_status == DP_MON_STATUS_REPLENISH) {
					qdf_nbuf_unmap_nbytes_single(
						soc->osdev, status_nbuf,
						QDF_DMA_FROM_DEVICE,
						rx_desc_pool->buf_size);
					qdf_nbuf_free(status_nbuf);
					goto buf_replenish;
				}
			}
			qdf_nbuf_set_pktlen(status_nbuf,
					    RX_MON_STATUS_BUF_SIZE);

			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			uint32_t num_alloc_desc;

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
								  rx_desc_pool,
								  1,
								  &desc_list,
								  &tail);
			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

buf_replenish:
		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * If the qdf_nbuf alloc or map failed, return the dp rx desc
		 * to the free list, fill in a NULL dma address at the current
		 * HP entry, keep the HP in mon_status_ring unchanged, and
		 * wait for the next dp_rx_mon_status_srng_process() run to
		 * fill in a buffer at the current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			dp_info_rl("fail to allocate or map qdf_nbuf");
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
						&tail, mac_id, rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
					rxdma_mon_status_ring_entry,
					0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
					     paddr, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	dp_srng_access_end(int_ctx, soc, mon_status_srng);

	return work_done;
}
uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_rx_mon_status_srng_process(soc, int_ctx, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, int_ctx, mac_id, quota);

	return work_done;
}
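
/*
 * Example of the quota split above: with quota = 32, if the status SRNG
 * reap consumes 10 entries (work_done = 10), TLV processing is invoked
 * with the remaining quota of 22, which in turn bounds how much
 * destination-ring work dp_rx_mon_dest_process() may do in this pass.
 */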
#ifndef DISABLE_MON_CONFIG
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	if (qdf_unlikely(soc->full_mon_mode))
		return dp_rx_mon_process(soc, int_ctx, mac_id, quota);

	return dp_rx_mon_status_process(soc, int_ctx, mac_id, quota);
}
#else
uint32_t
dp_mon_process(struct dp_soc *soc, struct dp_intr *int_ctx,
	       uint32_t mac_id, uint32_t quota)
{
	return 0;
}
#endif
QDF_STATUS
dp_rx_pdev_mon_status_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u",
		 pdev_id, num_entries);

	return dp_rx_mon_status_buffers_replenish(soc, mac_id, mon_status_ring,
						  rx_desc_pool, num_entries,
						  &desc_list, &tail,
						  HAL_RX_BUF_RBM_SW3_BM);
}
QDF_STATUS
dp_rx_pdev_mon_status_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc Pool[%d] entries=%u", pdev_id, num_entries);

	rx_desc_pool->desc_type = DP_RX_DESC_STATUS_TYPE;
	return dp_rx_desc_pool_alloc(soc, num_entries + 1, rx_desc_pool);
}
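
/*
 * Note: the descriptor pool is sized num_entries + 1 here (and in the
 * pool init below), which appears intended to leave one spare descriptor
 * so that every status ring entry can hold a buffer while a descriptor
 * remains available for replenish bookkeeping.
 */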
void
dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint32_t i;
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	struct rx_desc_pool *rx_desc_pool;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	mon_status_ring = &soc->rxdma_mon_status_ring[mac_id];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] init entries=%u",
		 pdev_id, num_entries);

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable frag processing flag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_id, num_entries + 1, rx_desc_pool);

	qdf_nbuf_queue_init(&pdev->rx_status_q);

	pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&pdev->ppdu_info, sizeof(pdev->ppdu_info));

	/*
	 * Set last_ppdu_id to HAL_INVALID_PPDU_ID in order to avoid a
	 * ppdu_id match with a '0' ppdu_id from the monitor status ring.
	 */
	pdev->ppdu_info.com_info.last_ppdu_id = HAL_INVALID_PPDU_ID;

	qdf_mem_zero(&pdev->rx_mon_stats, sizeof(pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
				      &pdev->rx_mon_stats);

	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
		pdev->is_mpdu_hdr[i] = true;
	}

	/* Zero the whole msdu_list array; the original sizeof of a single
	 * (out-of-range) element covered only one entry.
	 */
	qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list));

	pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;
}
void
dp_rx_pdev_mon_status_desc_pool_deinit(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Desc status Pool[%d] deinit", pdev_id);

	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_desc_pool_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Free pdev[%d]", pdev_id);

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

void
dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];

	dp_debug("Mon RX Status Desc Pool Buffers Free pdev[%d]", pdev_id);

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
}
/*
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status ring
 * with rx nbufs; called during dp rx monitor status ring initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process or NULL during dp rx initialization or
 *	       out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers,
					      union dp_rx_desc_list_elem_t **desc_list,
					      union dp_rx_desc_list_elem_t **tail,
					      uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);

	if (!dp_pdev) {
		dp_rx_mon_status_debug("%pK: pdev is null for mac_id = %d",
				       dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	dp_rx_mon_status_debug("%pK: requested %d buffers for replenish",
			       dp_soc, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_mon_status_err("%pK: no free rx_descs in freelist",
					     dp_soc);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_mon_status_debug("%pK: %d rx desc allocated", dp_soc,
				       num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	dp_rx_mon_status_debug("%pK: no of available entries in rxdma ring: %d",
			       dp_soc, num_entries_avail);

	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count <= num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * If the qdf_nbuf alloc or map failed, keep the HP in
		 * mon_status_ring unchanged and wait for
		 * dp_rx_mon_status_srng_process() to fill in a buffer at
		 * the current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			dp_rx_mon_status_err("%pK: qdf_nbuf allocate or map fail, count %d",
					     dp_soc, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_cur_hp_n_move_next(
							dp_soc->hal_soc,
							rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			dp_rx_mon_status_err("%pK: rxdma_ring_entry is NULL, count - %d",
					     dp_soc, count);
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev, rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		count++;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		dp_rx_mon_status_debug("%pK: rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
				       dp_soc, &(*desc_list)->rx_desc,
				       (*desc_list)->rx_desc.cookie, rx_netbuf,
				       (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_mon_status_debug("%pK: successfully replenished %d buffers",
			       dp_soc, num_req_buffers);

	dp_rx_mon_status_debug("%pK: %d rx desc added back to free list",
			       dp_soc, num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
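
/*
 * Partial-replenish example for the function above: if the caller requests
 * 128 buffers but only 100 ring entries are available, num_req_buffers is
 * clamped to 100 and the surplus descriptors are handed back to the free
 * list through the desc_list and tail pointers at the end of the function.
 */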
#if !defined(DISABLE_MON_CONFIG) && defined(MON_ENABLE_DROP_FOR_MAC)
/**
 * dp_mon_status_srng_drop_for_mac() - Drop the mon status ring packets for
 * a given mac
 * @pdev: DP pdev
 * @mac_id: mac id
 * @quota: maximum number of ring entries that can be processed
 *
 * Return: Number of ring entries reaped
 */
static uint32_t
dp_mon_status_srng_drop_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				uint32_t quota)
{
	struct dp_soc *soc = pdev->soc;
	void *mon_status_srng;
	hal_soc_handle_t hal_soc;
	void *ring_desc;
	uint32_t reap_cnt = 0;

	if (qdf_unlikely(!soc || !soc->hal_soc))
		return reap_cnt;

	mon_status_srng = soc->rxdma_mon_status_ring[mac_id].hal_srng;

	if (qdf_unlikely(!mon_status_srng ||
			 !hal_srng_initialized(mon_status_srng)))
		return reap_cnt;

	hal_soc = soc->hal_soc;

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		return reap_cnt;

	while ((ring_desc =
		hal_srng_src_peek_n_get_next(hal_soc, mon_status_srng)) &&
		reap_cnt < MON_DROP_REAP_LIMIT && quota--) {
		uint64_t buf_addr;
		uint32_t rx_buf_cookie;
		struct dp_rx_desc *rx_desc;
		qdf_nbuf_t status_nbuf;
		uint8_t *status_buf;
		enum dp_mon_reap_status reap_status;
		qdf_dma_addr_t iova;
		struct rx_desc_pool *rx_desc_pool;

		rx_desc_pool = &soc->rx_desc_status[mac_id];

		buf_addr = (HAL_RX_BUFFER_ADDR_31_0_GET(ring_desc) |
		   ((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(ring_desc)) << 32));

		if (qdf_likely(buf_addr)) {
			rx_buf_cookie = HAL_RX_BUF_COOKIE_GET(ring_desc);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
							       rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			if (hal_get_rx_status_done(status_buf) !=
			    QDF_STATUS_SUCCESS) {
				/* If the done status is missing:
				 * 1. As per the MAC team's suggestion, when
				 *    the HP + 1 entry is peeked and its DMA
				 *    is not done but the HP + 2 entry's DMA
				 *    done is set, skip the HP + 1 entry and
				 *    start processing in the next interrupt.
				 * 2. If the HP + 2 entry's DMA done is not
				 *    set, poll on the HP + 1 entry's DMA done,
				 *    i.e. check the status of the same buffer
				 *    the next time
				 *    dp_rx_mon_status_srng_process() runs.
				 */
				reap_status =
					dp_rx_mon_handle_status_buf_done(pdev,
							mon_status_srng);
				if (reap_status == DP_MON_STATUS_NO_DMA)
					break;
			}
			qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_free(status_nbuf);
		} else {
			union dp_rx_desc_list_elem_t *rx_desc_elem;

			qdf_spin_lock_bh(&rx_desc_pool->lock);

			if (!rx_desc_pool->freelist) {
				qdf_spin_unlock_bh(&rx_desc_pool->lock);
				break;
			}
			rx_desc_elem = rx_desc_pool->freelist;
			rx_desc_pool->freelist = rx_desc_pool->freelist->next;
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			rx_desc = &rx_desc_elem->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;

			dp_info_rl("fail to allocate or map nbuf");
			dp_rx_add_to_free_desc_list(&desc_list, &tail,
						    rx_desc);
			dp_rx_add_desc_list_to_free_list(soc,
							 &desc_list,
							 &tail, mac_id,
							 rx_desc_pool);

			hal_rxdma_buff_addr_info_set(ring_desc, 0, 0,
						     HAL_RX_BUF_RBM_SW3_BM);
			break;
		}

		iova = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(ring_desc, iova, rx_desc->cookie,
					     HAL_RX_BUF_RBM_SW3_BM);

		reap_cnt++;
		hal_srng_src_get_next(hal_soc, mon_status_srng);
	}

	hal_srng_access_end(hal_soc, mon_status_srng);

	return reap_cnt;
}
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	uint32_t work_done;

	work_done = dp_mon_status_srng_drop_for_mac(pdev, mac_id, quota);
	dp_mon_dest_srng_drop_for_mac(pdev, mac_id);

	return work_done;
}
#else
uint32_t dp_mon_drop_packets_for_mac(struct dp_pdev *pdev, uint32_t mac_id,
				     uint32_t quota)
{
	return 0;
}
#endif