dp_rx.c

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_hw_headers.h"
  19. #include "dp_types.h"
  20. #include "dp_rx.h"
  21. #include "dp_tx.h"
  22. #include "dp_peer.h"
  23. #include "hal_rx.h"
  24. #include "hal_api.h"
  25. #include "qdf_nbuf.h"
  26. #ifdef MESH_MODE_SUPPORT
  27. #include "if_meta_hdr.h"
  28. #endif
  29. #include "dp_internal.h"
  30. #include "dp_ipa.h"
  31. #include "dp_hist.h"
  32. #include "dp_rx_buffer_pool.h"
  33. #ifdef WIFI_MONITOR_SUPPORT
  34. #include "dp_htt.h"
  35. #include <dp_mon.h>
  36. #endif
  37. #ifdef FEATURE_WDS
  38. #include "dp_txrx_wds.h"
  39. #endif
  40. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  41. #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
  42. static inline
  43. bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
  44. {
  45. if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
  46. qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
  47. DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
  48. return false;
  49. }
  50. return true;
  51. }
  52. #else
  53. static inline
  54. bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
  55. {
  56. return true;
  57. }
  58. #endif
  59. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  60. #ifdef DUP_RX_DESC_WAR
  61. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  62. hal_ring_handle_t hal_ring,
  63. hal_ring_desc_t ring_desc,
  64. struct dp_rx_desc *rx_desc)
  65. {
  66. void *hal_soc = soc->hal_soc;
  67. hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
  68. dp_rx_desc_dump(rx_desc);
  69. }
  70. #else
  71. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  72. hal_ring_handle_t hal_ring_hdl,
  73. hal_ring_desc_t ring_desc,
  74. struct dp_rx_desc *rx_desc)
  75. {
  76. hal_soc_handle_t hal_soc = soc->hal_soc;
  77. dp_rx_desc_dump(rx_desc);
  78. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
  79. hal_srng_dump_ring(hal_soc, hal_ring_hdl);
  80. qdf_assert_always(0);
  81. }
  82. #endif
  83. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  84. #ifdef RX_DESC_SANITY_WAR
  85. QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
  86. hal_ring_handle_t hal_ring_hdl,
  87. hal_ring_desc_t ring_desc,
  88. struct dp_rx_desc *rx_desc)
  89. {
  90. uint8_t return_buffer_manager;
  91. if (qdf_unlikely(!rx_desc)) {
  92. /*
  93. * This is an unlikely case where the cookie obtained
  94. * from the ring_desc is invalid and hence we are not
  95. * able to find the corresponding rx_desc
  96. */
  97. goto fail;
  98. }
  99. return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
  100. if (qdf_unlikely(!(return_buffer_manager ==
  101. HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
  102. return_buffer_manager ==
  103. HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
  104. goto fail;
  105. }
  106. return QDF_STATUS_SUCCESS;
  107. fail:
  108. DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
  109. dp_err("Ring Desc:");
  110. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
  111. ring_desc);
  112. return QDF_STATUS_E_NULL_VALUE;
  113. }
  114. #endif
  115. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  116. /**
  117. * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
  118. *
  119. * @dp_soc: struct dp_soc *
  120. * @nbuf_frag_info_t: nbuf frag info
  121. * @dp_pdev: struct dp_pdev *
  122. * @rx_desc_pool: Rx desc pool
  123. *
  124. * Return: QDF_STATUS
  125. */
  126. #ifdef DP_RX_MON_MEM_FRAG
  127. static inline QDF_STATUS
  128. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  129. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  130. struct dp_pdev *dp_pdev,
  131. struct rx_desc_pool *rx_desc_pool)
  132. {
  133. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  134. (nbuf_frag_info_t->virt_addr).vaddr =
  135. qdf_frag_alloc(rx_desc_pool->buf_size);
  136. if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
  137. dp_err("Frag alloc failed");
  138. DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
  139. return QDF_STATUS_E_NOMEM;
  140. }
  141. ret = qdf_mem_map_page(dp_soc->osdev,
  142. (nbuf_frag_info_t->virt_addr).vaddr,
  143. QDF_DMA_FROM_DEVICE,
  144. rx_desc_pool->buf_size,
  145. &nbuf_frag_info_t->paddr);
  146. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  147. qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
  148. dp_err("Frag map failed");
  149. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  150. return QDF_STATUS_E_FAULT;
  151. }
  152. return QDF_STATUS_SUCCESS;
  153. }
  154. #else
  155. static inline QDF_STATUS
  156. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  157. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  158. struct dp_pdev *dp_pdev,
  159. struct rx_desc_pool *rx_desc_pool)
  160. {
  161. return QDF_STATUS_SUCCESS;
  162. }
  163. #endif /* DP_RX_MON_MEM_FRAG */
  164. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  165. /**
  166. * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
  167. * @soc: Datapath soc structure
  168. * @ring_num: Refill ring number
  169. * @num_req: number of buffers requested for refill
  170. * @num_refill: number of buffers refilled
  171. *
  172. * Returns: None
  173. */
  174. static inline void
  175. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  176. hal_ring_handle_t hal_ring_hdl,
  177. uint32_t num_req, uint32_t num_refill)
  178. {
  179. struct dp_refill_info_record *record;
  180. uint32_t idx;
  181. uint32_t tp;
  182. uint32_t hp;
  183. if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
  184. !soc->rx_refill_ring_history[ring_num]))
  185. return;
  186. idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
  187. DP_RX_REFILL_HIST_MAX);
  188. /* No NULL check needed for record since it's an array */
  189. record = &soc->rx_refill_ring_history[ring_num]->entry[idx];
  190. hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
  191. record->timestamp = qdf_get_log_timestamp();
  192. record->num_req = num_req;
  193. record->num_refill = num_refill;
  194. record->hp = hp;
  195. record->tp = tp;
  196. }
  197. #else
  198. static inline void
  199. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  200. hal_ring_handle_t hal_ring_hdl,
  201. uint32_t num_req, uint32_t num_refill)
  202. {
  203. }
  204. #endif
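/*
 * Illustrative sketch (not part of the driver source): the refill history
 * behaves as a ring of DP_RX_REFILL_HIST_MAX records, with wrap-around
 * handled by dp_history_get_next_index(). Conceptually, each recorded
 * entry is one snapshot of the requested vs. actual refill count together
 * with the SW head/tail pointers:
 *
 *   idx = dp_history_get_next_index(&hist->index, DP_RX_REFILL_HIST_MAX);
 *   hist->entry[idx] = (struct dp_refill_info_record) {
 *           .timestamp  = qdf_get_log_timestamp(),
 *           .num_req    = num_req,
 *           .num_refill = num_refill,
 *           .hp         = hp,
 *           .tp         = tp,
 *   };
 */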
  205. /**
  206. * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
  207. *
  208. * @dp_soc: struct dp_soc *
  209. * @mac_id: Mac id
  210. * @num_entries_avail: num_entries_avail
  211. * @nbuf_frag_info_t: nbuf frag info
  212. * @dp_pdev: struct dp_pdev *
  213. * @rx_desc_pool: Rx desc pool
  214. *
  215. * Return: QDF_STATUS
  216. */
  217. static inline QDF_STATUS
  218. dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
  219. uint32_t mac_id,
  220. uint32_t num_entries_avail,
  221. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  222. struct dp_pdev *dp_pdev,
  223. struct rx_desc_pool *rx_desc_pool)
  224. {
  225. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  226. (nbuf_frag_info_t->virt_addr).nbuf =
  227. dp_rx_buffer_pool_nbuf_alloc(dp_soc,
  228. mac_id,
  229. rx_desc_pool,
  230. num_entries_avail);
  231. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  232. dp_err("nbuf alloc failed");
  233. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  234. return QDF_STATUS_E_NOMEM;
  235. }
  236. ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
  237. nbuf_frag_info_t);
  238. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  239. dp_rx_buffer_pool_nbuf_free(dp_soc,
  240. (nbuf_frag_info_t->virt_addr).nbuf, mac_id);
  241. dp_err("nbuf map failed");
  242. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  243. return QDF_STATUS_E_FAULT;
  244. }
  245. nbuf_frag_info_t->paddr =
  246. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
  247. dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
  248. (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
  249. rx_desc_pool->buf_size,
  250. true);
  251. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  252. &nbuf_frag_info_t->paddr,
  253. rx_desc_pool);
  254. if (ret == QDF_STATUS_E_FAILURE) {
  255. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  256. return QDF_STATUS_E_ADDRNOTAVAIL;
  257. }
  258. return QDF_STATUS_SUCCESS;
  259. }
  260. /*
  261. * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  262. * called during dp rx initialization
  263. * and at the end of dp_rx_process.
  264. *
  265. * @soc: core txrx main context
  266. * @mac_id: mac_id which is one of 3 mac_ids
  267. * @dp_rxdma_srng: dp rxdma circular ring
  268. * @rx_desc_pool: Pointer to free Rx descriptor pool
  269. * @num_req_buffers: number of buffers to be replenished
  270. * @desc_list: list of descs if called from dp_rx_process
  271. * or NULL during dp rx initialization or out of buffer
  272. * interrupt.
  273. * @tail: tail of descs list
  274. * @func_name: name of the caller function
  275. * Return: success or failure
  276. */
  277. QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
  278. struct dp_srng *dp_rxdma_srng,
  279. struct rx_desc_pool *rx_desc_pool,
  280. uint32_t num_req_buffers,
  281. union dp_rx_desc_list_elem_t **desc_list,
  282. union dp_rx_desc_list_elem_t **tail,
  283. const char *func_name)
  284. {
  285. uint32_t num_alloc_desc;
  286. uint16_t num_desc_to_free = 0;
  287. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
  288. uint32_t num_entries_avail;
  289. uint32_t count;
  290. int sync_hw_ptr = 1;
  291. struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
  292. void *rxdma_ring_entry;
  293. union dp_rx_desc_list_elem_t *next;
  294. QDF_STATUS ret;
  295. void *rxdma_srng;
  296. rxdma_srng = dp_rxdma_srng->hal_srng;
  297. if (qdf_unlikely(!dp_pdev)) {
  298. dp_rx_err("%pK: pdev is null for mac_id = %d",
  299. dp_soc, mac_id);
  300. return QDF_STATUS_E_FAILURE;
  301. }
  302. if (qdf_unlikely(!rxdma_srng)) {
  303. dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
  304. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  305. return QDF_STATUS_E_FAILURE;
  306. }
  307. dp_rx_debug("%pK: requested %d buffers for replenish",
  308. dp_soc, num_req_buffers);
  309. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  310. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  311. rxdma_srng,
  312. sync_hw_ptr);
  313. dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
  314. dp_soc, num_entries_avail);
  315. if (!(*desc_list) && (num_entries_avail >
  316. ((dp_rxdma_srng->num_entries * 3) / 4))) {
  317. num_req_buffers = num_entries_avail;
  318. } else if (num_entries_avail < num_req_buffers) {
  319. num_desc_to_free = num_req_buffers - num_entries_avail;
  320. num_req_buffers = num_entries_avail;
  321. }
  322. if (qdf_unlikely(!num_req_buffers)) {
  323. num_desc_to_free = num_req_buffers;
  324. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  325. goto free_descs;
  326. }
  327. /*
  328. * if desc_list is NULL, allocate the descs from freelist
  329. */
  330. if (!(*desc_list)) {
  331. num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
  332. rx_desc_pool,
  333. num_req_buffers,
  334. desc_list,
  335. tail);
  336. if (!num_alloc_desc) {
  337. dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
  338. DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
  339. num_req_buffers);
  340. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  341. return QDF_STATUS_E_NOMEM;
  342. }
  343. dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc);
  344. num_req_buffers = num_alloc_desc;
  345. }
  346. count = 0;
  347. while (count < num_req_buffers) {
  348. /* Flag is set during pdev rx_desc_pool initialization */
  349. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  350. ret = dp_pdev_frag_alloc_and_map(dp_soc,
  351. &nbuf_frag_info,
  352. dp_pdev,
  353. rx_desc_pool);
  354. else
  355. ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
  356. mac_id,
  357. num_entries_avail, &nbuf_frag_info,
  358. dp_pdev, rx_desc_pool);
  359. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  360. if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
  361. continue;
  362. break;
  363. }
  364. count++;
  365. rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
  366. rxdma_srng);
  367. qdf_assert_always(rxdma_ring_entry);
  368. next = (*desc_list)->next;
  369. /* Flag is set during pdev rx_desc_pool initialization */
  370. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  371. dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
  372. &nbuf_frag_info);
  373. else
  374. dp_rx_desc_prep(&((*desc_list)->rx_desc),
  375. &nbuf_frag_info);
  376. /* rx_desc.in_use should be zero at this time*/
  377. qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
  378. (*desc_list)->rx_desc.in_use = 1;
  379. (*desc_list)->rx_desc.in_err_state = 0;
  380. dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
  381. func_name, RX_DESC_REPLENISHED);
  382. dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
  383. nbuf_frag_info.virt_addr.nbuf,
  384. (unsigned long long)(nbuf_frag_info.paddr),
  385. (*desc_list)->rx_desc.cookie);
  386. hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
  387. nbuf_frag_info.paddr,
  388. (*desc_list)->rx_desc.cookie,
  389. rx_desc_pool->owner);
  390. *desc_list = next;
  391. }
  392. dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
  393. num_req_buffers, count);
  394. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  395. dp_rx_schedule_refill_thread(dp_soc);
  396. dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
  397. count, num_desc_to_free);
  398. /* No need to count the number of bytes received during replenish.
  399. * Therefore set replenish.pkts.bytes as 0.
  400. */
  401. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  402. free_descs:
  403. DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
  404. /*
  405. * add any available free desc back to the free list
  406. */
  407. if (*desc_list)
  408. dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
  409. mac_id, rx_desc_pool);
  410. return QDF_STATUS_SUCCESS;
  411. }
  412. qdf_export_symbol(__dp_rx_buffers_replenish);
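/*
 * Usage sketch (illustrative only; the local variable names below are
 * placeholders): callers such as the Rx processing loop typically collect
 * reaped descriptors into a local free list and hand them back in one
 * replenish call per pass, roughly:
 *
 *   union dp_rx_desc_list_elem_t *desc_list = NULL;
 *   union dp_rx_desc_list_elem_t *tail = NULL;
 *   uint32_t reaped = 0;
 *
 *   // for each reaped ring entry:
 *   //     dp_rx_add_to_free_desc_list(&desc_list, &tail, rx_desc);
 *   //     reaped++;
 *
 *   if (reaped)
 *           __dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
 *                                     rx_desc_pool, reaped,
 *                                     &desc_list, &tail, __func__);
 */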
  413. /*
  414. * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
  415. * pkts to RAW mode simulation to
  416. * decapsulate the pkt.
  417. *
  418. * @vdev: vdev on which RAW mode is enabled
  419. * @nbuf_list: list of RAW pkts to process
  420. * @peer: peer object from which the pkt is rx
  421. *
  422. * Return: void
  423. */
  424. void
  425. dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
  426. struct dp_peer *peer)
  427. {
  428. qdf_nbuf_t deliver_list_head = NULL;
  429. qdf_nbuf_t deliver_list_tail = NULL;
  430. qdf_nbuf_t nbuf;
  431. nbuf = nbuf_list;
  432. while (nbuf) {
  433. qdf_nbuf_t next = qdf_nbuf_next(nbuf);
  434. DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
  435. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  436. DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
  437. /*
  438. * reset the chfrag_start and chfrag_end bits in nbuf cb
  439. * as this is a non-amsdu pkt and RAW mode simulation expects
  440. * these bits to be 0 for a non-amsdu pkt.
  441. */
  442. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  443. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  444. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  445. qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
  446. }
  447. nbuf = next;
  448. }
  449. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
  450. &deliver_list_tail, peer->mac_addr.raw);
  451. vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
  452. }
  453. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  454. #ifndef FEATURE_WDS
  455. void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
  456. struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
  457. {
  458. }
  459. #endif
  460. /*
  461. * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
  462. *
  463. * @soc: core txrx main context
  464. * @ta_peer : source peer entry
  465. * @rx_tlv_hdr : start address of rx tlvs
  466. * @nbuf : nbuf that has to be intrabss forwarded
  467. *
  468. * Return: bool: true if it is forwarded else false
  469. */
  470. bool
  471. dp_rx_intrabss_fwd(struct dp_soc *soc,
  472. struct dp_peer *ta_peer,
  473. uint8_t *rx_tlv_hdr,
  474. qdf_nbuf_t nbuf,
  475. struct hal_rx_msdu_metadata msdu_metadata)
  476. {
  477. uint16_t len;
  478. uint8_t is_frag;
  479. uint16_t da_peer_id = HTT_INVALID_PEER;
  480. struct dp_peer *da_peer = NULL;
  481. bool is_da_bss_peer = false;
  482. struct dp_ast_entry *ast_entry;
  483. qdf_nbuf_t nbuf_copy;
  484. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  485. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  486. struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
  487. tid_stats.tid_rx_stats[ring_id][tid];
  488. /* check if the destination peer is available in peer table
  489. * and also check if the source peer and destination peer
  490. * belong to the same vap and destination peer is not bss peer.
  491. */
  492. if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
  493. ast_entry = soc->ast_table[msdu_metadata.da_idx];
  494. if (!ast_entry)
  495. return false;
  496. if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
  497. ast_entry->is_active = TRUE;
  498. return false;
  499. }
  500. da_peer_id = ast_entry->peer_id;
  501. if (da_peer_id == HTT_INVALID_PEER)
  502. return false;
  503. /* TA peer cannot be same as peer(DA) on which AST is present
  504. * this indicates a change in topology and that AST entries
  505. * are yet to be updated.
  506. */
  507. if (da_peer_id == ta_peer->peer_id)
  508. return false;
  509. if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
  510. return false;
  511. da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
  512. DP_MOD_ID_RX);
  513. if (!da_peer)
  514. return false;
  515. is_da_bss_peer = da_peer->bss_peer;
  516. dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
  517. if (!is_da_bss_peer) {
  518. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  519. is_frag = qdf_nbuf_is_frag(nbuf);
  520. memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
  521. /* If the source or destination peer is in the isolation
  522. * list, don't forward; instead push up to the bridge stack.
  523. */
  524. if (dp_get_peer_isolation(ta_peer) ||
  525. dp_get_peer_isolation(da_peer))
  526. return false;
  527. /* linearize the nbuf just before we send to
  528. * dp_tx_send()
  529. */
  530. if (qdf_unlikely(is_frag)) {
  531. if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
  532. return false;
  533. nbuf = qdf_nbuf_unshare(nbuf);
  534. if (!nbuf) {
  535. DP_STATS_INC_PKT(ta_peer,
  536. rx.intra_bss.fail,
  537. 1,
  538. len);
  539. /* return true even though the pkt is
  540. * not forwarded. Basically skb_unshare
  541. * failed and we want to continue with
  542. * next nbuf.
  543. */
  544. tid_stats->fail_cnt[INTRABSS_DROP]++;
  545. return true;
  546. }
  547. }
  548. if (!dp_tx_send((struct cdp_soc_t *)soc,
  549. ta_peer->vdev->vdev_id, nbuf)) {
  550. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  551. len);
  552. return true;
  553. } else {
  554. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  555. len);
  556. tid_stats->fail_cnt[INTRABSS_DROP]++;
  557. return false;
  558. }
  559. }
  560. }
  561. /* if it is a broadcast pkt (eg: ARP) and it is not its own
  562. * source, then clone the pkt and send the cloned pkt for
  563. * intra BSS forwarding and original pkt up the network stack
  564. * Note: how do we handle multicast pkts. do we forward
  565. * all multicast pkts as is or let a higher layer module
  566. * like igmpsnoop decide whether to forward or not with
  567. * Mcast enhancement.
  568. */
  569. else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
  570. !ta_peer->bss_peer))) {
  571. if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
  572. goto end;
  573. /* If the source peer is in the isolation list,
  574. * don't forward; instead push up to the bridge stack
  575. */
  576. if (dp_get_peer_isolation(ta_peer))
  577. goto end;
  578. nbuf_copy = qdf_nbuf_copy(nbuf);
  579. if (!nbuf_copy)
  580. goto end;
  581. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  582. memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
  583. /* Set cb->ftype to intrabss FWD */
  584. qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
  585. if (dp_tx_send((struct cdp_soc_t *)soc,
  586. ta_peer->vdev->vdev_id, nbuf_copy)) {
  587. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
  588. tid_stats->fail_cnt[INTRABSS_DROP]++;
  589. qdf_nbuf_free(nbuf_copy);
  590. } else {
  591. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
  592. tid_stats->intrabss_cnt++;
  593. }
  594. }
  595. end:
  596. /* return false as we have to still send the original pkt
  597. * up the stack
  598. */
  599. return false;
  600. }
  601. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
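/*
 * Decision summary (informal recap of dp_rx_intrabss_fwd() above):
 *
 *   if (unicast DA && AST entry resolves to a different, non-BSS peer
 *       on the same vdev)
 *           return !dp_tx_send(...);          // true => nbuf consumed by fwd
 *                                             // (fragmented nbufs linearized first)
 *   else if (mcast/bcast && !ta_peer->bss_peer)
 *           dp_tx_send(..., qdf_nbuf_copy(nbuf)); // a copy is forwarded
 *   return false;                             // original still goes up the stack
 */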
  602. #ifdef MESH_MODE_SUPPORT
  603. /**
  604. * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
  605. *
  606. * @vdev: DP Virtual device handle
  607. * @nbuf: Buffer pointer
  608. * @rx_tlv_hdr: start of rx tlv header
  609. * @peer: pointer to peer
  610. *
  611. * This function allocates memory for mesh receive stats and fills the
  612. * required stats. Stores the memory address in skb cb.
  613. *
  614. * Return: void
  615. */
  616. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  617. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  618. {
  619. struct mesh_recv_hdr_s *rx_info = NULL;
  620. uint32_t pkt_type;
  621. uint32_t nss;
  622. uint32_t rate_mcs;
  623. uint32_t bw;
  624. uint8_t primary_chan_num;
  625. uint32_t center_chan_freq;
  626. struct dp_soc *soc = vdev->pdev->soc;
  627. /* fill recv mesh stats */
  628. rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
  629. /* upper layers are responsible for freeing this memory */
  630. if (!rx_info) {
  631. dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
  632. vdev->pdev->soc);
  633. DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
  634. return;
  635. }
  636. rx_info->rs_flags = MESH_RXHDR_VER1;
  637. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  638. rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
  639. if (qdf_nbuf_is_rx_chfrag_end(nbuf))
  640. rx_info->rs_flags |= MESH_RX_LAST_MSDU;
  641. if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
  642. rx_info->rs_flags |= MESH_RX_DECRYPTED;
  643. rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
  644. rx_tlv_hdr);
  645. if (vdev->osif_get_key)
  646. vdev->osif_get_key(vdev->osif_vdev,
  647. &rx_info->rs_decryptkey[0],
  648. &peer->mac_addr.raw[0],
  649. rx_info->rs_keyix);
  650. }
  651. rx_info->rs_snr = peer->stats.rx.snr;
  652. rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
  653. soc = vdev->pdev->soc;
  654. primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
  655. center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;
  656. if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
  657. rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
  658. soc->ctrl_psoc,
  659. vdev->pdev->pdev_id,
  660. center_chan_freq);
  661. }
  662. rx_info->rs_channel = primary_chan_num;
  663. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  664. rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  665. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  666. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  667. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
  668. (bw << 24);
  669. qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  670. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
  671. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
  672. rx_info->rs_flags,
  673. rx_info->rs_rssi,
  674. rx_info->rs_channel,
  675. rx_info->rs_ratephy1,
  676. rx_info->rs_keyix,
  677. rx_info->rs_snr);
  678. }
  679. /**
  680. * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
  681. *
  682. * @vdev: DP Virtual device handle
  683. * @nbuf: Buffer pointer
  684. * @rx_tlv_hdr: start of rx tlv header
  685. *
  686. * This checks if the received packet matches any filter-out
  687. * category and drops the packet if it matches.
  688. *
  689. * Return: status (0 indicates drop, 1 indicates no drop)
  690. */
  691. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  692. uint8_t *rx_tlv_hdr)
  693. {
  694. union dp_align_mac_addr mac_addr;
  695. struct dp_soc *soc = vdev->pdev->soc;
  696. if (qdf_unlikely(vdev->mesh_rx_filter)) {
  697. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
  698. if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  699. rx_tlv_hdr))
  700. return QDF_STATUS_SUCCESS;
  701. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
  702. if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
  703. rx_tlv_hdr))
  704. return QDF_STATUS_SUCCESS;
  705. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
  706. if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  707. rx_tlv_hdr) &&
  708. !hal_rx_mpdu_get_to_ds(soc->hal_soc,
  709. rx_tlv_hdr))
  710. return QDF_STATUS_SUCCESS;
  711. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
  712. if (hal_rx_mpdu_get_addr1(soc->hal_soc,
  713. rx_tlv_hdr,
  714. &mac_addr.raw[0]))
  715. return QDF_STATUS_E_FAILURE;
  716. if (!qdf_mem_cmp(&mac_addr.raw[0],
  717. &vdev->mac_addr.raw[0],
  718. QDF_MAC_ADDR_SIZE))
  719. return QDF_STATUS_SUCCESS;
  720. }
  721. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
  722. if (hal_rx_mpdu_get_addr2(soc->hal_soc,
  723. rx_tlv_hdr,
  724. &mac_addr.raw[0]))
  725. return QDF_STATUS_E_FAILURE;
  726. if (!qdf_mem_cmp(&mac_addr.raw[0],
  727. &vdev->mac_addr.raw[0],
  728. QDF_MAC_ADDR_SIZE))
  729. return QDF_STATUS_SUCCESS;
  730. }
  731. }
  732. return QDF_STATUS_E_FAILURE;
  733. }
  734. #else
  735. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  736. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  737. {
  738. }
  739. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  740. uint8_t *rx_tlv_hdr)
  741. {
  742. return QDF_STATUS_E_FAILURE;
  743. }
  744. #endif
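/*
 * rs_ratephy1 layout as packed in dp_rx_fill_mesh_stats() above
 * (rate_mcs | nss << 8 | pkt_type << 16 | bw << 24), so a consumer could,
 * for example, unpack it as:
 *
 *   rate_mcs = rs_ratephy1 & 0xff;
 *   nss      = (rs_ratephy1 >> 8) & 0xff;
 *   pkt_type = (rs_ratephy1 >> 16) & 0xff;
 *   bw       = (rs_ratephy1 >> 24) & 0xff;
 */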
  745. #ifdef FEATURE_NAC_RSSI
  746. /**
  747. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  748. * @soc: DP SOC handle
  749. * @mpdu: mpdu for which peer is invalid
  750. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
  751. * pool_id have the same mapping)
  752. *
  753. * return: integer type
  754. */
  755. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  756. uint8_t mac_id)
  757. {
  758. struct dp_invalid_peer_msg msg;
  759. struct dp_vdev *vdev = NULL;
  760. struct dp_pdev *pdev = NULL;
  761. struct ieee80211_frame *wh;
  762. qdf_nbuf_t curr_nbuf, next_nbuf;
  763. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  764. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
  765. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  766. dp_rx_debug("%pK: Drop decapped frames", soc);
  767. goto free;
  768. }
  769. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  770. if (!DP_FRAME_IS_DATA(wh)) {
  771. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  772. goto free;
  773. }
  774. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  775. dp_rx_err("%pK: Invalid nbuf length", soc);
  776. goto free;
  777. }
  778. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  779. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  780. dp_rx_err("%pK: PDEV %s", soc, !pdev ? "not found" : "down");
  781. goto free;
  782. }
  783. if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
  784. QDF_STATUS_SUCCESS)
  785. return 0;
  786. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  787. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  788. QDF_MAC_ADDR_SIZE) == 0) {
  789. goto out;
  790. }
  791. }
  792. if (!vdev) {
  793. dp_rx_err("%pK: VDEV not found", soc);
  794. goto free;
  795. }
  796. out:
  797. msg.wh = wh;
  798. qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
  799. msg.nbuf = mpdu;
  800. msg.vdev_id = vdev->vdev_id;
  801. /*
  802. * NOTE: Only valid for HKv1.
  803. * If smart monitor mode is enabled on RE, we are getting invalid
  804. * peer frames with RA as STA mac of RE and the TA not matching
  805. * with any NAC list or the BSSID. Such frames need to be dropped
  806. * in order to avoid HM_WDS false addition.
  807. */
  808. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
  809. if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
  810. dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
  811. soc, wh->i_addr1);
  812. goto free;
  813. }
  814. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
  815. (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
  816. pdev->pdev_id, &msg);
  817. }
  818. free:
  819. /* Drop and free packet */
  820. curr_nbuf = mpdu;
  821. while (curr_nbuf) {
  822. next_nbuf = qdf_nbuf_next(curr_nbuf);
  823. qdf_nbuf_free(curr_nbuf);
  824. curr_nbuf = next_nbuf;
  825. }
  826. return 0;
  827. }
  828. /**
  829. * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
  830. * @soc: DP SOC handle
  831. * @mpdu: mpdu for which peer is invalid
  832. * @mpdu_done: if an mpdu is completed
  833. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
  834. * pool_id have the same mapping)
  835. *
  836. * Return: void
  837. */
  838. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  839. qdf_nbuf_t mpdu, bool mpdu_done,
  840. uint8_t mac_id)
  841. {
  842. /* Only trigger the process when mpdu is completed */
  843. if (mpdu_done)
  844. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  845. }
  846. #else
  847. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  848. uint8_t mac_id)
  849. {
  850. qdf_nbuf_t curr_nbuf, next_nbuf;
  851. struct dp_pdev *pdev;
  852. struct dp_vdev *vdev = NULL;
  853. struct ieee80211_frame *wh;
  854. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  855. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
  856. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  857. if (!DP_FRAME_IS_DATA(wh)) {
  858. QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
  859. "only for data frames");
  860. goto free;
  861. }
  862. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  863. dp_rx_info_rl("%pK: Invalid nbuf length", soc);
  864. goto free;
  865. }
  866. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  867. if (!pdev) {
  868. dp_rx_info_rl("%pK: PDEV not found", soc);
  869. goto free;
  870. }
  871. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  872. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  873. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  874. QDF_MAC_ADDR_SIZE) == 0) {
  875. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  876. goto out;
  877. }
  878. }
  879. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  880. if (!vdev) {
  881. dp_rx_info_rl("%pK: VDEV not found", soc);
  882. goto free;
  883. }
  884. out:
  885. if (soc->cdp_soc.ol_ops->rx_invalid_peer)
  886. soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
  887. free:
  888. /* reset the head and tail pointers */
  889. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  890. if (pdev) {
  891. pdev->invalid_peer_head_msdu = NULL;
  892. pdev->invalid_peer_tail_msdu = NULL;
  893. }
  894. /* Drop and free packet */
  895. curr_nbuf = mpdu;
  896. while (curr_nbuf) {
  897. next_nbuf = qdf_nbuf_next(curr_nbuf);
  898. qdf_nbuf_free(curr_nbuf);
  899. curr_nbuf = next_nbuf;
  900. }
  901. /* Reset the head and tail pointers */
  902. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  903. if (pdev) {
  904. pdev->invalid_peer_head_msdu = NULL;
  905. pdev->invalid_peer_tail_msdu = NULL;
  906. }
  907. return 0;
  908. }
  909. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  910. qdf_nbuf_t mpdu, bool mpdu_done,
  911. uint8_t mac_id)
  912. {
  913. /* Process the nbuf */
  914. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  915. }
  916. #endif
  917. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  918. #ifdef RECEIVE_OFFLOAD
  919. /**
  920. * dp_rx_print_offload_info() - Print offload info from RX TLV
  921. * @soc: dp soc handle
  922. * @msdu: MSDU for which the offload info is to be printed
  923. *
  924. * Return: None
  925. */
  926. static void dp_rx_print_offload_info(struct dp_soc *soc,
  927. qdf_nbuf_t msdu)
  928. {
  929. dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
  930. dp_verbose_debug("lro_eligible 0x%x",
  931. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
  932. dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
  933. dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
  934. dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
  935. dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
  936. dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
  937. dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
  938. dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
  939. dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
  940. dp_verbose_debug("---------------------------------------------------------");
  941. }
  942. /**
  943. * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
  944. * @soc: DP SOC handle
  945. * @rx_tlv: RX TLV received for the msdu
  946. * @msdu: msdu for which GRO info needs to be filled
  947. * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
  948. *
  949. * Return: None
  950. */
  951. void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  952. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  953. {
  954. struct hal_offload_info offload_info;
  955. if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
  956. return;
  957. if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
  958. return;
  959. *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
  960. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
  961. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
  962. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  963. hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  964. rx_tlv);
  965. QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
  966. QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
  967. QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
  968. QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
  969. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
  970. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
  971. QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
  972. dp_rx_print_offload_info(soc, msdu);
  973. }
  974. #endif /* RECEIVE_OFFLOAD */
  975. /**
  976. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  977. *
  978. * @soc: DP soc handle
  979. * @nbuf: pointer to msdu.
  980. * @mpdu_len: mpdu length
  981. *
  982. * Return: true if nbuf is the last msdu of the mpdu, else false.
  983. */
  984. static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
  985. qdf_nbuf_t nbuf, uint16_t *mpdu_len)
  986. {
  987. bool last_nbuf;
  988. if (*mpdu_len > (RX_DATA_BUFFER_SIZE - soc->rx_pkt_tlv_size)) {
  989. qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
  990. last_nbuf = false;
  991. } else {
  992. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + soc->rx_pkt_tlv_size));
  993. last_nbuf = true;
  994. }
  995. *mpdu_len -= (RX_DATA_BUFFER_SIZE - soc->rx_pkt_tlv_size);
  996. return last_nbuf;
  997. }
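/*
 * Worked example (assuming RX_DATA_BUFFER_SIZE = 2048 and
 * soc->rx_pkt_tlv_size = 128, i.e. 1920 payload bytes per buffer; the
 * actual sizes depend on the target configuration). For mpdu_len = 4000
 * the helper above is called once per chained nbuf:
 *
 *   call 1: 4000 > 1920 -> pktlen = 2048, last_nbuf = false, mpdu_len = 2080
 *   call 2: 2080 > 1920 -> pktlen = 2048, last_nbuf = false, mpdu_len =  160
 *   call 3:  160 <= 1920 -> pktlen = 160 + 128 = 288, last_nbuf = true
 */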
  998. /**
  999. * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
  1000. * multiple nbufs.
  1001. * @soc: DP SOC handle
  1002. * @nbuf: pointer to the first msdu of an amsdu.
  1003. *
  1004. * This function implements the creation of RX frag_list for cases
  1005. * where an MSDU is spread across multiple nbufs.
  1006. *
  1007. * Return: returns the head nbuf which contains complete frag_list.
  1008. */
  1009. qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1010. {
  1011. qdf_nbuf_t parent, frag_list, next = NULL;
  1012. uint16_t frag_list_len = 0;
  1013. uint16_t mpdu_len;
  1014. bool last_nbuf;
  1015. /*
  1016. * Use the msdu len from the REO entry descriptor instead, since
  1017. * there are cases where the RX PKT TLV is corrupted while the msdu_len
  1018. * from the REO descriptor is correct for non-raw RX scatter msdus.
  1019. */
  1020. mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1021. /*
  1022. * this is a case where the complete msdu fits in one single nbuf.
  1023. * in this case HW sets both start and end bit and we only need to
  1024. * reset these bits for RAW mode simulator to decap the pkt
  1025. */
  1026. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  1027. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1028. qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
  1029. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1030. return nbuf;
  1031. }
  1032. /*
  1033. * This is a case where we have multiple msdus (A-MSDU) spread across
  1034. * multiple nbufs. here we create a fraglist out of these nbufs.
  1035. *
  1036. * the moment we encounter a nbuf with continuation bit set we
  1037. * know for sure we have an MSDU which is spread across multiple
  1038. * nbufs. We loop through and reap nbufs till we reach last nbuf.
  1039. */
  1040. parent = nbuf;
  1041. frag_list = nbuf->next;
  1042. nbuf = nbuf->next;
  1043. /*
  1044. * set the start bit in the first nbuf we encounter with continuation
  1045. * bit set. This has the proper mpdu length set as it is the first
  1046. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  1047. * nbufs will form the frag_list of the parent nbuf.
  1048. */
  1049. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  1050. last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len);
  1051. /*
  1052. * HW issue: MSDU cont bit is set but reported MPDU length can fit
  1053. * in to single buffer
  1054. *
  1055. * Increment error stats and avoid SG list creation
  1056. */
  1057. if (last_nbuf) {
  1058. DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
  1059. qdf_nbuf_pull_head(parent, soc->rx_pkt_tlv_size);
  1060. return parent;
  1061. }
  1062. /*
  1063. * this is where we set the length of the fragments which are
  1064. * associated to the parent nbuf. We iterate through the frag_list
  1065. * till we hit the last_nbuf of the list.
  1066. */
  1067. do {
  1068. last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len);
  1069. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1070. frag_list_len += qdf_nbuf_len(nbuf);
  1071. if (last_nbuf) {
  1072. next = nbuf->next;
  1073. nbuf->next = NULL;
  1074. break;
  1075. }
  1076. nbuf = nbuf->next;
  1077. } while (!last_nbuf);
  1078. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  1079. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  1080. parent->next = next;
  1081. qdf_nbuf_pull_head(parent, soc->rx_pkt_tlv_size);
  1082. return parent;
  1083. }
  1084. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1085. #ifdef QCA_PEER_EXT_STATS
  1086. /*
  1087. * dp_rx_compute_tid_delay - Compute per-TID delay stats
  1088. * @stats: per-TID delay stats (struct cdp_delay_tid_stats)
  1089. * @nbuf: NBuffer
  1090. *
  1091. * Return: Void
  1092. */
  1093. void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  1094. qdf_nbuf_t nbuf)
  1095. {
  1096. struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
  1097. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1098. dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
  1099. }
  1100. #endif /* QCA_PEER_EXT_STATS */
  1101. /**
  1102. * dp_rx_compute_delay() - Compute and fill in all timestamps
  1103. * to pass in correct fields
  1104. *
  1105. * @vdev: vdev handle
  1106. * @nbuf: rx nbuf for which the delay is computed
  1107. *
  1108. * Return: none
  1109. */
  1110. void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1111. {
  1112. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  1113. int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
  1114. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1115. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1116. uint32_t interframe_delay =
  1117. (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
  1118. dp_update_delay_stats(vdev->pdev, to_stack, tid,
  1119. CDP_DELAY_STATS_REAP_STACK, ring_id);
  1120. /*
  1121. * Update interframe delay stats calculated at deliver_data_ol point.
  1122. * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
  1123. * interframe delay will not be calculated correctly for the 1st frame.
  1124. * On the other hand, this avoids an extra per-packet check
  1125. * of vdev->prev_rx_deliver_tstamp.
  1126. */
  1127. dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
  1128. CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
  1129. vdev->prev_rx_deliver_tstamp = current_ts;
  1130. }
  1131. /**
  1132. * dp_rx_drop_nbuf_list() - drop an nbuf list
  1133. * @pdev: dp pdev reference
  1134. * @buf_list: buffer list to be dropped
  1135. *
  1136. * Return: int (number of bufs dropped)
  1137. */
  1138. static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  1139. qdf_nbuf_t buf_list)
  1140. {
  1141. struct cdp_tid_rx_stats *stats = NULL;
  1142. uint8_t tid = 0, ring_id = 0;
  1143. int num_dropped = 0;
  1144. qdf_nbuf_t buf, next_buf;
  1145. buf = buf_list;
  1146. while (buf) {
  1147. ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
  1148. next_buf = qdf_nbuf_queue_next(buf);
  1149. tid = qdf_nbuf_get_tid_val(buf);
  1150. if (qdf_likely(pdev)) {
  1151. stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1152. stats->fail_cnt[INVALID_PEER_VDEV]++;
  1153. stats->delivered_to_stack--;
  1154. }
  1155. qdf_nbuf_free(buf);
  1156. buf = next_buf;
  1157. num_dropped++;
  1158. }
  1159. return num_dropped;
  1160. }
  1161. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1162. /**
  1163. * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  1164. * @soc: core txrx main context
  1165. * @vdev: vdev
  1166. * @peer: peer
  1167. * @nbuf_head: skb list head
  1168. *
  1169. * Return: true if packet is delivered to netdev per STA.
  1170. */
  1171. static inline bool
  1172. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1173. struct dp_peer *peer, qdf_nbuf_t nbuf_head)
  1174. {
  1175. /*
  1176. * When extended WDS is disabled, frames are sent to AP netdevice.
  1177. */
  1178. if (qdf_likely(!vdev->wds_ext_enabled))
  1179. return false;
  1180. /*
  1181. * There can be 2 cases:
  1182. * 1. Send frame to parent netdev if it's not for a netdev per STA
  1183. * 2. If frame is meant for netdev per STA:
  1184. * a. Send frame to appropriate netdev using registered fp.
  1185. * b. If fp is NULL, drop the frames.
  1186. */
  1187. if (!peer->wds_ext.init)
  1188. return false;
  1189. if (peer->osif_rx)
  1190. peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
  1191. else
  1192. dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1193. return true;
  1194. }
  1195. #else
  1196. static inline bool
  1197. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1198. struct dp_peer *peer, qdf_nbuf_t nbuf_head)
  1199. {
  1200. return false;
  1201. }
  1202. #endif
  1203. #ifdef PEER_CACHE_RX_PKTS
  1204. /**
  1205. * dp_rx_flush_rx_cached() - flush cached rx frames
  1206. * @peer: peer
  1207. * @drop: flag to drop frames or forward to net stack
  1208. *
  1209. * Return: None
  1210. */
  1211. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  1212. {
  1213. struct dp_peer_cached_bufq *bufqi;
  1214. struct dp_rx_cached_buf *cache_buf = NULL;
  1215. ol_txrx_rx_fp data_rx = NULL;
  1216. int num_buff_elem;
  1217. QDF_STATUS status;
  1218. if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
  1219. qdf_atomic_dec(&peer->flush_in_progress);
  1220. return;
  1221. }
  1222. qdf_spin_lock_bh(&peer->peer_info_lock);
  1223. if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
  1224. data_rx = peer->vdev->osif_rx;
  1225. else
  1226. drop = true;
  1227. qdf_spin_unlock_bh(&peer->peer_info_lock);
  1228. bufqi = &peer->bufq_info;
  1229. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1230. qdf_list_remove_front(&bufqi->cached_bufq,
  1231. (qdf_list_node_t **)&cache_buf);
  1232. while (cache_buf) {
  1233. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
  1234. cache_buf->buf);
  1235. bufqi->entries -= num_buff_elem;
  1236. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1237. if (drop) {
  1238. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1239. cache_buf->buf);
  1240. } else {
  1241. /* Flush the cached frames to OSIF DEV */
  1242. status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
  1243. if (status != QDF_STATUS_SUCCESS)
  1244. bufqi->dropped = dp_rx_drop_nbuf_list(
  1245. peer->vdev->pdev,
  1246. cache_buf->buf);
  1247. }
  1248. qdf_mem_free(cache_buf);
  1249. cache_buf = NULL;
  1250. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1251. qdf_list_remove_front(&bufqi->cached_bufq,
  1252. (qdf_list_node_t **)&cache_buf);
  1253. }
  1254. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1255. qdf_atomic_dec(&peer->flush_in_progress);
  1256. }
/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @peer: peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
	int num_buff_elem;

	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
		    bufqi->dropped);
	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		return QDF_STATUS_E_RESOURCES;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);

	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_NOMEM;
	}

	cache_buf->buf = rx_buf_list;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	return QDF_STATUS_SUCCESS;
}

static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif
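
/*
 * Illustrative sketch (not part of the driver build): the cached-bufq pair
 * above is used when frames arrive before the OS interface has registered an
 * rx callback. A caller would roughly do the following; the peer and nbuf
 * values here are hypothetical placeholders.
 *
 *	// rx path, no vdev->osif_rx registered yet:
 *	if (dp_rx_is_peer_cache_bufq_supported())
 *		dp_rx_enqueue_rx(peer, nbuf_list);	// queue until ready
 *
 *	// later, once the callback is registered (or on teardown):
 *	dp_rx_flush_rx_cached(peer, false);	// deliver the cached frames
 *	dp_rx_flush_rx_cached(peer, true);	// or drop them instead
 */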
#ifndef DELIVERY_TO_STACK_STATUS_CHECK
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 * stack using the appropriate callback functions.
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head)
{
	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
						    peer, nbuf_head)))
		return;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else
		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}
#else
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 * stack using the appropriate callback functions.
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 *
 * Check the return status of the callback function and drop
 * the packets if the return status indicates a failure.
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_peer *peer,
					  qdf_nbuf_t nbuf_head)
{
	int num_nbuf = 0;
	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else if (vdev->osif_rx)
		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
		if (peer)
			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
	}
}
#endif /* DELIVERY_TO_STACK_STATUS_CHECK */
void dp_rx_deliver_to_stack(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_peer *peer,
			    qdf_nbuf_t nbuf_head,
			    qdf_nbuf_t nbuf_tail)
{
	int num_nbuf = 0;

	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
		/*
		 * This is a special case where vdev is invalid,
		 * so we cannot know the pdev to which this packet
		 * belonged. Hence we update the soc rx error stats.
		 */
		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
		return;
	}

	/*
	 * It is highly unlikely to have a vdev without a registered rx
	 * callback function. If so, cache or free the nbuf list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (peer && dp_rx_is_peer_cache_bufq_supported()) {
			dp_rx_enqueue_rx(peer, nbuf_head);
		} else {
			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
							nbuf_head);
			DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
		}
		return;
	}

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail, peer->mac_addr.raw);
	}

	dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
}
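
/*
 * Illustrative summary (not compiled): dp_rx_deliver_to_stack() above works
 * through the possible sinks for a reaped nbuf list in a fixed order:
 *
 *	dp_rx_deliver_to_stack(soc, vdev, peer, nbuf_head, nbuf_tail);
 *	// 1. vdev NULL or pending delete     -> drop, count rx.err.invalid_vdev
 *	// 2. no vdev->osif_rx registered yet -> cache on the peer bufq
 *	//                                       (if supported) or drop
 *	// 3. raw/native-wifi decap types     -> osif_rsim_rx_decap() first
 *	// 4. otherwise hand off to dp_rx_check_delivery_to_stack(), which
 *	//    picks between the FISA callback and the regular vdev->osif_rx
 *	//    callback (and, in some configurations, a WDS-ext per-peer
 *	//    callback).
 */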
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_peer *peer_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		peer_local = peer; \
		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       (peer_local), 0, 1); \
	} while (0); \
}
#else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
#endif
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @peer: pointer to the peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * update all the per msdu stats for that nbuf.
 * Return: void
 */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats)
{
	bool is_ampdu, is_not_amsdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	struct dp_vdev *vdev = peer->vdev;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
			qdf_nbuf_is_rx_chfrag_end(nbuf);
	DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
	DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
	DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
	DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));

	tid_stats->msdu_cnt++;
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
		tid_stats->mcast_msdu_cnt++;
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
			tid_stats->bcast_msdu_cnt++;
		}
	}

	/*
	 * currently we can return from here as we have similar stats
	 * updated at per ppdu level instead of msdu level
	 */
	if (!soc->process_rx_status)
		return;

	/*
	 * TODO - For WCN7850 this field is present in ring_desc
	 * Try to use ring desc instead of tlv.
	 */
	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
	DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);

	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_STATS_INC(peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
	 */
	if (nss > 0 && (pkt_type == DOT11_N ||
			pkt_type == DOT11_AC ||
			pkt_type == DOT11_AX))
		DP_STATS_INC(peer, rx.nss[nss - 1], 1);

	DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
	DP_STATS_INCC(peer, rx.err.mic_err, 1,
		      hal_rx_tlv_mic_err_get(soc->hal_soc, rx_tlv_hdr));
	DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
		      hal_rx_tlv_decrypt_err_get(soc->hal_soc, rx_tlv_hdr));

	DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_STATS_INC(peer, rx.reception_type[reception_type], 1);

	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));

	if ((soc->process_rx_status) &&
	    hal_rx_tlv_first_mpdu_get(soc->hal_soc, rx_tlv_hdr)) {
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
		if (!vdev->pdev)
			return;

		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
				     &peer->stats, peer->peer_id,
				     UPDATE_PEER_STATS,
				     vdev->pdev->pdev_id);
#endif
	}
}
#ifndef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	return 1;
}
#endif
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
 * corruption
 * @soc: DP soc
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
 * out-of-bound access from HW
 * @soc: DP soc
 * @pkt_len: Packet length received from HW
 *
 * Return: None
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif
#ifdef DP_RX_PKT_NO_PEER_DELIVER
/**
 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
 * no corresponding peer found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function will try to deliver some RX special frames to the stack
 * even when no matching peer is found. For instance, in the LFR case, some
 * EAPOL data will be sent to the host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
				FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peers)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;
		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
		return;
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	qdf_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	qdf_nbuf_free(nbuf);
}
#endif
/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked() to get the number of valid
 * entries. It should not be called within a SRNG lock. The HW pointer value is
 * synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}
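
/*
 * Illustrative sketch (not compiled into the driver): the near-full check
 * above is a plain half-ring threshold. For a hypothetical destination ring
 * of 1024 entries:
 *
 *	bool near_full;
 *	uint32_t pending;
 *
 *	pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *					     1024, &near_full);
 *	// near_full == true whenever pending >= 512 (1024 >> 1), letting the
 *	// caller budget extra reap quota for this ring.
 */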
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef WLAN_SUPPORT_RX_FISA
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}

/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}

static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_raw_frame(nbuf)) {
		qdf_nbuf_free(nbuf);
		return true;
	}

	return false;
}
#endif
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Returns: None
 */
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Returns: none
 */
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.ingress, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
}
#endif

#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating the offload path
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
				     peer_id, is_offload, pdev->pdev_id);
}

void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
				     soc, nbuf, HTT_INVALID_VDEV,
				     is_offload, 0);
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
{
	QDF_STATUS ret;

	if (vdev->osif_rx_flush) {
		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
		if (!QDF_IS_STATUS_SUCCESS(ret)) {
			dp_err("Failed to flush rx pkts for vdev %d\n",
			       vdev->vdev_id);
			return ret;
		}
	}

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
			       RX_BUFFER_RESERVATION,
			       rx_desc_pool->buf_alignment, FALSE);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return ret;
	}

	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
					 (nbuf_frag_info_t->virt_addr).nbuf,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return ret;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		dp_err("nbuf check x86 failed");
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return ret;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	/*
	 * Try to allocate pointers to the nbuf one page at a time.
	 * Take pointers that can fit in one page of memory and
	 * iterate through the total descriptors that need to be
	 * allocated in order of pages. Reuse the pointers that
	 * have been allocated to fit in one page across each
	 * iteration to index into the nbuf.
	 */
	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;

	/*
	 * Add an extra page to store the remainder if any
	 */
	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
		total_pages++;
	nf_info = qdf_mem_malloc(PAGE_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}
	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);

	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, PAGE_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer pointers may not be required
			 * completely based on the number of descriptors. Below
			 * check will ensure we are allocating only the
			 * required number of descriptors.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;
			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}

		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
						     rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			dp_ipa_handle_rx_buf_smmu_mapping(
						dp_soc, nbuf,
						rx_desc_pool->buf_size,
						true);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}

	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
	qdf_mem_free(nf_info);

	if (!nr_nbuf_total) {
		dp_err("No nbuf's allocated");
		QDF_BUG(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);
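
/*
 * Illustrative sketch (not compiled): the paging arithmetic above sizes the
 * temporary nbuf_frag_info array one PAGE_SIZE at a time. Assuming a
 * hypothetical 4096-byte page, a 16-byte struct dp_rx_nbuf_frag_info and
 * 10000 descriptors:
 *
 *	nbuf_ptrs_per_page = 4096 / 16;		// 256 pointers per page
 *	total_pages = (10000 * 16) / 4096;	// 39 full pages
 *	if ((10000 * 16) % 4096)		// plus one partial page
 *		total_pages++;			// -> 40 iterations
 *
 * Each iteration reuses the same one-page array, so the attach path never
 * holds more than PAGE_SIZE of pointer storage at once.
 */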
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 * monitor destination ring via frag.
 *
 * Enable this flag only for monitor destination buffer processing
 * if DP_RX_MON_MEM_FRAG feature is enabled.
 * If flag is set then frag based function will be called for alloc,
 * map, prep desc and free ops for desc buffer else normal nbuf based
 * function will be called.
 *
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 *
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/**
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refil %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}
/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *	   QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
					 rx_desc_pool, rxdma_entries - 1);
}
/*
 * dp_rx_pdev_buffers_free - Free nbufs (skbs)
 *
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif
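
/*
 * Illustrative sketch (not compiled): the header-skip arithmetic above.
 * Assuming a hypothetical rx_pkt_tlv_size of 128 bytes, a 2-byte L3 header
 * pad and a 64-byte MSDU reported by the TLVs for a non-fragment nbuf:
 *
 *	skip_len = 2 + 128;			// pad + RX TLV headers = 130
 *	qdf_nbuf_set_pktlen(nbuf, 64 + 130);	// total skb length = 194
 *	qdf_nbuf_pull_head(nbuf, 130);		// data now points at the MSDU
 *
 * For a fragment nbuf, only the L3 pad is pulled (skip_len = l2_hdr_offset)
 * and the packet length is left unchanged.
 */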