dp_rx.c
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
    void *hal_soc = soc->hal_soc;

    hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
    dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring_hdl,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc)
{
    hal_soc_handle_t hal_soc = soc->hal_soc;

    dp_rx_desc_dump(rx_desc);
    hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
    hal_srng_dump_ring(hal_soc, hal_ring_hdl);
    qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc)
{
    uint8_t return_buffer_manager;

    if (qdf_unlikely(!rx_desc)) {
        /*
         * This is an unlikely case where the cookie obtained
         * from the ring_desc is invalid and hence we are not
         * able to find the corresponding rx_desc
         */
        goto fail;
    }

    return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
    if (qdf_unlikely(!(return_buffer_manager ==
                           HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
                       return_buffer_manager ==
                           HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
        goto fail;
    }

    return QDF_STATUS_SUCCESS;

fail:
    DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
    dp_err("Ring Desc:");
    hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
    return QDF_STATUS_E_NULL_VALUE;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
    QDF_STATUS ret = QDF_STATUS_E_FAILURE;

    (nbuf_frag_info_t->virt_addr).vaddr =
        qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

    if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
        dp_err("Frag alloc failed");
        DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
        return QDF_STATUS_E_NOMEM;
    }

    ret = qdf_mem_map_page(dp_soc->osdev,
                           (nbuf_frag_info_t->virt_addr).vaddr,
                           QDF_DMA_FROM_DEVICE,
                           rx_desc_pool->buf_size,
                           &nbuf_frag_info_t->paddr);

    if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
        qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
        dp_err("Frag map failed");
        DP_STATS_INC(dp_pdev, replenish.map_err, 1);
        return QDF_STATUS_E_FAULT;
    }

    return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
                           struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                           struct dp_pdev *dp_pdev,
                           struct rx_desc_pool *rx_desc_pool)
{
    return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: Refill ring HAL handle
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                               hal_ring_handle_t hal_ring_hdl,
                               uint32_t num_req, uint32_t num_refill)
{
    struct dp_refill_info_record *record;
    uint32_t idx;
    uint32_t tp;
    uint32_t hp;

    if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
                     !soc->rx_refill_ring_history[ring_num]))
        return;

    idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
                                    DP_RX_REFILL_HIST_MAX);

    /* No NULL check needed for record since it's an array */
    record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

    hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
    record->timestamp = qdf_get_log_timestamp();
    record->num_req = num_req;
    record->num_refill = num_refill;
    record->hp = hp;
    record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                               hal_ring_handle_t hal_ring_hdl,
                               uint32_t num_req, uint32_t num_refill)
{
}
#endif
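
/*
 * Illustrative sketch (not driver code): the refill history is a
 * fixed-size circular log. dp_history_get_next_index() is assumed here
 * to atomically advance an index and wrap it with a power-of-two mask,
 * roughly equivalent to:
 *
 *     static inline uint32_t next_history_idx(qdf_atomic_t *curr,
 *                                             uint32_t max_entries)
 *     {
 *         // assumes max_entries (e.g. DP_RX_REFILL_HIST_MAX) is a
 *         // power of two so the mask implements modulo wrap-around
 *         return qdf_atomic_inc_return(curr) & (max_entries - 1);
 *     }
 *
 * so concurrent recorders each claim a unique slot without locking and
 * the oldest entries are silently overwritten.
 */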
/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer
 *                                          and map
 *
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: number of entries available in the ring
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
                                     uint32_t mac_id,
                                     uint32_t num_entries_avail,
                                     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
                                     struct dp_pdev *dp_pdev,
                                     struct rx_desc_pool *rx_desc_pool)
{
    QDF_STATUS ret = QDF_STATUS_E_FAILURE;

    (nbuf_frag_info_t->virt_addr).nbuf =
        dp_rx_buffer_pool_nbuf_alloc(dp_soc,
                                     mac_id,
                                     rx_desc_pool,
                                     num_entries_avail);
    if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
        dp_err("nbuf alloc failed");
        DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
        return QDF_STATUS_E_NOMEM;
    }

    ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
                                     nbuf_frag_info_t);
    if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
        dp_rx_buffer_pool_nbuf_free(dp_soc,
                                    (nbuf_frag_info_t->virt_addr).nbuf, mac_id);
        dp_err("nbuf map failed");
        DP_STATS_INC(dp_pdev, replenish.map_err, 1);
        return QDF_STATUS_E_FAULT;
    }

    nbuf_frag_info_t->paddr =
        qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

    dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
                                      (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
                                      rx_desc_pool->buf_size,
                                      true);

    ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
                         &nbuf_frag_info_t->paddr,
                         rx_desc_pool);
    if (ret == QDF_STATUS_E_FAILURE) {
        DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
        return QDF_STATUS_E_ADDRNOTAVAIL;
    }

    return QDF_STATUS_SUCCESS;
}
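
/*
 * Note on the status codes above (illustrative, matching how
 * __dp_rx_buffers_replenish() consumes them later in this file): the
 * replenish loop treats QDF_STATUS_E_FAULT (map failure) as retryable
 * and continues with the next descriptor, while QDF_STATUS_E_NOMEM and
 * QDF_STATUS_E_ADDRNOTAVAIL abort the current replenish pass.
 */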
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
                                    struct dp_srng *dp_rxdma_srng,
                                    struct rx_desc_pool *rx_desc_pool)
{
    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    uint32_t count;
    void *rxdma_ring_entry;
    union dp_rx_desc_list_elem_t *next = NULL;
    void *rxdma_srng;
    qdf_nbuf_t nbuf;
    qdf_dma_addr_t paddr;
    uint16_t num_entries_avail = 0;
    uint16_t num_alloc_desc = 0;
    union dp_rx_desc_list_elem_t *desc_list = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;
    int sync_hw_ptr = 0;

    rxdma_srng = dp_rxdma_srng->hal_srng;

    if (qdf_unlikely(!dp_pdev)) {
        dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
        return QDF_STATUS_E_FAILURE;
    }

    if (qdf_unlikely(!rxdma_srng)) {
        dp_rx_debug("%pK: rxdma srng not initialized", soc);
        return QDF_STATUS_E_FAILURE;
    }

    hal_srng_access_start(soc->hal_soc, rxdma_srng);

    num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
                                               rxdma_srng,
                                               sync_hw_ptr);
    dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
                soc, num_entries_avail);

    if (qdf_unlikely(num_entries_avail <
                     ((dp_rxdma_srng->num_entries * 3) / 4))) {
        hal_srng_access_end(soc->hal_soc, rxdma_srng);
        return QDF_STATUS_E_FAILURE;
    }

    DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);

    num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
                                              rx_desc_pool,
                                              num_entries_avail,
                                              &desc_list,
                                              &tail);
    if (!num_alloc_desc) {
        dp_rx_err("%pK: no free rx_descs in freelist", soc);
        DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
                     num_entries_avail);
        hal_srng_access_end(soc->hal_soc, rxdma_srng);
        return QDF_STATUS_E_NOMEM;
    }

    for (count = 0; count < num_alloc_desc; count++) {
        next = desc_list->next;
        qdf_prefetch(next);

        nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
        if (qdf_unlikely(!nbuf)) {
            DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
            break;
        }

        paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                       rx_desc_pool->buf_size);

        rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
                                                 rxdma_srng);
        qdf_assert_always(rxdma_ring_entry);

        desc_list->rx_desc.nbuf = nbuf;
        desc_list->rx_desc.rx_buf_start = nbuf->data;
        desc_list->rx_desc.unmapped = 0;

        /* rx_desc.in_use should be zero at this time */
        qdf_assert_always(desc_list->rx_desc.in_use == 0);

        desc_list->rx_desc.in_use = 1;
        desc_list->rx_desc.in_err_state = 0;

        hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                     paddr,
                                     desc_list->rx_desc.cookie,
                                     rx_desc_pool->owner);

        desc_list = next;
    }
    qdf_dsb();
    hal_srng_access_end(soc->hal_soc, rxdma_srng);

    /* No need to count the number of bytes received during replenish.
     * Therefore set replenish.pkts.bytes as 0.
     */
    DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
    DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));

    /*
     * add any available free desc back to the free list
     */
    if (desc_list)
        dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
                                         mac_id, rx_desc_pool);

    return QDF_STATUS_SUCCESS;
}
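
/*
 * Illustrative arithmetic for the low-threshold check above (example
 * values, not driver code): with an assumed ring of num_entries = 4096,
 * the function only proceeds when at least (4096 * 3) / 4 = 3072
 * entries are free, i.e. when the ring has drained to one quarter or
 * less of its capacity; otherwise it ends ring access and returns
 * without touching the freelist.
 */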
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
                                 struct dp_srng *dp_rxdma_srng,
                                 struct rx_desc_pool *rx_desc_pool,
                                 uint32_t num_req_buffers,
                                 union dp_rx_desc_list_elem_t **desc_list,
                                 union dp_rx_desc_list_elem_t **tail)
{
    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    uint32_t count;
    void *rxdma_ring_entry;
    union dp_rx_desc_list_elem_t *next;
    void *rxdma_srng;
    qdf_nbuf_t nbuf;
    qdf_dma_addr_t paddr;

    rxdma_srng = dp_rxdma_srng->hal_srng;

    if (qdf_unlikely(!dp_pdev)) {
        dp_rx_err("%pK: pdev is null for mac_id = %d",
                  soc, mac_id);
        return QDF_STATUS_E_FAILURE;
    }

    if (qdf_unlikely(!rxdma_srng)) {
        dp_rx_debug("%pK: rxdma srng not initialized", soc);
        DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
        return QDF_STATUS_E_FAILURE;
    }

    dp_rx_debug("%pK: requested %d buffers for replenish",
                soc, num_req_buffers);

    hal_srng_access_start(soc->hal_soc, rxdma_srng);

    for (count = 0; count < num_req_buffers; count++) {
        next = (*desc_list)->next;
        qdf_prefetch(next);

        nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
        if (qdf_unlikely(!nbuf)) {
            DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
            break;
        }

        paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                       rx_desc_pool->buf_size);

        rxdma_ring_entry = (struct dp_buffer_addr_info *)
            hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
        if (!rxdma_ring_entry)
            break;

        qdf_assert_always(rxdma_ring_entry);

        (*desc_list)->rx_desc.nbuf = nbuf;
        (*desc_list)->rx_desc.rx_buf_start = nbuf->data;
        (*desc_list)->rx_desc.unmapped = 0;

        /* rx_desc.in_use should be zero at this time */
        qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

        (*desc_list)->rx_desc.in_use = 1;
        (*desc_list)->rx_desc.in_err_state = 0;

        hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                     paddr,
                                     (*desc_list)->rx_desc.cookie,
                                     rx_desc_pool->owner);

        *desc_list = next;
    }
    qdf_dsb();
    hal_srng_access_end(soc->hal_soc, rxdma_srng);

    /* No need to count the number of bytes received during replenish.
     * Therefore set replenish.pkts.bytes as 0.
     */
    DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
    DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

    /*
     * add any available free desc back to the free list
     */
    if (*desc_list)
        dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
                                         mac_id, rx_desc_pool);

    return QDF_STATUS_SUCCESS;
}
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
                                              uint32_t mac_id,
                                              struct dp_srng *dp_rxdma_srng,
                                              struct rx_desc_pool *rx_desc_pool,
                                              uint32_t num_req_buffers)
{
    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    uint32_t count;
    uint32_t nr_descs = 0;
    void *rxdma_ring_entry;
    union dp_rx_desc_list_elem_t *next;
    void *rxdma_srng;
    qdf_nbuf_t nbuf;
    qdf_dma_addr_t paddr;
    union dp_rx_desc_list_elem_t *desc_list = NULL;
    union dp_rx_desc_list_elem_t *tail = NULL;

    rxdma_srng = dp_rxdma_srng->hal_srng;

    if (qdf_unlikely(!dp_pdev)) {
        dp_rx_err("%pK: pdev is null for mac_id = %d",
                  soc, mac_id);
        return QDF_STATUS_E_FAILURE;
    }

    if (qdf_unlikely(!rxdma_srng)) {
        dp_rx_debug("%pK: rxdma srng not initialized", soc);
        DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
        return QDF_STATUS_E_FAILURE;
    }

    dp_rx_debug("%pK: requested %d buffers for replenish",
                soc, num_req_buffers);

    nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
                                        num_req_buffers, &desc_list, &tail);
    if (!nr_descs) {
        dp_err("no free rx_descs in freelist");
        DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
        return QDF_STATUS_E_NOMEM;
    }

    dp_debug("got %u RX descs for driver attach", nr_descs);

    hal_srng_access_start(soc->hal_soc, rxdma_srng);

    for (count = 0; count < nr_descs; count++) {
        next = desc_list->next;
        qdf_prefetch(next);

        nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
        if (qdf_unlikely(!nbuf)) {
            DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
            break;
        }

        paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
                                       rx_desc_pool->buf_size);

        rxdma_ring_entry = (struct dp_buffer_addr_info *)
            hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
        if (!rxdma_ring_entry)
            break;

        qdf_assert_always(rxdma_ring_entry);

        desc_list->rx_desc.nbuf = nbuf;
        desc_list->rx_desc.rx_buf_start = nbuf->data;
        desc_list->rx_desc.unmapped = 0;

        /* rx_desc.in_use should be zero at this time */
        qdf_assert_always(desc_list->rx_desc.in_use == 0);

        desc_list->rx_desc.in_use = 1;
        desc_list->rx_desc.in_err_state = 0;

        hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
                                     paddr,
                                     desc_list->rx_desc.cookie,
                                     rx_desc_pool->owner);

        desc_list = next;
    }
    qdf_dsb();
    hal_srng_access_end(soc->hal_soc, rxdma_srng);

    /* No need to count the number of bytes received during replenish.
     * Therefore set replenish.pkts.bytes as 0.
     */
    DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

    return QDF_STATUS_SUCCESS;
}
#endif
/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *                               called during dp rx initialization
 *                               and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *             or NULL during dp rx initialization or out of buffer
 *             interrupt.
 * @tail: tail of descs list
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
                                     struct dp_srng *dp_rxdma_srng,
                                     struct rx_desc_pool *rx_desc_pool,
                                     uint32_t num_req_buffers,
                                     union dp_rx_desc_list_elem_t **desc_list,
                                     union dp_rx_desc_list_elem_t **tail,
                                     const char *func_name)
{
    uint32_t num_alloc_desc;
    uint16_t num_desc_to_free = 0;
    struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
    uint32_t num_entries_avail;
    uint32_t count;
    int sync_hw_ptr = 1;
    struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
    void *rxdma_ring_entry;
    union dp_rx_desc_list_elem_t *next;
    QDF_STATUS ret;
    void *rxdma_srng;
    union dp_rx_desc_list_elem_t *desc_list_append = NULL;
    union dp_rx_desc_list_elem_t *tail_append = NULL;
    union dp_rx_desc_list_elem_t *temp_list = NULL;

    rxdma_srng = dp_rxdma_srng->hal_srng;

    if (qdf_unlikely(!dp_pdev)) {
        dp_rx_err("%pK: pdev is null for mac_id = %d",
                  dp_soc, mac_id);
        return QDF_STATUS_E_FAILURE;
    }

    if (qdf_unlikely(!rxdma_srng)) {
        dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
        DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
        return QDF_STATUS_E_FAILURE;
    }

    dp_rx_debug("%pK: requested %d buffers for replenish",
                dp_soc, num_req_buffers);

    hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

    num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
                                               rxdma_srng,
                                               sync_hw_ptr);

    dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
                dp_soc, num_entries_avail);

    if (!(*desc_list) && (num_entries_avail >
                          ((dp_rxdma_srng->num_entries * 3) / 4))) {
        num_req_buffers = num_entries_avail;
    } else if (num_entries_avail < num_req_buffers) {
        num_desc_to_free = num_req_buffers - num_entries_avail;
        num_req_buffers = num_entries_avail;
    } else if ((*desc_list) &&
               dp_rxdma_srng->num_entries - num_entries_avail <
               CRITICAL_BUFFER_THRESHOLD) {
        /* Append some free descriptors to tail */
        num_alloc_desc =
            dp_rx_get_free_desc_list(dp_soc, mac_id,
                                     rx_desc_pool,
                                     CRITICAL_BUFFER_THRESHOLD,
                                     &desc_list_append,
                                     &tail_append);
        if (num_alloc_desc) {
            temp_list = *desc_list;
            *desc_list = desc_list_append;
            tail_append->next = temp_list;
            num_req_buffers += num_alloc_desc;

            DP_STATS_DEC(dp_pdev,
                         replenish.free_list,
                         num_alloc_desc);
        } else
            dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
    }

    if (qdf_unlikely(!num_req_buffers)) {
        num_desc_to_free = num_req_buffers;
        hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
        goto free_descs;
    }

    /*
     * if desc_list is NULL, allocate the descs from freelist
     */
    if (!(*desc_list)) {
        num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
                                                  rx_desc_pool,
                                                  num_req_buffers,
                                                  desc_list,
                                                  tail);
        if (!num_alloc_desc) {
            dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
            DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
                         num_req_buffers);
            hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
            return QDF_STATUS_E_NOMEM;
        }

        dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc);
        num_req_buffers = num_alloc_desc;
    }

    count = 0;

    while (count < num_req_buffers) {
        /* Flag is set during pdev rx_desc_pool initialization */
        if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
            ret = dp_pdev_frag_alloc_and_map(dp_soc,
                                             &nbuf_frag_info,
                                             dp_pdev,
                                             rx_desc_pool);
        else
            ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
                                                       mac_id,
                                                       num_entries_avail,
                                                       &nbuf_frag_info,
                                                       dp_pdev, rx_desc_pool);

        if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
            if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
                continue;
            break;
        }

        count++;

        rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
                                                 rxdma_srng);
        qdf_assert_always(rxdma_ring_entry);

        next = (*desc_list)->next;

        /* Flag is set during pdev rx_desc_pool initialization */
        if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
            dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
                                 &nbuf_frag_info);
        else
            dp_rx_desc_prep(&((*desc_list)->rx_desc),
                            &nbuf_frag_info);

        /* rx_desc.in_use should be zero at this time */
        qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

        (*desc_list)->rx_desc.in_use = 1;
        (*desc_list)->rx_desc.in_err_state = 0;
        dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
                                   func_name, RX_DESC_REPLENISHED);
        dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
                         nbuf_frag_info.virt_addr.nbuf,
                         (unsigned long long)(nbuf_frag_info.paddr),
                         (*desc_list)->rx_desc.cookie);

        hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
                                     nbuf_frag_info.paddr,
                                     (*desc_list)->rx_desc.cookie,
                                     rx_desc_pool->owner);

        *desc_list = next;
    }

    dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
                                   num_req_buffers, count);

    hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

    dp_rx_schedule_refill_thread(dp_soc);

    dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
                     count, num_desc_to_free);

    /* No need to count the number of bytes received during replenish.
     * Therefore set replenish.pkts.bytes as 0.
     */
    DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
    DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
    DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);

    /*
     * add any available free desc back to the free list
     */
    if (*desc_list)
        dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
                                         mac_id, rx_desc_pool);

    return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);
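
/*
 * Illustrative caller sketch (not driver code): dp_rx.h is assumed to
 * provide a dp_rx_buffers_replenish() wrapper that forwards __func__
 * as func_name. A typical end-of-Rx-processing call site then looks
 * roughly like:
 *
 *     union dp_rx_desc_list_elem_t *head = NULL; // descs reaped in Rx
 *     union dp_rx_desc_list_elem_t *tail = NULL;
 *     uint32_t reaped = 0;                       // filled by rx loop
 *
 *     __dp_rx_buffers_replenish(soc, mac_id,
 *                               &soc->rx_refill_buf_ring[mac_id],
 *                               &soc->rx_desc_buf[mac_id],
 *                               reaped, &head, &tail, __func__);
 *
 * The field names rx_refill_buf_ring/rx_desc_buf are assumptions for
 * illustration. Passing an empty list (head == NULL) with a zero count
 * instead exercises the "fill to 3/4 of the ring" initialization path
 * in the function above.
 */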
/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *                       pkts to RAW mode simulation to
 *                       decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkts were received
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
                  struct dp_txrx_peer *txrx_peer)
{
    qdf_nbuf_t deliver_list_head = NULL;
    qdf_nbuf_t deliver_list_tail = NULL;
    qdf_nbuf_t nbuf;

    nbuf = nbuf_list;
    while (nbuf) {
        qdf_nbuf_t next = qdf_nbuf_next(nbuf);

        DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

        DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
        DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
                                      qdf_nbuf_len(nbuf));
        /*
         * reset the chfrag_start and chfrag_end bits in nbuf cb
         * as this is a non-amsdu pkt and RAW mode simulation expects
         * these bits to be 0 for non-amsdu pkt.
         */
        if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
            qdf_nbuf_is_rx_chfrag_end(nbuf)) {
            qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
            qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
        }

        nbuf = next;
    }

    vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
                             &deliver_list_tail);

    vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
                    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical
 * frame and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb
 * for the nbuf. The code for marking which frames are CRITICAL is accessed
 * via callback. EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical
 * critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
                               qdf_nbuf_t nbuf)
{
    if (vdev->tx_classify_critical_pkt_cb)
        vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
                               qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
    qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif
/*
 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                             uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                             struct cdp_tid_rx_stats *tid_stats)
{
    uint16_t len;
    qdf_nbuf_t nbuf_copy;

    if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
                                        nbuf))
        return true;

    if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
        return false;

    /* If the source peer is in the isolation list, then don't forward;
     * instead push to the bridge stack.
     */
    if (dp_get_peer_isolation(ta_peer))
        return false;

    nbuf_copy = qdf_nbuf_copy(nbuf);
    if (!nbuf_copy)
        return false;

    len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

    qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
    dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

    if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
                                                  tid_stats))
        return false;

    if (dp_tx_send((struct cdp_soc_t *)soc,
                   ta_peer->vdev->vdev_id, nbuf_copy)) {
        DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                      len);
        tid_stats->fail_cnt[INTRABSS_DROP]++;
        dp_rx_nbuf_free(nbuf_copy);
    } else {
        DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                      len);
        tid_stats->intrabss_cnt++;
    }
    return false;
}

/*
 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @tx_vdev_id: VDEV ID for Intra-BSS TX
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
                              uint8_t tx_vdev_id,
                              uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
                              struct cdp_tid_rx_stats *tid_stats)
{
    uint16_t len;

    len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

    /* linearize the nbuf just before we send to
     * dp_tx_send()
     */
    if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
        if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
            return false;

        nbuf = qdf_nbuf_unshare(nbuf);
        if (!nbuf) {
            DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
                                          rx.intra_bss.fail,
                                          1, len);
            /* qdf_nbuf_unshare() failed, so the pkt cannot be
             * forwarded; count it as an intra-BSS drop and move
             * on to the next nbuf.
             */
            tid_stats->fail_cnt[INTRABSS_DROP]++;
            return false;
        }
    }

    qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
    dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

    if (!dp_tx_send((struct cdp_soc_t *)soc,
                    tx_vdev_id, nbuf)) {
        DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
                                      len);
    } else {
        DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
                                      len);
        tid_stats->fail_cnt[INTRABSS_DROP]++;
        return false;
    }

    return true;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
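
/*
 * Illustrative decision sketch (not driver code): an Rx path caller is
 * assumed to dispatch between the two intra-BSS helpers above roughly
 * as follows, where hal_rx_msdu_end_da_is_mcbc() (an assumed HAL
 * accessor for the DA-is-multicast/broadcast bit) selects the
 * copy-based multicast path versus the zero-copy unicast path:
 *
 *     if (hal_rx_msdu_end_da_is_mcbc(soc->hal_soc, rx_tlv_hdr))
 *         fwd = dp_rx_intrabss_mcbc_fwd(soc, ta_peer, rx_tlv_hdr,
 *                                       nbuf, tid_stats);
 *     else
 *         fwd = dp_rx_intrabss_ucast_fwd(soc, ta_peer, tx_vdev_id,
 *                                        rx_tlv_hdr, nbuf, tid_stats);
 *
 * Note the asymmetry: the mcbc path forwards a copy and returns false
 * so the original nbuf is still delivered to the local stack, while
 * the ucast path returns true on a successful forward because
 * ownership of nbuf has moved to the Tx path.
 */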
#ifdef MESH_MODE_SUPPORT
/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr,
                           struct dp_txrx_peer *txrx_peer)
{
    struct mesh_recv_hdr_s *rx_info = NULL;
    uint32_t pkt_type;
    uint32_t nss;
    uint32_t rate_mcs;
    uint32_t bw;
    uint8_t primary_chan_num;
    uint32_t center_chan_freq;
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_peer *peer;
    struct dp_peer *primary_link_peer;
    struct dp_soc *link_peer_soc;
    cdp_peer_stats_param_t buf = {0};

    /* fill recv mesh stats */
    rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

    /* upper layers are responsible for freeing this memory */
    if (!rx_info) {
        dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
                  vdev->pdev->soc);
        DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
        return;
    }

    rx_info->rs_flags = MESH_RXHDR_VER1;
    if (qdf_nbuf_is_rx_chfrag_start(nbuf))
        rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

    if (qdf_nbuf_is_rx_chfrag_end(nbuf))
        rx_info->rs_flags |= MESH_RX_LAST_MSDU;

    peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
    if (peer) {
        if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
            rx_info->rs_flags |= MESH_RX_DECRYPTED;
            rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
                                                      rx_tlv_hdr);
            if (vdev->osif_get_key)
                vdev->osif_get_key(vdev->osif_vdev,
                                   &rx_info->rs_decryptkey[0],
                                   &peer->mac_addr.raw[0],
                                   rx_info->rs_keyix);
        }

        dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
    }

    primary_link_peer = dp_get_primary_link_peer_by_id(soc,
                                                       txrx_peer->peer_id,
                                                       DP_MOD_ID_MESH);
    if (qdf_likely(primary_link_peer)) {
        link_peer_soc = primary_link_peer->vdev->pdev->soc;
        dp_monitor_peer_get_stats_param(link_peer_soc,
                                        primary_link_peer,
                                        cdp_peer_rx_snr, &buf);
        rx_info->rs_snr = buf.rx_snr;
        dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
    }

    rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

    soc = vdev->pdev->soc;
    primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
    center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

    if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
        rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
                               soc->ctrl_psoc,
                               vdev->pdev->pdev_id,
                               center_chan_freq);
    }
    rx_info->rs_channel = primary_chan_num;
    pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
    rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
    bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
    nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
    rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
                           (bw << 24);

    qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

    QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
              FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
              rx_info->rs_flags,
              rx_info->rs_rssi,
              rx_info->rs_channel,
              rx_info->rs_ratephy1,
              rx_info->rs_keyix,
              rx_info->rs_snr);
}

/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out category
 * and drops the packet if it does.
 *
 * Return: QDF_STATUS_SUCCESS indicates the packet should be dropped,
 *         QDF_STATUS_E_FAILURE indicates no drop
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
    union dp_align_mac_addr mac_addr;
    struct dp_soc *soc = vdev->pdev->soc;

    if (qdf_unlikely(vdev->mesh_rx_filter)) {
        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
            if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                      rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
            if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                      rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
            if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
                                       rx_tlv_hdr) &&
                !hal_rx_mpdu_get_to_ds(soc->hal_soc,
                                       rx_tlv_hdr))
                return QDF_STATUS_SUCCESS;

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
            if (hal_rx_mpdu_get_addr1(soc->hal_soc,
                                      rx_tlv_hdr,
                                      &mac_addr.raw[0]))
                return QDF_STATUS_E_FAILURE;

            if (!qdf_mem_cmp(&mac_addr.raw[0],
                             &vdev->mac_addr.raw[0],
                             QDF_MAC_ADDR_SIZE))
                return QDF_STATUS_SUCCESS;
        }

        if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
            if (hal_rx_mpdu_get_addr2(soc->hal_soc,
                                      rx_tlv_hdr,
                                      &mac_addr.raw[0]))
                return QDF_STATUS_E_FAILURE;

            if (!qdf_mem_cmp(&mac_addr.raw[0],
                             &vdev->mac_addr.raw[0],
                             QDF_MAC_ADDR_SIZE))
                return QDF_STATUS_SUCCESS;
        }
    }

    return QDF_STATUS_E_FAILURE;
}
#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                           uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                     uint8_t *rx_tlv_hdr)
{
    return QDF_STATUS_E_FAILURE;
}
#endif
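
/*
 * Illustrative sketch (not driver code): dp_rx_fill_mesh_stats() packs
 * four PHY fields into the 32-bit rs_ratephy1 word as
 * rate_mcs | (nss << 8) | (pkt_type << 16) | (bw << 24), so a consumer
 * of the mesh receive header could unpack it like:
 *
 *     uint32_t ratephy1 = rx_info->rs_ratephy1;
 *     uint32_t rate_mcs = ratephy1 & 0xff;         // MCS index
 *     uint32_t nss      = (ratephy1 >> 8) & 0xff;  // spatial streams
 *     uint32_t pkt_type = (ratephy1 >> 16) & 0xff; // preamble/PHY mode
 *     uint32_t bw       = (ratephy1 >> 24) & 0xff; // bandwidth enum
 *
 * The 8-bit field widths are an assumption inferred from the shift
 * amounts used in the packing expression above.
 */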
#ifdef FEATURE_NAC_RSSI
/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *          pool_id have the same mapping)
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
    struct dp_invalid_peer_msg msg;
    struct dp_vdev *vdev = NULL;
    struct dp_pdev *pdev = NULL;
    struct ieee80211_frame *wh;
    qdf_nbuf_t curr_nbuf, next_nbuf;
    uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
    uint8_t *rx_pkt_hdr = NULL;
    int i = 0;

    if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
        dp_rx_debug("%pK: Drop decapped frames", soc);
        goto free;
    }

    /* In RAW packet, packet header will be part of data */
    rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
    wh = (struct ieee80211_frame *)rx_pkt_hdr;

    if (!DP_FRAME_IS_DATA(wh)) {
        dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
        goto free;
    }

    if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
        dp_rx_err("%pK: Invalid nbuf length", soc);
        goto free;
    }

    /* In the DMAC case the rx_desc_pools are common across PDEVs,
     * so the PDEV cannot be derived from the pool_id.
     *
     * link_id needs to be derived from the TLV tag word, which is
     * disabled by default. For now, add a WAR to find the vdev by
     * brute force; this needs to be fixed once word-based subscription
     * support is added by enabling the TLV tag word.
     */
    if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
        for (i = 0; i < MAX_PDEV_CNT; i++) {
            pdev = soc->pdev_list[i];

            if (!pdev || qdf_unlikely(pdev->is_pdev_down))
                continue;

            TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                                QDF_MAC_ADDR_SIZE) == 0) {
                    goto out;
                }
            }
        }
    } else {
        pdev = dp_get_pdev_for_lmac_id(soc, mac_id);

        if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
            dp_rx_err("%pK: PDEV %s",
                      soc, !pdev ? "not found" : "down");
            goto free;
        }

        if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
            QDF_STATUS_SUCCESS)
            return 0;

        TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
            if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                            QDF_MAC_ADDR_SIZE) == 0) {
                goto out;
            }
        }
    }

    if (!vdev) {
        dp_rx_err("%pK: VDEV not found", soc);
        goto free;
    }

out:
    msg.wh = wh;
    qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
    msg.nbuf = mpdu;
    msg.vdev_id = vdev->vdev_id;

    /*
     * NOTE: Only valid for HKv1.
     * If smart monitor mode is enabled on RE, we are getting invalid
     * peer frames with RA as the STA mac of the RE and the TA not
     * matching any NAC list or the BSSID. Such frames need to be
     * dropped in order to avoid HM_WDS false addition.
     */
    if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
        if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
            dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
                       soc, wh->i_addr1);
            goto free;
        }
        pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
            (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
            pdev->pdev_id, &msg);
    }

free:
    /* Drop and free packet */
    curr_nbuf = mpdu;
    while (curr_nbuf) {
        next_nbuf = qdf_nbuf_next(curr_nbuf);
        dp_rx_nbuf_free(curr_nbuf);
        curr_nbuf = next_nbuf;
    }

    return 0;
}

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *          pool_id have the same mapping)
 *
 * Return: integer type
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
    /* Only trigger the process when mpdu is completed */
    if (mpdu_done)
        dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
                                   uint8_t mac_id)
{
    qdf_nbuf_t curr_nbuf, next_nbuf;
    struct dp_pdev *pdev;
    struct dp_vdev *vdev = NULL;
    struct ieee80211_frame *wh;
    uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
    uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);

    wh = (struct ieee80211_frame *)rx_pkt_hdr;

    if (!DP_FRAME_IS_DATA(wh)) {
        QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
                           "only for data frames");
        goto free;
    }

    if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
        dp_rx_info_rl("%pK: Invalid nbuf length", soc);
        goto free;
    }

    pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    if (!pdev) {
        dp_rx_info_rl("%pK: PDEV not found", soc);
        goto free;
    }

    qdf_spin_lock_bh(&pdev->vdev_list_lock);
    DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
        if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
                        QDF_MAC_ADDR_SIZE) == 0) {
            qdf_spin_unlock_bh(&pdev->vdev_list_lock);
            goto out;
        }
    }
    qdf_spin_unlock_bh(&pdev->vdev_list_lock);

    if (!vdev) {
        dp_rx_info_rl("%pK: VDEV not found", soc);
        goto free;
    }

out:
    if (soc->cdp_soc.ol_ops->rx_invalid_peer)
        soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);

free:
    /* reset the head and tail pointers */
    pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    if (pdev) {
        pdev->invalid_peer_head_msdu = NULL;
        pdev->invalid_peer_tail_msdu = NULL;
    }

    /* Drop and free packet */
    curr_nbuf = mpdu;
    while (curr_nbuf) {
        next_nbuf = qdf_nbuf_next(curr_nbuf);
        dp_rx_nbuf_free(curr_nbuf);
        curr_nbuf = next_nbuf;
    }

    /* Reset the head and tail pointers */
    pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
    if (pdev) {
        pdev->invalid_peer_head_msdu = NULL;
        pdev->invalid_peer_tail_msdu = NULL;
    }

    return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
                                        qdf_nbuf_t mpdu, bool mpdu_done,
                                        uint8_t mac_id)
{
    /* Process the nbuf */
    dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
				     qdf_nbuf_t msdu)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x",
			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
	dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
	dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
	dp_verbose_debug("---------------------------------------------------------");
}
/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	struct hal_offload_info offload_info;

	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

	dp_rx_print_offload_info(soc, msdu);
}
#endif /* RECEIVE_OFFLOAD */
/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
					 qdf_nbuf_t nbuf,
					 uint16_t *mpdu_len,
					 uint32_t l3_pad_len)
{
	bool last_nbuf;
	uint32_t pkt_hdr_size;

	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
	if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
		last_nbuf = false;
		*mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
		last_nbuf = true;
		*mpdu_len = 0;
	}

	return last_nbuf;
}
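
/*
 * Worked example of the split above (illustrative only; the sizes are
 * assumptions for the example, not values from any particular target):
 * with RX_DATA_BUFFER_SIZE = 2048, rx_pkt_tlv_size = 128 and
 * l3_pad_len = 2, a 4000-byte mpdu is consumed as follows:
 *
 *	buf 0: pkt_hdr_size = 130, pktlen = 2048, mpdu_len = 4000 - 1918 = 2082
 *	buf 1: pkt_hdr_size = 128, pktlen = 2048, mpdu_len = 2082 - 1920 = 162
 *	buf 2: pkt_hdr_size = 128, pktlen = 162 + 128 = 290, last_nbuf = true
 *
 * Only the first buffer of a scattered msdu carries the L3 pad, which is
 * why dp_rx_sg_create() passes l3_pad_len = 0 for the fragment buffers.
 */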
/**
 * dp_get_l3_hdr_pad_len() - get L3 header padding length.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 *
 * Return: returns padding length in bytes.
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint32_t l3_hdr_pad = 0;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	while (nbuf) {
		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			/* scattered msdu end with continuation is 0 */
			rx_tlv_hdr = qdf_nbuf_data(nbuf);
			hal_rx_msdu_metadata_get(soc->hal_soc,
						 rx_tlv_hdr,
						 &msdu_metadata);
			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
			break;
		}
		nbuf = nbuf->next;
	}

	return l3_hdr_pad;
}
/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: DP SOC handle
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;
	uint32_t l3_hdr_pad_offset = 0;

	/*
	 * Use the msdu len from the REO entry descriptor instead, since
	 * there are cases where the RX PKT TLV is corrupted while the
	 * msdu_len from the REO descriptor is correct for non-raw RX
	 * scatter msdus.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		return nbuf;
	}

	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);

	/*
	 * L3 header padding is only needed for the 1st buffer
	 * in a scattered msdu
	 */
	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
					  l3_hdr_pad_offset);

	/*
	 * MSDU cont bit is set but reported MPDU length can fit
	 * into a single buffer
	 *
	 * Increment error stats and avoid SG list creation
	 */
	if (last_nbuf) {
		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
		qdf_nbuf_pull_head(parent,
				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
		return parent;
	}

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
		qdf_nbuf_pull_head(nbuf,
				   soc->rx_pkt_tlv_size);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		} else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			dp_err("Invalid packet length\n");
			qdf_assert_always(0);
		}
		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent,
			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
	return parent;
}
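
/*
 * Illustrative sketch of what dp_rx_sg_create() produces (explanatory
 * comment, not extra driver code): for a scattered msdu reaped as a plain
 * nbuf chain A -> B -> C -> D, where C is the last buffer of the msdu and
 * D already belongs to the next msdu, the function returns A with B and C
 * moved into its extension list:
 *
 *	before: A -> B -> C -> D	(linear ->next chain)
 *	after:  A -> D			(->next chain)
 *		A ~> B -> C		(frag_list attached via
 *					 qdf_nbuf_append_ext_list())
 *
 * A keeps the full mpdu length in its cb, while B and C have had their
 * RX TLVs pulled and their pktlen trimmed by dp_rx_adjust_nbuf_len().
 */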
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_compute_tid_delay - Compute per TID delay stats
 * @stats: cdp_delay_tid_stats struct for the rx TID
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf)
{
	struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */
/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 * @vdev: vdev handle
 * @nbuf: rx buffer
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
	struct cdp_tid_rx_stats *rstats =
		&vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

	dp_update_delay_stats(NULL, rstats, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK, ring_id, false);
	/*
	 * Update interframe delay stats calculated at deliver_data_ol point.
	 * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
	 * interframe delay will not be calculated correctly for 1st frame.
	 * On the other side, this will help in avoiding extra per packet check
	 * of vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
	vdev->prev_rx_deliver_tstamp = current_ts;
}
/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0, ring_id = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		if (qdf_likely(pdev)) {
			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
			stats->fail_cnt[INVALID_PEER_VDEV]++;
			stats->delivered_to_stack--;
		}
		dp_rx_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}
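
/*
 * Minimal usage sketch (illustrative; `head` is a hypothetical nbuf list):
 * callers hand over the entire chain and fold the returned count into
 * their own bookkeeping, e.g.
 *
 *	int num_nbuf;
 *
 *	num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, head);
 *	DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
 *
 * Passing a NULL pdev is legal and skips the per-tid accounting, which is
 * how dp_rx_validate_rx_callbacks() handles an invalid vdev below.
 */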
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
 * @soc: core txrx main context
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: true if packet is delivered to netdev per STA.
 */
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	/*
	 * When extended WDS is disabled, frames are sent to AP netdevice.
	 */
	if (qdf_likely(!vdev->wds_ext_enabled))
		return false;

	/*
	 * There can be 2 cases:
	 * 1. Send frame to parent netdev if it's not for netdev per STA
	 * 2. If frame is meant for netdev per STA:
	 *    a. Send frame to appropriate netdev using registered fp.
	 *    b. If fp is NULL, drop the frames.
	 */
	if (!txrx_peer->wds_ext.init)
		return false;

	if (txrx_peer->osif_rx)
		txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
	else
		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

	return true;
}
#else
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	return false;
}
#endif
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward to net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	struct dp_peer_cached_bufq *bufqi;
	struct dp_rx_cached_buf *cache_buf = NULL;
	ol_txrx_rx_fp data_rx = NULL;
	int num_buff_elem;
	QDF_STATUS status;

	/*
	 * Flush dp cached frames only for mld peers and legacy peers, as
	 * link peers don't store cached frames
	 */
	if (IS_MLO_DP_LINK_PEER(peer))
		return;

	if (!peer->txrx_peer) {
		dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
		qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
		return;
	}

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
		data_rx = peer->vdev->osif_rx;
	else
		drop = true;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	bufqi = &peer->txrx_peer->bufq_info;
	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_remove_front(&bufqi->cached_bufq,
			      (qdf_list_node_t **)&cache_buf);
	while (cache_buf) {
		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
								cache_buf->buf);
		bufqi->entries -= num_buff_elem;
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		if (drop) {
			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
							      cache_buf->buf);
		} else {
			/* Flush the cached frames to OSIF DEV */
			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
			if (status != QDF_STATUS_SUCCESS)
				bufqi->dropped = dp_rx_drop_nbuf_list(
							peer->vdev->pdev,
							cache_buf->buf);
		}
		qdf_mem_free(cache_buf);
		cache_buf = NULL;
		qdf_spin_lock_bh(&bufqi->bufq_lock);
		qdf_list_remove_front(&bufqi->cached_bufq,
				      (qdf_list_node_t **)&cache_buf);
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);
	qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
}
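
/*
 * A note on the guard above (explanatory sketch, not additional driver
 * logic): the inc-and-test on flush_in_progress makes concurrent flushes
 * mutually exclusive without holding bufq_lock across the OSIF callback:
 *
 *	if (qdf_atomic_inc_return(&flag) > 1) {	// someone else is flushing
 *		qdf_atomic_dec(&flag);		// undo our claim
 *		return;				// let the owner finish
 *	}
 *	...flush...
 *	qdf_atomic_dec(&flag);			// release ownership
 *
 * bufq_lock is also dropped around data_rx() so the network stack callback
 * never runs under a spinlock.
 */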
/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @txrx_peer: txrx peer
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
	int num_buff_elem;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
						     DP_MOD_ID_RX);

	if (!peer) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
		    bufqi->dropped);
	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_INVAL;
		goto fail;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		ret = QDF_STATUS_E_RESOURCES;
		goto fail;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);

	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	cache_buf->buf = rx_buf_list;

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
	return ret;
}

static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifndef DELIVERY_TO_STACK_STATUS_CHECK
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
						    txrx_peer, nbuf_head)))
		return;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else
		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}
#else
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Check the return status of the call back function and drop
 * the packets if the return status indicates a failure.
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	int num_nbuf = 0;
	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else if (vdev->osif_rx)
		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
		if (txrx_peer)
			DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
					       num_nbuf);
	}
}
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
/*
 * dp_rx_validate_rx_callbacks() - validate rx callbacks
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx peer object
 * @nbuf_head: skb list head
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_validate_rx_callbacks(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf_head)
{
	int num_nbuf;

	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
		/*
		 * This is a special case where vdev is invalid,
		 * so we cannot know the pdev to which this packet
		 * belonged. Hence we update the soc rx error stats.
		 */
		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
			dp_rx_enqueue_rx(txrx_peer, nbuf_head);
		} else {
			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
							nbuf_head);
			DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
					      vdev->pdev->enhanced_stats_en);
		}
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *txrx_peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
					QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail);
	}

	dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);

	return QDF_STATUS_SUCCESS;
}
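
/*
 * Delivery pipeline at a glance (summary comment, no new behaviour):
 *
 *	dp_rx_deliver_to_stack()
 *	    -> dp_rx_validate_rx_callbacks()	reject / cached-enqueue path
 *	    -> osif_rsim_rx_decap()		only for raw/native-wifi decap
 *	    -> dp_rx_check_delivery_to_stack()
 *		   -> dp_rx_deliver_to_stack_ext()	WDS-ext per-STA netdev
 *		   -> osif_fisa_rx() or osif_rx()	normal stack delivery
 *
 * Note that nbuf_tail is only consumed by the decap hook; the OSIF
 * callbacks take the head of the chain and walk it themselves.
 */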
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *txrx_peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
					QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);

	return QDF_STATUS_SUCCESS;
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_txrx_peer *txrx_peer_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		txrx_peer_local = txrx_peer; \
		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       (txrx_peer_local), 0, 1); \
	} while (0); \
}
#else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_rates_stats_update() - update rate stats
 *				from rx msdu.
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 * @sgi: Short Guard Interval
 * @mcs: Modulation and Coding Set
 * @nss: Number of Spatial Streams
 * @bw: BandWidth
 * @pkt_type: Corresponds to preamble
 *
 * To record rates precisely, two factors are considered:
 * specific frames (ARP, DHCP, ssdp, etc.) are excluded, and the
 * impact on rx throughput is kept as low as possible.
 *
 * Return: void
 */
static void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
	uint32_t rix;
	uint16_t ratecode;
	uint32_t avg_rx_rate;
	uint32_t ratekbps;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	if (soc->high_throughput ||
	    dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
		return;
	}

	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);

	/* here pkt_type corresponds to preamble */
	ratekbps = dp_getrateindex(sgi,
				   mcs,
				   nss,
				   pkt_type,
				   bw,
				   punc_mode,
				   &rix,
				   &ratecode);

	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
	avg_rx_rate =
		dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
				ratekbps);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
}
#else
static void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
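
/*
 * Illustrative sketch of the averaging step above (the filter depth shown
 * here is an assumption for the example, not the value dp_ath_rate_lpf()
 * actually uses): each sample nudges the running average toward the latest
 * rate instead of replacing it,
 *
 *	avg' = (avg * (N - 1) + sample) / N
 *
 * so with an assumed N = 8, avg = 100000 kbps and sample = 200000 kbps:
 *
 *	avg' = (100000 * 7 + 200000) / 8 = 112500 kbps
 *
 * which smooths out one-off bursts while still tracking sustained
 * link-speed changes for the roam trigger.
 */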
#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
 *
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
	bool is_ampdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	uint8_t dst_mcs_idx;

	/*
	 * TODO - For KIWI this field is present in ring_desc
	 * Try to use ring desc instead of tlv.
	 */
	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);

	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
		    hal_2_dp_pkt_type_map[pkt_type]);

	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
	 */
	if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
		DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);

	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
				   hal_rx_tlv_mic_err_get(soc->hal_soc,
							  rx_tlv_hdr));
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
				   hal_rx_tlv_decrypt_err_get(soc->hal_soc,
							      rx_tlv_hdr));

	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);

	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
		DP_PEER_EXTD_STATS_INC(txrx_peer,
				       rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
				       1);

	dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
				 sgi, mcs, nss, bw, pkt_type);
}
#else
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
}
#endif
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
	uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);

	/* only count stats per lmac for MLO connection */
	DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
				       QDF_NBUF_CB_RX_PKT_LEN(nbuf),
				       txrx_peer->mld_peer);
}
#else
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
}
#endif
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @txrx_peer: pointer to the txrx peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats)
{
	bool is_not_amsdu;
	struct dp_vdev *vdev = txrx_peer->vdev;
	bool enh_flag;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
		       qdf_nbuf_is_rx_chfrag_end(nbuf);
	DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
				      msdu_len);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
				   is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
				   qdf_nbuf_is_rx_retry_flag(nbuf));
	dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
	tid_stats->msdu_cnt++;
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		enh_flag = vdev->pdev->enhanced_stats_en;
		DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
		tid_stats->mcast_msdu_cnt++;
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
			tid_stats->bcast_msdu_cnt++;
		}
	}

	txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();

	dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
}
#ifndef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	return 1;
}
#endif
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
 *				  corruption
 * @soc: DP SOC handle
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS_SUCCESS if the paddr is sane, QDF_STATUS_E_FAILURE
 *	   otherwise
 */
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
 *				      out of bound access from H.W
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from H.W
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif
#ifdef DP_RX_PKT_NO_PEER_DELIVER
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
 *					   during roaming
 * @vdev: dp_vdev pointer
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: pkt skb pointer
 *
 * This function will check if rx udp data is received from an authorised
 * roamed peer before the peer map indication is received from FW after
 * roaming. This is needed for VoIP scenarios, where the packet loss
 * expected during roaming has to be minimal.
 *
 * Return: bool
 */
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	char *hdr_desc;
	struct ieee80211_frame *wh = NULL;

	hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
					     rx_tlv_hdr);
	wh = (struct ieee80211_frame *)hdr_desc;

	if (vdev->roaming_peer_status ==
	    WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
	    !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
			 QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
	    qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
		return true;

	return false;
}
#else
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif
/**
 * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
 *				      no corresponding peer found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function will try to deliver some RX special frames to stack
 * even if there is no matched peer, for instance in the LFR case where
 * some eapol data will be sent to host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask) ||
	    dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;

		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
		return;
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
}
#endif
/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
 * entries. It should not be called within a SRNG lock. HW pointer value is
 * synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
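
/*
 * Quick numerical check of the near-full test above (illustrative only):
 * for a ring with num_entries = 1024, the threshold is 1024 >> 1 = 512,
 * so 511 pending entries leave *near_full false while 512 or more set it
 * true. Callers can use this to switch to a more aggressive reap budget
 * before the hardware runs out of ring space.
 */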
#ifdef WLAN_SUPPORT_RX_FISA
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#else
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#endif
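
/*
 * Buffer layout handled by dp_rx_skip_tlvs() (explanatory sketch; field
 * widths are examples, not fixed values):
 *
 *	| RX TLVs (rx_pkt_tlv_size) | L3 pad (0..3) | payload ...        |
 *	^ qdf_nbuf_data() before     ^               ^ qdf_nbuf_data() after
 *
 * The FISA build additionally stashes the pad length in the nbuf cb so the
 * flow engine can recover the original alignment later.
 */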
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_raw_frame(nbuf)) {
		dp_rx_nbuf_free(nbuf);
		return true;
	}

	return false;
}
#endif
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Returns: None
 */
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif
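
/*
 * The history buffer behaves as a circular log (conceptual sketch; the
 * actual index helper lives elsewhere in the datapath and may differ in
 * detail):
 *
 *	idx = atomic_increment(&hist->index) % DP_RX_HIST_MAX;
 *	hist->entry[idx] = snapshot;
 *
 * so the last DP_RX_HIST_MAX reaped descriptors are always available when
 * debugging a paddr/cookie corruption reported by the sanity checks above.
 */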
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Returns: none
 */
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.ingress, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
}
#endif
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: offload indication
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
				     peer_id, is_offload, pdev->pdev_id);
}

void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
				     soc, nbuf, HTT_INVALID_VDEV,
				     is_offload, 0);
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
{
	QDF_STATUS ret;

	if (vdev->osif_rx_flush) {
		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
		if (!QDF_IS_STATUS_SUCCESS(ret)) {
			dp_err("Failed to flush rx pkts for vdev %d\n",
			       vdev->vdev_id);
			return ret;
		}
	}

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
			       RX_BUFFER_RESERVATION,
			       rx_desc_pool->buf_alignment, FALSE);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return ret;
	}

	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
					 (nbuf_frag_info_t->virt_addr).nbuf,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return ret;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		dp_err("nbuf check x86 failed");
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return ret;
	}

	return QDF_STATUS_SUCCESS;
}
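
/*
 * The helper above follows the usual alloc -> DMA-map -> paddr-validate
 * sequence. A condensed view of its failure accounting (summary only):
 *
 *	qdf_nbuf_alloc()             fails -> replenish.nbuf_alloc_fail++
 *	qdf_nbuf_map_nbytes_single() fails -> replenish.map_err++ (nbuf freed)
 *	dp_check_paddr()             fails -> replenish.x86_fail++
 *
 * Only when all three steps succeed does the caller get a populated
 * dp_rx_nbuf_frag_info with both virt_addr.nbuf and paddr valid.
 */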
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	/*
	 * Try to allocate pointers to the nbuf one page at a time.
	 * Take pointers that can fit in one page of memory and
	 * iterate through the total descriptors that need to be
	 * allocated in order of pages. Reuse the pointers that
	 * have been allocated to fit in one page across each
	 * iteration to index into the nbuf.
	 */
	total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;

	/*
	 * Add an extra page to store the remainder if any
	 */
	if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
		total_pages++;
	nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}
	nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);

	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer pointers may not be required
			 * completely based on the number of descriptors. Below
			 * check will ensure we are allocating only the
			 * required number of descriptors.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;
			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}

		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
						     rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			dp_ipa_handle_rx_buf_smmu_mapping(
						dp_soc, nbuf,
						rx_desc_pool->buf_size,
						true);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}

	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
	qdf_mem_free(nf_info);

	if (!nr_nbuf_total) {
		dp_err("No nbuf's allocated");
		QDF_BUG(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);
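
/*
 * Worked example of the paging scheme above (the numbers are illustrative
 * assumptions): with DP_BLOCKMEM_SIZE = 4096 and sizeof(*nf_info) = 16,
 * one block holds 4096 / 16 = 256 frag-info slots. Attaching 1000
 * descriptors therefore takes total_pages = (1000 * 16) / 4096 = 3 full
 * pages plus one remainder page, and the single scratch block is zeroed
 * and reused on each of the 4 iterations instead of allocating a 16 KB
 * array up front.
 */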
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *				  monitor destination ring via frag.
 *
 * Enable this flag only for monitor destination buffer processing
 * if DP_RX_MON_MEM_FRAG feature is enabled.
 * If flag is set then frag based function will be called for alloc,
 * map, prep desc and free ops for desc buffer else normal nbuf based
 * function will be called.
 *
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 *
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}

/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
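
/*
 * Pool lifecycle as used by pdev attach/detach (summary of the pool
 * functions in this file, not additional driver logic):
 *
 *	attach:  dp_rx_pdev_desc_pool_alloc()  -> reserve descriptor memory
 *		 dp_rx_pdev_desc_pool_init()   -> set owner/buf_size, freelist
 *		 dp_rx_pdev_buffers_alloc()    -> fill RxDMA ring with nbufs
 *
 *	detach:  dp_rx_pdev_buffers_free()     -> unmap and free nbufs
 *		 dp_rx_pdev_desc_pool_deinit() -> reset freelist, destroy locks
 *		 dp_rx_pdev_desc_pool_free()   -> release descriptor memory
 */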
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}

/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
						dp_rxdma_srng,
						rx_desc_pool,
						rxdma_entries - 1);
}

/*
 * dp_rx_pdev_buffers_free - Free nbufs (skbs)
 *
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (txrx_peer->vdev) {
		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
				  QDF_TX_RX_STATUS_OK);
	}

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif
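
/*
 * Usage sketch for the frame_mask parameter (illustrative; the caller and
 * mask combination shown is an example, mirroring the no-peer path above):
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					rx_tlv_hdr))
 *		return;		// consumed as an exception frame
 *	// otherwise fall back to the regular per-peer delivery path
 *
 * Each FRAME_MASK_* bit opts one protocol into early delivery; frames that
 * match no bit are left for the normal path (the function returns false
 * and does not free the nbuf).
 */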
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif