/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
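
/**
 * dp_rx_dump_info_and_assert() - dump the ring descriptor and the SW rx_desc
 *				  for debug; without DUP_RX_DESC_WAR the whole
 *				  ring is also dumped and an assert is raised
 * @soc: core txrx main context
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor
 * @rx_desc: SW rx descriptor
 */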
#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
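/**
 * dp_rx_desc_sanity() - sanity-check the rx_desc and the return buffer
 *			 manager fetched from the ring descriptor
 * @soc: core txrx main context
 * @hal_soc: HAL soc handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor
 * @rx_desc: SW rx descriptor obtained from the ring cookie
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor is valid,
 *	   QDF_STATUS_E_NULL_VALUE otherwise
 */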
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			   return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).vaddr =
		qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
		dp_err("Frag alloc failed");
		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       (nbuf_frag_info_t->virt_addr).vaddr,
			       QDF_DMA_FROM_DEVICE,
			       rx_desc_pool->buf_size,
			       &nbuf_frag_info_t->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
		dp_err("Frag map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: Refill ring HAL handle, used to read the SW head/tail pointers
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
	struct dp_refill_info_record *record;
	uint32_t idx;
	uint32_t tp;
	uint32_t hp;

	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
			 !soc->rx_refill_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
					DP_RX_REFILL_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);

	record->timestamp = qdf_get_log_timestamp();
	record->num_req = num_req;
	record->num_refill = num_refill;
	record->hp = hp;
	record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif
/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
				     uint32_t mac_id,
				     uint32_t num_entries_avail,
				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
				     struct dp_pdev *dp_pdev,
				     struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
					     mac_id,
					     rx_desc_pool,
					     num_entries_avail);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
					 nbuf_frag_info_t);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		dp_rx_buffer_pool_nbuf_free(dp_soc,
			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
					  (nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size,
					  true, __func__, __LINE__);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return QDF_STATUS_E_ADDRNOTAVAIL;
	}

	return QDF_STATUS_SUCCESS;
}

#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
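/**
 * __dp_rx_buffers_no_map_lt_replenish() - low-threshold replenish of the
 *					   rxdma ring without per-buffer
 *					   map/unmap
 * @soc: core txrx main context
 * @mac_id: mac id of the ring
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 *
 * Replenishes only when at least 3/4 of the ring entries are free, using
 * descriptors taken from the free list.
 *
 * Return: QDF_STATUS
 */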
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);
	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
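
/**
 * __dp_rx_buffers_no_map_replenish() - replenish the rxdma ring with new nbufs
 *					without per-buffer map/unmap
 * @soc: core txrx main context
 * @mac_id: mac id of the ring
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @num_req_buffers: number of buffers requested for replenish
 * @desc_list: list of descriptors to use for the new buffers
 * @tail: tail of the descriptor list
 *
 * Return: QDF_STATUS
 */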
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t nbuf_next;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	/* Allocate required number of nbufs */
	for (count = 0; count < num_req_buffers; count++) {
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			/* Update num_req_buffers to nbufs allocated count */
			num_req_buffers = count;
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		QDF_NBUF_CB_PADDR(nbuf) = paddr;
		DP_RX_LIST_APPEND(nbuf_head,
				  nbuf_tail,
				  nbuf);
	}
	qdf_dsb();

	nbuf = nbuf_head;
	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		nbuf_next = nbuf->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.nbuf = nbuf;
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     QDF_NBUF_CB_PADDR(nbuf),
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
		nbuf = nbuf_next;
	}

	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	while (nbuf) {
		nbuf_next = nbuf->next;
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		qdf_nbuf_free(nbuf);
		nbuf = nbuf_next;
	}

	return QDF_STATUS_SUCCESS;
}
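
/**
 * __dp_pdev_rx_buffers_no_map_attach() - fill the rxdma ring at driver attach
 *					  without per-buffer map/unmap
 * @soc: core txrx main context
 * @mac_id: mac id of the ring
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @num_req_buffers: number of buffers requested for replenish
 *
 * Return: QDF_STATUS
 */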
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
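/**
 * dp_rx_rep_retrieve_paddr() - retrieve the physical address of an nbuf that
 *				is being reposted to the rxdma ring
 * @dp_soc: core txrx main context
 * @nbuf: nbuf to retrieve the paddr for
 * @buf_size: rx buffer size
 *
 * Return: physical address of the nbuf data buffer
 */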
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/*
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 *
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  struct dp_rx_desc *rx_desc)
{
	void *rxdma_srng;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;
	/* No one else should be accessing the srng at this point */
	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

	qdf_assert_always(rxdma_ring_entry);
	rx_desc->in_err_state = 0;

	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
					 rx_desc_pool->buf_size);
	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
				     rx_desc->cookie, rx_desc_pool->owner);

	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}
/*
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 *
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int mac_id, i, j;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
					&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
		/* Only fill up 1/3 of the ring size */
		uint32_t num_req_decs;

		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
		    !rx_desc_pool->array)
			continue;

		num_req_decs = dp_rxdma_srng->num_entries / 3;

		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
			struct dp_rx_desc *rx_desc =
				(struct dp_rx_desc *)&rx_desc_pool->array[i];

			if (rx_desc->in_use) {
				if (j < dp_rxdma_srng->num_entries) {
					dp_rx_desc_replenish(soc, dp_rxdma_srng,
							     rx_desc_pool,
							     rx_desc);
				} else {
					dp_rx_nbuf_unmap(soc, rx_desc, 0);
					rx_desc->unmapped = 0;

					rx_desc->nbuf->next = *nbuf_list;
					*nbuf_list = rx_desc->nbuf;

					dp_rx_add_to_free_desc_list(&head,
								    &tail,
								    rx_desc);
				}
				j++;
			}
		}

		if (head)
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If num of descs in use were less, then we need to replenish
		 * the ring with some buffers
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif
/*
 * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *			       called during dp rx initialization
 *			       and at the end of dp_rx_process.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	uint32_t extra_buffers;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_verbose_debug("%pK: requested %d buffers for replenish",
			 dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
			 dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
		 * total buff requested after adding extra buffers is less
		 * than or equal to num entries available, else set it to max
		 * possible additional buffers available at that moment
		 */
		extra_buffers =
			((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ?
			(num_entries_avail - num_req_buffers) :
			CRITICAL_BUFFER_THRESHOLD;
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 extra_buffers,
						 &desc_list_append,
						 &tail_append);

		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
				 num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
								   mac_id,
					num_entries_avail, &nbuf_frag_info,
					dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);
/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}
		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif
#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. Code for marking which frames are CRITICAL is accessed via callback.
 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
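/**
 * dp_rx_nbuf_queue_mapping_set() - set the nbuf queue mapping to the rx ring
 *				    number on which the packet was received
 * @nbuf: nbuf to update
 * @ring_id: rx ring number
 *
 * Return: None
 */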
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif
/*
 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
		return false;

	/* If the source peer is in the isolation list, don't forward;
	 * instead push to the bridge stack.
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
						      tid_stats))
		return false;

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause &&
	    !dp_tx_send((struct cdp_soc_t *)soc,
			ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
		tid_stats->intrabss_cnt++;
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	}
	return false;
}
/*
 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @tx_vdev_id: VDEV ID for Intra-BSS TX
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len);
			/* return true even though the pkt is
			 * not forwarded. Basically skb_unshare
			 * failed and we want to continue with
			 * next nbuf.
			 */
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			return false;
		}
	}

	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

	/* Don't send packets if tx is paused */
	if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
					     tx_vdev_id, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		return false;
	}

	return true;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef MESH_MODE_SUPPORT
/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;
	uint8_t primary_chan_num;
	uint32_t center_chan_freq;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *peer;
	struct dp_peer *primary_link_peer;
	struct dp_soc *link_peer_soc;
	cdp_peer_stats_param_t buf = {0};

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */
	if (!rx_info) {
		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
			  vdev->pdev->soc);
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
	if (peer) {
		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
			rx_info->rs_flags |= MESH_RX_DECRYPTED;
			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
								  rx_tlv_hdr);
			if (vdev->osif_get_key)
				vdev->osif_get_key(vdev->osif_vdev,
						   &rx_info->rs_decryptkey[0],
						   &peer->mac_addr.raw[0],
						   rx_info->rs_keyix);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
	}

	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_MESH);
	if (qdf_likely(primary_link_peer)) {
		link_peer_soc = primary_link_peer->vdev->pdev->soc;
		dp_monitor_peer_get_stats_param(link_peer_soc,
						primary_link_peer,
						cdp_peer_rx_snr, &buf);
		rx_info->rs_snr = buf.rx_snr;
		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
	}

	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

	soc = vdev->pdev->soc;
	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
							soc->ctrl_psoc,
							vdev->pdev->pdev_id,
							center_chan_freq);
	}
	rx_info->rs_channel = primary_chan_num;
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix,
		  rx_info->rs_snr);
}
/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any of the filter-out
 * categories and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}
#endif
  1153. #ifdef FEATURE_NAC_RSSI
  1154. /**
  1155. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  1156. * @soc: DP SOC handle
  1157. * @mpdu: mpdu for which peer is invalid
  1158. * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
  1159. * pool_id has same mapping)
  1160. *
  1161. * return: integer type
  1162. */
  1163. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1164. uint8_t mac_id)
  1165. {
  1166. struct dp_invalid_peer_msg msg;
  1167. struct dp_vdev *vdev = NULL;
  1168. struct dp_pdev *pdev = NULL;
  1169. struct ieee80211_frame *wh;
  1170. qdf_nbuf_t curr_nbuf, next_nbuf;
  1171. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1172. uint8_t *rx_pkt_hdr = NULL;
  1173. int i = 0;
  1174. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  1175. dp_rx_debug("%pK: Drop decapped frames", soc);
  1176. goto free;
  1177. }
  1178. /* In RAW packet, packet header will be part of data */
  1179. rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
  1180. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1181. if (!DP_FRAME_IS_DATA(wh)) {
  1182. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  1183. goto free;
  1184. }
  1185. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  1186. dp_rx_err("%pK: Invalid nbuf length", soc);
  1187. goto free;
  1188. }
1189. /* In the DMAC case the rx_desc_pools are common across PDEVs,
1190. * so the PDEV cannot be derived from the pool_id.
1191. *
1192. * link_id needs to be derived from the TLV tag word, which is
1193. * disabled by default. For now add a WAR to find the vdev by
1194. * brute force; this needs to be fixed once word based subscription
1195. * support is added by enabling the TLV tag word.
1196. */
  1197. if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
  1198. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1199. pdev = soc->pdev_list[i];
  1200. if (!pdev || qdf_unlikely(pdev->is_pdev_down))
  1201. continue;
  1202. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1203. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1204. QDF_MAC_ADDR_SIZE) == 0) {
  1205. goto out;
  1206. }
  1207. }
  1208. }
  1209. } else {
  1210. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1211. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  1212. dp_rx_err("%pK: PDEV %s",
  1213. soc, !pdev ? "not found" : "down");
  1214. goto free;
  1215. }
  1216. if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
  1217. QDF_STATUS_SUCCESS)
  1218. return 0;
  1219. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1220. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1221. QDF_MAC_ADDR_SIZE) == 0) {
  1222. goto out;
  1223. }
  1224. }
  1225. }
  1226. if (!vdev) {
  1227. dp_rx_err("%pK: VDEV not found", soc);
  1228. goto free;
  1229. }
  1230. out:
  1231. msg.wh = wh;
  1232. qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
  1233. msg.nbuf = mpdu;
  1234. msg.vdev_id = vdev->vdev_id;
  1235. /*
  1236. * NOTE: Only valid for HKv1.
  1237. * If smart monitor mode is enabled on RE, we are getting invalid
1238. * peer frames with RA as the STA mac of the RE and the TA not matching
1239. * any NAC list entry or the BSSID. Such frames need to be dropped
1240. * in order to avoid HM_WDS false addition.
  1241. */
  1242. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
  1243. if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
  1244. dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
  1245. soc, wh->i_addr1);
  1246. goto free;
  1247. }
  1248. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
  1249. (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
  1250. pdev->pdev_id, &msg);
  1251. }
  1252. free:
  1253. /* Drop and free packet */
  1254. curr_nbuf = mpdu;
  1255. while (curr_nbuf) {
  1256. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1257. dp_rx_nbuf_free(curr_nbuf);
  1258. curr_nbuf = next_nbuf;
  1259. }
  1260. return 0;
  1261. }
  1262. /**
  1263. * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
  1264. * @soc: DP SOC handle
  1265. * @mpdu: mpdu for which peer is invalid
  1266. * @mpdu_done: if an mpdu is completed
1267. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
1268. * pool_id have the same mapping)
1269. *
1270. * Return: None
  1271. */
  1272. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1273. qdf_nbuf_t mpdu, bool mpdu_done,
  1274. uint8_t mac_id)
  1275. {
  1276. /* Only trigger the process when mpdu is completed */
  1277. if (mpdu_done)
  1278. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1279. }
  1280. #else
  1281. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1282. uint8_t mac_id)
  1283. {
  1284. qdf_nbuf_t curr_nbuf, next_nbuf;
  1285. struct dp_pdev *pdev;
  1286. struct dp_vdev *vdev = NULL;
  1287. struct ieee80211_frame *wh;
  1288. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1289. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
  1290. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1291. if (!DP_FRAME_IS_DATA(wh)) {
  1292. QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
  1293. "only for data frames");
  1294. goto free;
  1295. }
  1296. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  1297. dp_rx_info_rl("%pK: Invalid nbuf length", soc);
  1298. goto free;
  1299. }
  1300. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1301. if (!pdev) {
  1302. dp_rx_info_rl("%pK: PDEV not found", soc);
  1303. goto free;
  1304. }
  1305. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1306. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1307. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1308. QDF_MAC_ADDR_SIZE) == 0) {
  1309. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1310. goto out;
  1311. }
  1312. }
  1313. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1314. if (!vdev) {
  1315. dp_rx_info_rl("%pK: VDEV not found", soc);
  1316. goto free;
  1317. }
  1318. out:
  1319. if (soc->cdp_soc.ol_ops->rx_invalid_peer)
  1320. soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
  1321. free:
  1322. /* Drop and free packet */
  1323. curr_nbuf = mpdu;
  1324. while (curr_nbuf) {
  1325. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1326. dp_rx_nbuf_free(curr_nbuf);
  1327. curr_nbuf = next_nbuf;
  1328. }
  1329. /* Reset the head and tail pointers */
  1330. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1331. if (pdev) {
  1332. pdev->invalid_peer_head_msdu = NULL;
  1333. pdev->invalid_peer_tail_msdu = NULL;
  1334. }
  1335. return 0;
  1336. }
  1337. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1338. qdf_nbuf_t mpdu, bool mpdu_done,
  1339. uint8_t mac_id)
  1340. {
  1341. /* Process the nbuf */
  1342. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1343. }
  1344. #endif
  1345. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1346. #ifdef RECEIVE_OFFLOAD
  1347. /**
  1348. * dp_rx_print_offload_info() - Print offload info from RX TLV
  1349. * @soc: dp soc handle
  1350. * @msdu: MSDU for which the offload info is to be printed
  1351. *
  1352. * Return: None
  1353. */
  1354. static void dp_rx_print_offload_info(struct dp_soc *soc,
  1355. qdf_nbuf_t msdu)
  1356. {
  1357. dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
  1358. dp_verbose_debug("lro_eligible 0x%x",
  1359. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
  1360. dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
  1361. dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
  1362. dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
  1363. dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
  1364. dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
  1365. dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
  1366. dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
  1367. dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
  1368. dp_verbose_debug("---------------------------------------------------------");
  1369. }
  1370. /**
  1371. * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
  1372. * @soc: DP SOC handle
  1373. * @rx_tlv: RX TLV received for the msdu
  1374. * @msdu: msdu for which GRO info needs to be filled
  1375. * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
  1376. *
  1377. * Return: None
  1378. */
  1379. void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  1380. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  1381. {
  1382. struct hal_offload_info offload_info;
  1383. if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
  1384. return;
  1385. if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
  1386. return;
  1387. *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
  1388. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
  1389. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
  1390. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  1391. hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  1392. rx_tlv);
  1393. QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
  1394. QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
  1395. QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
  1396. QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
  1397. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
  1398. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
  1399. QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
  1400. dp_rx_print_offload_info(soc, msdu);
  1401. }
  1402. #endif /* RECEIVE_OFFLOAD */
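/*
 * Illustrative sketch (assumption, not verbatim driver code): once
 * dp_rx_fill_gro_info() has populated the nbuf control block, a delivery
 * path can read the hints back through the same accessors, e.g.:
 *
 *	dp_rx_fill_gro_info(soc, rx_tlv_hdr, msdu, &rx_ol_pkt_cnt);
 *	if (QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu))
 *		; // candidate for GRO/LRO aggregation before stack delivery
 */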
  1403. /**
  1404. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  1405. *
  1406. * @soc: DP soc handle
  1407. * @nbuf: pointer to msdu.
  1408. * @mpdu_len: mpdu length
  1409. * @l3_pad_len: L3 padding length by HW
  1410. *
  1411. * Return: returns true if nbuf is last msdu of mpdu else returns false.
  1412. */
  1413. static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
  1414. qdf_nbuf_t nbuf,
  1415. uint16_t *mpdu_len,
  1416. uint32_t l3_pad_len)
  1417. {
  1418. bool last_nbuf;
  1419. uint32_t pkt_hdr_size;
  1420. pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
  1421. if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
  1422. qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
  1423. last_nbuf = false;
  1424. *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
  1425. } else {
  1426. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
  1427. last_nbuf = true;
  1428. *mpdu_len = 0;
  1429. }
  1430. return last_nbuf;
  1431. }
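/*
 * Worked example for dp_rx_adjust_nbuf_len() (illustrative numbers only,
 * assuming RX_DATA_BUFFER_SIZE = 2048, rx_pkt_tlv_size = 128 and no L3
 * padding): with *mpdu_len = 3000, the first call trims the nbuf to 2048
 * bytes, returns last_nbuf = false and leaves *mpdu_len = 3000 -
 * (2048 - 128) = 1080; the second call sets the nbuf length to
 * 1080 + 128 = 1208 and returns last_nbuf = true.
 */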
  1432. /**
  1433. * dp_get_l3_hdr_pad_len() - get L3 header padding length.
  1434. *
  1435. * @soc: DP soc handle
  1436. * @nbuf: pointer to msdu.
  1437. *
  1438. * Return: returns padding length in bytes.
  1439. */
  1440. static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
  1441. qdf_nbuf_t nbuf)
  1442. {
  1443. uint32_t l3_hdr_pad = 0;
  1444. uint8_t *rx_tlv_hdr;
  1445. struct hal_rx_msdu_metadata msdu_metadata;
  1446. while (nbuf) {
  1447. if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
1448. /* the scattered msdu ends at the buffer whose continuation bit is 0 */
  1449. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1450. hal_rx_msdu_metadata_get(soc->hal_soc,
  1451. rx_tlv_hdr,
  1452. &msdu_metadata);
  1453. l3_hdr_pad = msdu_metadata.l3_hdr_pad;
  1454. break;
  1455. }
  1456. nbuf = nbuf->next;
  1457. }
  1458. return l3_hdr_pad;
  1459. }
  1460. /**
  1461. * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
  1462. * multiple nbufs.
  1463. * @soc: DP SOC handle
  1464. * @nbuf: pointer to the first msdu of an amsdu.
  1465. *
  1466. * This function implements the creation of RX frag_list for cases
  1467. * where an MSDU is spread across multiple nbufs.
  1468. *
  1469. * Return: returns the head nbuf which contains complete frag_list.
  1470. */
  1471. qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1472. {
  1473. qdf_nbuf_t parent, frag_list, next = NULL;
  1474. uint16_t frag_list_len = 0;
  1475. uint16_t mpdu_len;
  1476. bool last_nbuf;
  1477. uint32_t l3_hdr_pad_offset = 0;
  1478. /*
1479. * Use the msdu length obtained from the REO entry descriptor, since
1480. * there are cases where the RX PKT TLV is corrupted while the msdu_len
1481. * from the REO descriptor is correct for non-raw RX scatter msdus.
  1482. */
  1483. mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1484. /*
  1485. * this is a case where the complete msdu fits in one single nbuf.
  1486. * in this case HW sets both start and end bit and we only need to
  1487. * reset these bits for RAW mode simulator to decap the pkt
  1488. */
  1489. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  1490. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1491. qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
  1492. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1493. return nbuf;
  1494. }
  1495. l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
  1496. /*
  1497. * This is a case where we have multiple msdus (A-MSDU) spread across
  1498. * multiple nbufs. here we create a fraglist out of these nbufs.
  1499. *
  1500. * the moment we encounter a nbuf with continuation bit set we
  1501. * know for sure we have an MSDU which is spread across multiple
  1502. * nbufs. We loop through and reap nbufs till we reach last nbuf.
  1503. */
  1504. parent = nbuf;
  1505. frag_list = nbuf->next;
  1506. nbuf = nbuf->next;
  1507. /*
  1508. * set the start bit in the first nbuf we encounter with continuation
  1509. * bit set. This has the proper mpdu length set as it is the first
  1510. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  1511. * nbufs will form the frag_list of the parent nbuf.
  1512. */
  1513. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  1514. /*
  1515. * L3 header padding is only needed for the 1st buffer
  1516. * in a scattered msdu
  1517. */
  1518. last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
  1519. l3_hdr_pad_offset);
  1520. /*
1521. * MSDU cont bit is set but the reported MPDU length can fit
1522. * into a single buffer
  1523. *
  1524. * Increment error stats and avoid SG list creation
  1525. */
  1526. if (last_nbuf) {
  1527. DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
  1528. qdf_nbuf_pull_head(parent,
  1529. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1530. return parent;
  1531. }
  1532. /*
  1533. * this is where we set the length of the fragments which are
  1534. * associated to the parent nbuf. We iterate through the frag_list
  1535. * till we hit the last_nbuf of the list.
  1536. */
  1537. do {
  1538. last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
  1539. qdf_nbuf_pull_head(nbuf,
  1540. soc->rx_pkt_tlv_size);
  1541. frag_list_len += qdf_nbuf_len(nbuf);
  1542. if (last_nbuf) {
  1543. next = nbuf->next;
  1544. nbuf->next = NULL;
  1545. break;
  1546. } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1547. dp_err("Invalid packet length\n");
  1548. qdf_assert_always(0);
  1549. }
  1550. nbuf = nbuf->next;
  1551. } while (!last_nbuf);
  1552. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  1553. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  1554. parent->next = next;
  1555. qdf_nbuf_pull_head(parent,
  1556. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1557. return parent;
  1558. }
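/*
 * Illustrative usage sketch (assumption, not verbatim driver code): a reap
 * loop that detects a scattered MSDU on the head buffer rebuilds the
 * complete frame before further processing, e.g.:
 *
 *	if (qdf_nbuf_is_rx_chfrag_cont(nbuf))
 *		nbuf = dp_rx_sg_create(soc, nbuf);
 */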
  1559. #ifdef DP_RX_SG_FRAME_SUPPORT
  1560. /**
  1561. * dp_rx_is_sg_supported() - SG packets processing supported or not.
  1562. *
  1563. * Return: returns true when processing is supported else false.
  1564. */
  1565. bool dp_rx_is_sg_supported(void)
  1566. {
  1567. return true;
  1568. }
  1569. #else
  1570. bool dp_rx_is_sg_supported(void)
  1571. {
  1572. return false;
  1573. }
  1574. #endif
  1575. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1576. #ifdef QCA_PEER_EXT_STATS
  1577. /*
1578. * dp_rx_compute_tid_delay - Compute per-TID delay stats
1579. * @stats: cdp_delay_tid_stats pointer for the TID
  1580. * @nbuf: NBuffer
  1581. *
  1582. * Return: Void
  1583. */
  1584. void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  1585. qdf_nbuf_t nbuf)
  1586. {
  1587. struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
  1588. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1589. dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
  1590. }
  1591. #endif /* QCA_PEER_EXT_STATS */
  1592. /**
  1593. * dp_rx_compute_delay() - Compute and fill in all timestamps
  1594. * to pass in correct fields
  1595. *
1596. * @vdev: DP vdev handle
1597. * @nbuf: rx buffer for which the delay is computed
1598. *
  1599. * Return: none
  1600. */
  1601. void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1602. {
  1603. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  1604. int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
  1605. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1606. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1607. uint32_t interframe_delay =
  1608. (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
  1609. struct cdp_tid_rx_stats *rstats =
  1610. &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1611. dp_update_delay_stats(NULL, rstats, to_stack, tid,
  1612. CDP_DELAY_STATS_REAP_STACK, ring_id, false);
  1613. /*
  1614. * Update interframe delay stats calculated at deliver_data_ol point.
  1615. * Value of vdev->prev_rx_deliver_tstamp will be 0 for 1st frame, so
1616. * interframe delay will not be calculated correctly for the 1st frame.
1617. * On the other hand, this helps avoid an extra per-packet check
1618. * of vdev->prev_rx_deliver_tstamp.
  1619. */
  1620. dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
  1621. CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
  1622. vdev->prev_rx_deliver_tstamp = current_ts;
  1623. }
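/*
 * Worked example (illustrative values only): if 3 ms have elapsed since the
 * nbuf timestamp was recorded, to_stack = 3; if the previous frame on this
 * vdev was delivered 10 ms before the current one, interframe_delay = 10.
 * Both values are then folded into the per-ring, per-TID delay stats above.
 */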
  1624. /**
  1625. * dp_rx_drop_nbuf_list() - drop an nbuf list
  1626. * @pdev: dp pdev reference
1627. * @buf_list: buffer list to be dropped
  1628. *
  1629. * Return: int (number of bufs dropped)
  1630. */
  1631. static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  1632. qdf_nbuf_t buf_list)
  1633. {
  1634. struct cdp_tid_rx_stats *stats = NULL;
  1635. uint8_t tid = 0, ring_id = 0;
  1636. int num_dropped = 0;
  1637. qdf_nbuf_t buf, next_buf;
  1638. buf = buf_list;
  1639. while (buf) {
  1640. ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
  1641. next_buf = qdf_nbuf_queue_next(buf);
  1642. tid = qdf_nbuf_get_tid_val(buf);
  1643. if (qdf_likely(pdev)) {
  1644. stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1645. stats->fail_cnt[INVALID_PEER_VDEV]++;
  1646. stats->delivered_to_stack--;
  1647. }
  1648. dp_rx_nbuf_free(buf);
  1649. buf = next_buf;
  1650. num_dropped++;
  1651. }
  1652. return num_dropped;
  1653. }
  1654. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1655. /**
  1656. * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  1657. * @soc: core txrx main context
  1658. * @vdev: vdev
  1659. * @txrx_peer: txrx peer
  1660. * @nbuf_head: skb list head
  1661. *
  1662. * Return: true if packet is delivered to netdev per STA.
  1663. */
  1664. static inline bool
  1665. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1666. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1667. {
  1668. /*
  1669. * When extended WDS is disabled, frames are sent to AP netdevice.
  1670. */
  1671. if (qdf_likely(!vdev->wds_ext_enabled))
  1672. return false;
  1673. /*
  1674. * There can be 2 cases:
  1675. * 1. Send frame to parent netdev if its not for netdev per STA
  1676. * 2. If frame is meant for netdev per STA:
  1677. * a. Send frame to appropriate netdev using registered fp.
  1678. * b. If fp is NULL, drop the frames.
  1679. */
  1680. if (!txrx_peer->wds_ext.init)
  1681. return false;
  1682. if (txrx_peer->osif_rx)
  1683. txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
  1684. else
  1685. dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1686. return true;
  1687. }
  1688. #else
  1689. static inline bool
  1690. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1691. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1692. {
  1693. return false;
  1694. }
  1695. #endif
  1696. #ifdef PEER_CACHE_RX_PKTS
  1697. /**
  1698. * dp_rx_flush_rx_cached() - flush cached rx frames
  1699. * @peer: peer
  1700. * @drop: flag to drop frames or forward to net stack
  1701. *
  1702. * Return: None
  1703. */
  1704. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  1705. {
  1706. struct dp_peer_cached_bufq *bufqi;
  1707. struct dp_rx_cached_buf *cache_buf = NULL;
  1708. ol_txrx_rx_fp data_rx = NULL;
  1709. int num_buff_elem;
  1710. QDF_STATUS status;
  1711. /*
  1712. * Flush dp cached frames only for mld peers and legacy peers, as
  1713. * link peers don't store cached frames
  1714. */
  1715. if (IS_MLO_DP_LINK_PEER(peer))
  1716. return;
  1717. if (!peer->txrx_peer) {
  1718. dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
  1719. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1720. return;
  1721. }
  1722. if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
  1723. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1724. return;
  1725. }
  1726. qdf_spin_lock_bh(&peer->peer_info_lock);
  1727. if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
  1728. data_rx = peer->vdev->osif_rx;
  1729. else
  1730. drop = true;
  1731. qdf_spin_unlock_bh(&peer->peer_info_lock);
  1732. bufqi = &peer->txrx_peer->bufq_info;
  1733. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1734. qdf_list_remove_front(&bufqi->cached_bufq,
  1735. (qdf_list_node_t **)&cache_buf);
  1736. while (cache_buf) {
  1737. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
  1738. cache_buf->buf);
  1739. bufqi->entries -= num_buff_elem;
  1740. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1741. if (drop) {
  1742. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1743. cache_buf->buf);
  1744. } else {
  1745. /* Flush the cached frames to OSIF DEV */
  1746. status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
  1747. if (status != QDF_STATUS_SUCCESS)
  1748. bufqi->dropped = dp_rx_drop_nbuf_list(
  1749. peer->vdev->pdev,
  1750. cache_buf->buf);
  1751. }
  1752. qdf_mem_free(cache_buf);
  1753. cache_buf = NULL;
  1754. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1755. qdf_list_remove_front(&bufqi->cached_bufq,
  1756. (qdf_list_node_t **)&cache_buf);
  1757. }
  1758. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1759. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1760. }
  1761. /**
  1762. * dp_rx_enqueue_rx() - cache rx frames
  1763. * @peer: peer
  1764. * @txrx_peer: DP txrx_peer
  1765. * @rx_buf_list: cache buffer list
  1766. *
  1767. * Return: None
  1768. */
  1769. static QDF_STATUS
  1770. dp_rx_enqueue_rx(struct dp_peer *peer,
  1771. struct dp_txrx_peer *txrx_peer,
  1772. qdf_nbuf_t rx_buf_list)
  1773. {
  1774. struct dp_rx_cached_buf *cache_buf;
  1775. struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
  1776. int num_buff_elem;
  1777. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  1778. struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
  1779. struct dp_peer *ta_peer = NULL;
  1780. /*
1781. * If the peer id is invalid, which likely means peer map has not
1782. * completed, the caller needs to provide the dp_peer pointer; else it
1783. * is ok to use txrx_peer->peer_id to get the dp_peer.
  1784. */
  1785. if (peer) {
  1786. if (QDF_STATUS_SUCCESS ==
  1787. dp_peer_get_ref(soc, peer, DP_MOD_ID_RX))
  1788. ta_peer = peer;
  1789. } else {
  1790. ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
  1791. DP_MOD_ID_RX);
  1792. }
  1793. if (!ta_peer) {
  1794. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1795. rx_buf_list);
  1796. return QDF_STATUS_E_INVAL;
  1797. }
  1798. dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
  1799. bufqi->dropped);
  1800. if (!ta_peer->valid) {
  1801. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1802. rx_buf_list);
  1803. ret = QDF_STATUS_E_INVAL;
  1804. goto fail;
  1805. }
  1806. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1807. if (bufqi->entries >= bufqi->thresh) {
  1808. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1809. rx_buf_list);
  1810. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1811. ret = QDF_STATUS_E_RESOURCES;
  1812. goto fail;
  1813. }
  1814. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1815. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
  1816. cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
  1817. if (!cache_buf) {
  1818. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1819. "Failed to allocate buf to cache rx frames");
  1820. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1821. rx_buf_list);
  1822. ret = QDF_STATUS_E_NOMEM;
  1823. goto fail;
  1824. }
  1825. cache_buf->buf = rx_buf_list;
  1826. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1827. qdf_list_insert_back(&bufqi->cached_bufq,
  1828. &cache_buf->node);
  1829. bufqi->entries += num_buff_elem;
  1830. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1831. fail:
  1832. dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX);
  1833. return ret;
  1834. }
  1835. static inline
  1836. bool dp_rx_is_peer_cache_bufq_supported(void)
  1837. {
  1838. return true;
  1839. }
  1840. #else
  1841. static inline
  1842. bool dp_rx_is_peer_cache_bufq_supported(void)
  1843. {
  1844. return false;
  1845. }
  1846. static inline QDF_STATUS
  1847. dp_rx_enqueue_rx(struct dp_peer *peer,
  1848. struct dp_txrx_peer *txrx_peer,
  1849. qdf_nbuf_t rx_buf_list)
  1850. {
  1851. return QDF_STATUS_SUCCESS;
  1852. }
  1853. #endif
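/*
 * Illustrative flow (assumption, not verbatim driver code): while a peer's
 * OS rx callback is not yet registered, the rx path can park frames with
 * dp_rx_enqueue_rx(); once the peer is connected, the control path flushes
 * or drops them:
 *
 *	dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head);	// cache for later
 *	...
 *	dp_rx_flush_rx_cached(peer, false);		// deliver (or drop)
 */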
  1854. #ifndef DELIVERY_TO_STACK_STATUS_CHECK
  1855. /**
  1856. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1857. * using the appropriate call back functions.
  1858. * @soc: soc
  1859. * @vdev: vdev
1860. * @txrx_peer: txrx peer
  1861. * @nbuf_head: skb list head
  1862. * @nbuf_tail: skb list tail
  1863. *
  1864. * Return: None
  1865. */
  1866. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1867. struct dp_vdev *vdev,
  1868. struct dp_txrx_peer *txrx_peer,
  1869. qdf_nbuf_t nbuf_head)
  1870. {
  1871. if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
  1872. txrx_peer, nbuf_head)))
  1873. return;
  1874. /* Function pointer initialized only when FISA is enabled */
  1875. if (vdev->osif_fisa_rx)
  1876. /* on failure send it via regular path */
  1877. vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1878. else
  1879. vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1880. }
  1881. #else
  1882. /**
  1883. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1884. * using the appropriate call back functions.
  1885. * @soc: soc
  1886. * @vdev: vdev
  1887. * @txrx_peer: txrx peer
  1888. * @nbuf_head: skb list head
  1889. * @nbuf_tail: skb list tail
  1890. *
  1891. * Check the return status of the call back function and drop
  1892. * the packets if the return status indicates a failure.
  1893. *
  1894. * Return: None
  1895. */
  1896. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1897. struct dp_vdev *vdev,
  1898. struct dp_txrx_peer *txrx_peer,
  1899. qdf_nbuf_t nbuf_head)
  1900. {
  1901. int num_nbuf = 0;
  1902. QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
  1903. /* Function pointer initialized only when FISA is enabled */
  1904. if (vdev->osif_fisa_rx)
  1905. /* on failure send it via regular path */
  1906. ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1907. else if (vdev->osif_rx)
  1908. ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1909. if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
  1910. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1911. DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
  1912. if (txrx_peer)
  1913. DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
  1914. num_nbuf);
  1915. }
  1916. }
  1917. #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
  1918. /*
  1919. * dp_rx_validate_rx_callbacks() - validate rx callbacks
1920. * @soc: DP soc
1921. * @vdev: DP vdev handle
1922. * @txrx_peer: pointer to the txrx peer object
1923. * @nbuf_head: skb list head
  1924. *
  1925. * Return: QDF_STATUS - QDF_STATUS_SUCCESS
  1926. * QDF_STATUS_E_FAILURE
  1927. */
  1928. static inline QDF_STATUS
  1929. dp_rx_validate_rx_callbacks(struct dp_soc *soc,
  1930. struct dp_vdev *vdev,
  1931. struct dp_txrx_peer *txrx_peer,
  1932. qdf_nbuf_t nbuf_head)
  1933. {
  1934. int num_nbuf;
  1935. if (qdf_unlikely(!vdev || vdev->delete.pending)) {
  1936. num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
  1937. /*
  1938. * This is a special case where vdev is invalid,
  1939. * so we cannot know the pdev to which this packet
  1940. * belonged. Hence we update the soc rx error stats.
  1941. */
  1942. DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
  1943. return QDF_STATUS_E_FAILURE;
  1944. }
  1945. /*
  1946. * highly unlikely to have a vdev without a registered rx
  1947. * callback function. if so let us free the nbuf_list.
  1948. */
  1949. if (qdf_unlikely(!vdev->osif_rx)) {
  1950. if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
  1951. dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head);
  1952. } else {
  1953. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
  1954. nbuf_head);
  1955. DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
  1956. vdev->pdev->enhanced_stats_en);
  1957. }
  1958. return QDF_STATUS_E_FAILURE;
  1959. }
  1960. return QDF_STATUS_SUCCESS;
  1961. }
  1962. QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
  1963. struct dp_vdev *vdev,
  1964. struct dp_txrx_peer *txrx_peer,
  1965. qdf_nbuf_t nbuf_head,
  1966. qdf_nbuf_t nbuf_tail)
  1967. {
  1968. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  1969. QDF_STATUS_SUCCESS)
  1970. return QDF_STATUS_E_FAILURE;
  1971. if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
  1972. (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
  1973. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
  1974. &nbuf_tail);
  1975. }
  1976. dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);
  1977. return QDF_STATUS_SUCCESS;
  1978. }
  1979. #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
  1980. QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
  1981. struct dp_vdev *vdev,
  1982. struct dp_txrx_peer *txrx_peer,
  1983. qdf_nbuf_t nbuf_head,
  1984. qdf_nbuf_t nbuf_tail)
  1985. {
  1986. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  1987. QDF_STATUS_SUCCESS)
  1988. return QDF_STATUS_E_FAILURE;
  1989. vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);
  1990. return QDF_STATUS_SUCCESS;
  1991. }
  1992. #endif
  1993. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1994. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1995. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
  1996. { \
  1997. qdf_nbuf_t nbuf_local; \
  1998. struct dp_txrx_peer *txrx_peer_local; \
  1999. struct dp_vdev *vdev_local = vdev_hdl; \
  2000. do { \
  2001. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  2002. break; \
  2003. nbuf_local = nbuf; \
  2004. txrx_peer_local = txrx_peer; \
  2005. if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
  2006. break; \
  2007. else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
  2008. break; \
  2009. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  2010. (nbuf_local), \
  2011. (txrx_peer_local), 0, 1); \
  2012. } while (0); \
  2013. }
  2014. #else
  2015. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
  2016. #endif
  2017. #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
  2018. /**
  2019. * dp_rx_rates_stats_update() - update rate stats
  2020. * from rx msdu.
  2021. * @soc: datapath soc handle
  2022. * @nbuf: received msdu buffer
  2023. * @rx_tlv_hdr: rx tlv header
  2024. * @txrx_peer: datapath txrx_peer handle
  2025. * @sgi: Short Guard Interval
  2026. * @mcs: Modulation and Coding Set
  2027. * @nss: Number of Spatial Streams
  2028. * @bw: BandWidth
  2029. * @pkt_type: Corresponds to preamble
  2030. *
2031. * To record rates precisely, the following factors are considered:
2032. * Exclude specific frames: ARP, DHCP, SSDP, etc.
2033. * Make sure to affect rx throughput as little as possible.
  2034. *
  2035. * Return: void
  2036. */
  2037. static void
  2038. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2039. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  2040. uint32_t sgi, uint32_t mcs,
  2041. uint32_t nss, uint32_t bw, uint32_t pkt_type)
  2042. {
  2043. uint32_t rix;
  2044. uint16_t ratecode;
  2045. uint32_t avg_rx_rate;
  2046. uint32_t ratekbps;
  2047. enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
  2048. if (soc->high_throughput ||
  2049. dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
  2050. return;
  2051. }
  2052. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);
  2053. /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
  2054. if (qdf_unlikely(pkt_type == DOT11_B))
  2055. nss = 1;
  2056. /* here pkt_type corresponds to preamble */
  2057. ratekbps = dp_getrateindex(sgi,
  2058. mcs,
  2059. nss - 1,
  2060. pkt_type,
  2061. bw,
  2062. punc_mode,
  2063. &rix,
  2064. &ratecode);
  2065. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
  2066. avg_rx_rate =
  2067. dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
  2068. ratekbps);
  2069. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
  2070. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
  2071. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
  2072. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
  2073. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
  2074. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
  2075. }
  2076. #else
  2077. static inline void
  2078. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2079. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  2080. uint32_t sgi, uint32_t mcs,
  2081. uint32_t nss, uint32_t bw, uint32_t pkt_type)
  2082. {
  2083. }
  2084. #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
  2085. #ifndef QCA_ENHANCED_STATS_SUPPORT
  2086. /**
  2087. * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
  2088. *
  2089. * @soc: datapath soc handle
  2090. * @nbuf: received msdu buffer
  2091. * @rx_tlv_hdr: rx tlv header
  2092. * @txrx_peer: datapath txrx_peer handle
  2093. *
  2094. * Return: void
  2095. */
  2096. static inline
  2097. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2098. uint8_t *rx_tlv_hdr,
  2099. struct dp_txrx_peer *txrx_peer)
  2100. {
  2101. bool is_ampdu;
  2102. uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
  2103. uint8_t dst_mcs_idx;
  2104. /*
  2105. * TODO - For KIWI this field is present in ring_desc
  2106. * Try to use ring desc instead of tlv.
  2107. */
  2108. is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
  2109. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
  2110. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
  2111. sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
  2112. mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  2113. tid = qdf_nbuf_get_tid_val(nbuf);
  2114. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  2115. reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
  2116. rx_tlv_hdr);
  2117. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  2118. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  2119. /* do HW to SW pkt type conversion */
  2120. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  2121. hal_2_dp_pkt_type_map[pkt_type]);
  2122. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
  2123. ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  2124. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
  2125. ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  2126. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
  2127. /*
2128. * Only if nss > 0 and pkt_type is 11N/AC/AX,
2129. * increase the counter at index [nss - 1].
  2130. */
  2131. if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
  2132. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);
  2133. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
  2134. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
  2135. hal_rx_tlv_mic_err_get(soc->hal_soc,
  2136. rx_tlv_hdr));
  2137. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
  2138. hal_rx_tlv_decrypt_err_get(soc->hal_soc,
  2139. rx_tlv_hdr));
  2140. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
  2141. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);
  2142. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  2143. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  2144. DP_PEER_EXTD_STATS_INC(txrx_peer,
  2145. rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  2146. 1);
  2147. dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
  2148. sgi, mcs, nss, bw, pkt_type);
  2149. }
  2150. #else
  2151. static inline
  2152. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2153. uint8_t *rx_tlv_hdr,
  2154. struct dp_txrx_peer *txrx_peer)
  2155. {
  2156. }
  2157. #endif
  2158. #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
  2159. static inline void
  2160. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2161. qdf_nbuf_t nbuf)
  2162. {
  2163. uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
  2164. if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) {
  2165. dp_err_rl("Invalid lmac_id: %u vdev_id: %u",
  2166. lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf));
  2167. if (qdf_likely(txrx_peer))
  2168. dp_err_rl("peer_id: %u", txrx_peer->peer_id);
  2169. return;
  2170. }
  2171. /* only count stats per lmac for MLO connection*/
  2172. DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
  2173. QDF_NBUF_CB_RX_PKT_LEN(nbuf),
  2174. txrx_peer->mld_peer);
  2175. }
  2176. #else
  2177. static inline void
  2178. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2179. qdf_nbuf_t nbuf)
  2180. {
  2181. }
  2182. #endif
  2183. /**
  2184. * dp_rx_msdu_stats_update() - update per msdu stats.
  2185. * @soc: core txrx main context
  2186. * @nbuf: pointer to the first msdu of an amsdu.
  2187. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  2188. * @txrx_peer: pointer to the txrx peer object.
  2189. * @ring_id: reo dest ring number on which pkt is reaped.
  2190. * @tid_stats: per tid rx stats.
  2191. *
  2192. * update all the per msdu stats for that nbuf.
  2193. * Return: void
  2194. */
  2195. void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2196. uint8_t *rx_tlv_hdr,
  2197. struct dp_txrx_peer *txrx_peer,
  2198. uint8_t ring_id,
  2199. struct cdp_tid_rx_stats *tid_stats)
  2200. {
  2201. bool is_not_amsdu;
  2202. struct dp_vdev *vdev = txrx_peer->vdev;
  2203. bool enh_flag;
  2204. qdf_ether_header_t *eh;
  2205. uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2206. dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
  2207. is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
  2208. qdf_nbuf_is_rx_chfrag_end(nbuf);
  2209. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
  2210. msdu_len);
  2211. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
  2212. is_not_amsdu);
  2213. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
  2214. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
  2215. qdf_nbuf_is_rx_retry_flag(nbuf));
  2216. dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
  2217. tid_stats->msdu_cnt++;
  2218. if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
  2219. (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
  2220. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2221. enh_flag = vdev->pdev->enhanced_stats_en;
  2222. DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
  2223. tid_stats->mcast_msdu_cnt++;
  2224. if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  2225. DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
  2226. tid_stats->bcast_msdu_cnt++;
  2227. }
  2228. }
  2229. txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();
  2230. dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
  2231. }
  2232. #ifndef WDS_VENDOR_EXTENSION
  2233. int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
  2234. struct dp_vdev *vdev,
  2235. struct dp_txrx_peer *txrx_peer)
  2236. {
  2237. return 1;
  2238. }
  2239. #endif
  2240. #ifdef RX_DESC_DEBUG_CHECK
  2241. /**
  2242. * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
  2243. * corruption
  2244. *
2245. * @soc: DP SOC handle
2246. * @ring_desc: REO ring descriptor
2247. * @rx_desc: Rx descriptor
2248. * Return: QDF_STATUS_SUCCESS if the paddr sanity check passes, else QDF_STATUS_E_FAILURE
  2249. */
  2250. QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
  2251. hal_ring_desc_t ring_desc,
  2252. struct dp_rx_desc *rx_desc)
  2253. {
  2254. struct hal_buf_info hbi;
  2255. hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
  2256. /* Sanity check for possible buffer paddr corruption */
  2257. if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
  2258. return QDF_STATUS_SUCCESS;
  2259. return QDF_STATUS_E_FAILURE;
  2260. }
  2261. /**
  2262. * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
  2263. * out of bound access from H.W
  2264. *
  2265. * @soc: DP soc
  2266. * @pkt_len: Packet length received from H.W
  2267. *
  2268. * Return: NONE
  2269. */
  2270. static inline void
  2271. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
  2272. uint32_t pkt_len)
  2273. {
  2274. struct rx_desc_pool *rx_desc_pool;
  2275. rx_desc_pool = &soc->rx_desc_buf[0];
  2276. qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
  2277. }
  2278. #else
  2279. static inline void
  2280. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
  2281. #endif
  2282. #ifdef DP_RX_PKT_NO_PEER_DELIVER
  2283. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  2284. /**
  2285. * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
  2286. * during roaming
  2287. * @vdev: dp_vdev pointer
  2288. * @rx_tlv_hdr: rx tlv header
  2289. * @nbuf: pkt skb pointer
  2290. *
2291. * This function checks whether rx UDP data is received from an
2292. * authorised roamed peer before the peer map indication is received
2293. * from FW after roaming. This is needed for VoIP scenarios in which
2294. * packet loss during roaming must be kept minimal.
  2295. *
  2296. * Return: bool
  2297. */
  2298. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2299. uint8_t *rx_tlv_hdr,
  2300. qdf_nbuf_t nbuf)
  2301. {
  2302. char *hdr_desc;
  2303. struct ieee80211_frame *wh = NULL;
  2304. hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
  2305. rx_tlv_hdr);
  2306. wh = (struct ieee80211_frame *)hdr_desc;
  2307. if (vdev->roaming_peer_status ==
  2308. WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
  2309. !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
  2310. QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
  2311. qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
  2312. return true;
  2313. return false;
  2314. }
  2315. #else
  2316. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2317. uint8_t *rx_tlv_hdr,
  2318. qdf_nbuf_t nbuf)
  2319. {
  2320. return false;
  2321. }
  2322. #endif
  2323. /**
  2324. * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if
2325. * no corresponding peer is found
  2326. * @soc: core txrx main context
  2327. * @nbuf: pkt skb pointer
  2328. *
2329. * This function will try to deliver some special RX frames to the stack
2330. * even when no matching peer is found. For instance, in the LFR case,
2331. * some EAPOL data will be sent to the host before peer_map is done.
  2332. *
  2333. * Return: None
  2334. */
  2335. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2336. {
  2337. uint16_t peer_id;
  2338. uint8_t vdev_id;
  2339. struct dp_vdev *vdev = NULL;
  2340. uint32_t l2_hdr_offset = 0;
  2341. uint16_t msdu_len = 0;
  2342. uint32_t pkt_len = 0;
  2343. uint8_t *rx_tlv_hdr;
  2344. uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
  2345. FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
  2346. bool is_special_frame = false;
  2347. struct dp_peer *peer = NULL;
  2348. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  2349. if (peer_id > soc->max_peer_id)
  2350. goto deliver_fail;
  2351. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  2352. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
  2353. if (!vdev || vdev->delete.pending)
  2354. goto deliver_fail;
  2355. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
  2356. goto deliver_fail;
  2357. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  2358. l2_hdr_offset =
  2359. hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
  2360. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2361. pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
  2362. QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
  2363. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  2364. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
  2365. is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask);
  2366. if (qdf_likely(vdev->osif_rx)) {
  2367. if (is_special_frame ||
  2368. dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr,
  2369. nbuf)) {
  2370. qdf_nbuf_set_exc_frame(nbuf, 1);
  2371. if (QDF_STATUS_SUCCESS !=
  2372. vdev->osif_rx(vdev->osif_vdev, nbuf))
  2373. goto deliver_fail;
  2374. DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
  2375. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2376. return;
  2377. }
  2378. } else if (is_special_frame) {
  2379. /*
  2380. * If MLO connection, txrx_peer for link peer does not exist,
  2381. * try to store these RX packets to txrx_peer's bufq of MLD
  2382. * peer until vdev->osif_rx is registered from CP and flush
  2383. * them to stack.
  2384. */
  2385. peer = dp_peer_get_tgt_peer_by_id(soc, peer_id,
  2386. DP_MOD_ID_RX);
  2387. if (!peer)
  2388. goto deliver_fail;
  2389. /* only check for MLO connection */
  2390. if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer &&
  2391. dp_rx_is_peer_cache_bufq_supported()) {
  2392. qdf_nbuf_set_exc_frame(nbuf, 1);
  2393. if (QDF_STATUS_SUCCESS ==
  2394. dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) {
  2395. DP_STATS_INC(soc,
  2396. rx.err.pkt_delivered_no_peer,
  2397. 1);
  2398. } else {
  2399. DP_STATS_INC(soc,
  2400. rx.err.rx_invalid_peer.num,
  2401. 1);
  2402. }
  2403. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2404. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2405. return;
  2406. }
  2407. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2408. }
  2409. deliver_fail:
  2410. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2411. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2412. dp_rx_nbuf_free(nbuf);
  2413. if (vdev)
  2414. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2415. }
  2416. #else
  2417. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2418. {
  2419. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2420. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2421. dp_rx_nbuf_free(nbuf);
  2422. }
  2423. #endif
  2424. /**
  2425. * dp_rx_srng_get_num_pending() - get number of pending entries
  2426. * @hal_soc: hal soc opaque pointer
  2427. * @hal_ring: opaque pointer to the HAL Rx Ring
  2428. * @num_entries: number of entries in the hal_ring.
  2429. * @near_full: pointer to a boolean. This is set if ring is near full.
  2430. *
  2431. * The function returns the number of entries in a destination ring which are
  2432. * yet to be reaped. The function also checks if the ring is near full.
  2433. * If more than half of the ring needs to be reaped, the ring is considered
  2434. * approaching full.
2435. * The function uses hal_srng_dst_num_valid_locked to get the number of valid
  2436. * entries. It should not be called within a SRNG lock. HW pointer value is
  2437. * synced into cached_hp.
  2438. *
  2439. * Return: Number of pending entries if any
  2440. */
  2441. uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
  2442. hal_ring_handle_t hal_ring_hdl,
  2443. uint32_t num_entries,
  2444. bool *near_full)
  2445. {
  2446. uint32_t num_pending = 0;
  2447. num_pending = hal_srng_dst_num_valid_locked(hal_soc,
  2448. hal_ring_hdl,
  2449. true);
  2450. if (num_entries && (num_pending >= num_entries >> 1))
  2451. *near_full = true;
  2452. else
  2453. *near_full = false;
  2454. return num_pending;
  2455. }
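/*
 * Illustrative usage sketch (assumption): a REO processing loop may use the
 * near_full hint to extend its reap budget for the current pass:
 *
 *	bool near_full;
 *	uint32_t pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *						      num_entries, &near_full);
 *	if (near_full)
 *		quota = num_entries;	// hypothetical budget extension
 */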
  2456. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2457. #ifdef WLAN_SUPPORT_RX_FISA
  2458. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2459. {
  2460. QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
  2461. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2462. }
  2463. #else
  2464. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2465. {
  2466. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2467. }
  2468. #endif
  2469. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  2470. #ifdef DP_RX_DROP_RAW_FRM
  2471. /**
  2472. * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
  2473. * @nbuf: pkt skb pointer
  2474. *
  2475. * Return: true - raw frame, dropped
  2476. * false - not raw frame, do nothing
  2477. */
  2478. bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
  2479. {
  2480. if (qdf_nbuf_is_raw_frame(nbuf)) {
  2481. dp_rx_nbuf_free(nbuf);
  2482. return true;
  2483. }
  2484. return false;
  2485. }
  2486. #endif
  2487. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  2488. /**
  2489. * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
  2490. * @soc: Datapath soc structure
  2491. * @ring_num: REO ring number
  2492. * @ring_desc: REO ring descriptor
  2493. *
  2494. * Returns: None
  2495. */
  2496. void
  2497. dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  2498. hal_ring_desc_t ring_desc)
  2499. {
  2500. struct dp_buf_info_record *record;
  2501. struct hal_buf_info hbi;
  2502. uint32_t idx;
  2503. if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
  2504. return;
  2505. hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
  2506. /* buffer_addr_info is the first element of ring_desc */
  2507. hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
  2508. &hbi);
  2509. idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
  2510. DP_RX_HIST_MAX);
  2511. /* No NULL check needed for record since its an array */
  2512. record = &soc->rx_ring_history[ring_num]->entry[idx];
  2513. record->timestamp = qdf_get_log_timestamp();
  2514. record->hbi.paddr = hbi.paddr;
  2515. record->hbi.sw_cookie = hbi.sw_cookie;
  2516. record->hbi.rbm = hbi.rbm;
  2517. }
  2518. #endif
  2519. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  2520. /**
  2521. * dp_rx_update_stats() - Update soc level rx packet count
  2522. * @soc: DP soc handle
  2523. * @nbuf: nbuf received
  2524. *
  2525. * Returns: none
  2526. */
  2527. void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2528. {
  2529. DP_STATS_INC_PKT(soc, rx.ingress, 1,
  2530. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2531. }
  2532. #endif
  2533. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  2534. /**
  2535. * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
  2536. * @soc : dp_soc handle
  2537. * @pdev: dp_pdev handle
  2538. * @peer_id: peer_id of the peer for which completion came
2539. * @is_offload: offload flag
  2540. * @netbuf: Buffer pointer
  2541. *
  2542. * This function is used to deliver rx packet to packet capture
  2543. */
  2544. void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
  2545. uint16_t peer_id, uint32_t is_offload,
  2546. qdf_nbuf_t netbuf)
  2547. {
  2548. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2549. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
  2550. peer_id, is_offload, pdev->pdev_id);
  2551. }
  2552. void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2553. uint32_t is_offload)
  2554. {
  2555. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2556. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
  2557. soc, nbuf, HTT_INVALID_VDEV,
  2558. is_offload, 0);
  2559. }
  2560. #endif
  2561. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2562. QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
  2563. {
  2564. QDF_STATUS ret;
  2565. if (vdev->osif_rx_flush) {
  2566. ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
  2567. if (!QDF_IS_STATUS_SUCCESS(ret)) {
  2568. dp_err("Failed to flush rx pkts for vdev %d\n",
  2569. vdev->vdev_id);
  2570. return ret;
  2571. }
  2572. }
  2573. return QDF_STATUS_SUCCESS;
  2574. }
  2575. static QDF_STATUS
  2576. dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
  2577. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  2578. struct dp_pdev *dp_pdev,
  2579. struct rx_desc_pool *rx_desc_pool)
  2580. {
  2581. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  2582. (nbuf_frag_info_t->virt_addr).nbuf =
  2583. qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
  2584. RX_BUFFER_RESERVATION,
  2585. rx_desc_pool->buf_alignment, FALSE);
  2586. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  2587. dp_err("nbuf alloc failed");
  2588. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  2589. return ret;
  2590. }
  2591. ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
  2592. (nbuf_frag_info_t->virt_addr).nbuf,
  2593. QDF_DMA_FROM_DEVICE,
  2594. rx_desc_pool->buf_size);
  2595. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  2596. qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
  2597. dp_err("nbuf map failed");
  2598. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  2599. return ret;
  2600. }
  2601. nbuf_frag_info_t->paddr =
  2602. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
  2603. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  2604. &nbuf_frag_info_t->paddr,
  2605. rx_desc_pool);
  2606. if (ret == QDF_STATUS_E_FAILURE) {
  2607. dp_err("nbuf check x86 failed");
  2608. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  2609. return ret;
  2610. }
  2611. return QDF_STATUS_SUCCESS;
  2612. }
  2613. QDF_STATUS
  2614. dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
  2615. struct dp_srng *dp_rxdma_srng,
  2616. struct rx_desc_pool *rx_desc_pool,
  2617. uint32_t num_req_buffers)
  2618. {
  2619. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
  2620. hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
  2621. union dp_rx_desc_list_elem_t *next;
  2622. void *rxdma_ring_entry;
  2623. qdf_dma_addr_t paddr;
  2624. struct dp_rx_nbuf_frag_info *nf_info;
  2625. uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
  2626. uint32_t buffer_index, nbuf_ptrs_per_page;
  2627. qdf_nbuf_t nbuf;
  2628. QDF_STATUS ret;
  2629. int page_idx, total_pages;
  2630. union dp_rx_desc_list_elem_t *desc_list = NULL;
  2631. union dp_rx_desc_list_elem_t *tail = NULL;
  2632. int sync_hw_ptr = 1;
  2633. uint32_t num_entries_avail;
  2634. if (qdf_unlikely(!dp_pdev)) {
  2635. dp_rx_err("%pK: pdev is null for mac_id = %d",
  2636. dp_soc, mac_id);
  2637. return QDF_STATUS_E_FAILURE;
  2638. }
  2639. if (qdf_unlikely(!rxdma_srng)) {
  2640. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  2641. return QDF_STATUS_E_FAILURE;
  2642. }
  2643. dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
  2644. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  2645. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  2646. rxdma_srng,
  2647. sync_hw_ptr);
  2648. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  2649. if (!num_entries_avail) {
  2650. dp_err("Num of available entries is zero, nothing to do");
  2651. return QDF_STATUS_E_NOMEM;
  2652. }
  2653. if (num_entries_avail < num_req_buffers)
  2654. num_req_buffers = num_entries_avail;
  2655. nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
  2656. num_req_buffers, &desc_list, &tail);
  2657. if (!nr_descs) {
  2658. dp_err("no free rx_descs in freelist");
  2659. DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
  2660. return QDF_STATUS_E_NOMEM;
  2661. }
  2662. dp_debug("got %u RX descs for driver attach", nr_descs);
  2663. /*
  2664. * Try to allocate pointers to the nbuf one page at a time.
  2665. * Take pointers that can fit in one page of memory and
  2666. * iterate through the total descriptors that need to be
  2667. * allocated in order of pages. Reuse the pointers that
  2668. * have been allocated to fit in one page across each
  2669. * iteration to index into the nbuf.
  2670. */
  2671. total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;
  2672. /*
  2673. * Add an extra page to store the remainder if any
  2674. */
  2675. if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
  2676. total_pages++;
  2677. nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
  2678. if (!nf_info) {
  2679. dp_err("failed to allocate nbuf array");
  2680. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  2681. QDF_BUG(0);
  2682. return QDF_STATUS_E_NOMEM;
  2683. }
  2684. nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);

        for (page_idx = 0; page_idx < total_pages; page_idx++) {
                qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);

                for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
                        /*
                         * The last page of buffer pointers may not be
                         * required completely, based on the number of
                         * descriptors. The check below ensures we allocate
                         * only the required number of descriptors.
                         */
                        if (nr_nbuf_total >= nr_descs)
                                break;

                        /* Flag is set during pdev rx_desc_pool initialization */
                        if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                                ret = dp_pdev_frag_alloc_and_map(dp_soc,
                                                                 &nf_info[nr_nbuf],
                                                                 dp_pdev,
                                                                 rx_desc_pool);
                        else
                                ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
                                                                 &nf_info[nr_nbuf],
                                                                 dp_pdev,
                                                                 rx_desc_pool);
                        if (QDF_IS_STATUS_ERROR(ret))
                                break;

                        nr_nbuf_total++;
                }

                hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
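
                /*
                 * Program one RXDMA ring entry per buffer successfully
                 * allocated in this page iteration and attach the buffer
                 * to a free rx descriptor from the list.
                 */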
                for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
                        rxdma_ring_entry =
                                hal_srng_src_get_next(dp_soc->hal_soc,
                                                      rxdma_srng);
                        qdf_assert_always(rxdma_ring_entry);

                        next = desc_list->next;
                        paddr = nf_info[buffer_index].paddr;
                        nbuf = nf_info[buffer_index].virt_addr.nbuf;

                        /* Flag is set during pdev rx_desc_pool initialization */
                        if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
                                dp_rx_desc_frag_prep(&desc_list->rx_desc,
                                                     &nf_info[buffer_index]);
                        else
                                dp_rx_desc_prep(&desc_list->rx_desc,
                                                &nf_info[buffer_index]);

                        desc_list->rx_desc.in_use = 1;
                        dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
                        dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
                                                   __func__,
                                                   RX_DESC_REPLENISHED);

                        hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
                                                     rxdma_ring_entry, paddr,
                                                     desc_list->rx_desc.cookie,
                                                     rx_desc_pool->owner);

                        dp_ipa_handle_rx_buf_smmu_mapping(
                                        dp_soc, nbuf,
                                        rx_desc_pool->buf_size, true,
                                        __func__, __LINE__);

                        dp_audio_smmu_map(dp_soc->osdev,
                                          qdf_mem_paddr_from_dmaaddr(dp_soc->osdev,
                                                                     QDF_NBUF_CB_PADDR(nbuf)),
                                          QDF_NBUF_CB_PADDR(nbuf),
                                          rx_desc_pool->buf_size);

                        desc_list = next;
                }

                dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
                                               rxdma_srng, nr_nbuf, nr_nbuf);
                hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
        }

        dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
        qdf_mem_free(nf_info);

        if (!nr_nbuf_total) {
                dp_err("No nbuf's allocated");
                QDF_BUG(0);
                return QDF_STATUS_E_RESOURCES;
        }

        /* No need to count the number of bytes received during replenish.
         * Therefore set replenish.pkts.bytes as 0.
         */
        DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

        return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);

/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *  monitor destination ring via frag.
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Enable this flag only for monitor destination buffer processing
 * if DP_RX_MON_MEM_FRAG feature is enabled.
 * If the flag is set, frag based functions are used for alloc, map,
 * prep desc and free ops on the desc buffer; otherwise the normal
 * nbuf based functions are used.
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
                                bool is_mon_dest_desc)
{
        rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
        if (is_mon_dest_desc)
                dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
                                bool is_mon_dest_desc)
{
        rx_desc_pool->rx_mon_dest_frag_enable = false;
        if (is_mon_dest_desc)
                dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);

/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *                                pool
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *                      QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
        struct dp_soc *soc = pdev->soc;
        uint32_t rxdma_entries;
        uint32_t rx_sw_desc_num;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;
        uint32_t status = QDF_STATUS_SUCCESS;
        int mac_for_pdev;

        mac_for_pdev = pdev->lmac_id;
        if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
                dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
                           soc, mac_for_pdev);
                return status;
        }

        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
        rxdma_entries = dp_rxdma_srng->num_entries;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
        rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

        rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
        status = dp_rx_desc_pool_alloc(soc,
                                       rx_sw_desc_num,
                                       rx_desc_pool);
        if (status != QDF_STATUS_SUCCESS)
                return status;

        return status;
}

/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
        int mac_for_pdev = pdev->lmac_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
        dp_rx_desc_pool_free(soc, rx_desc_pool);
}

/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *                      QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
        int mac_for_pdev = pdev->lmac_id;
        struct dp_soc *soc = pdev->soc;
        uint32_t rxdma_entries;
        uint32_t rx_sw_desc_num;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
        if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
                /*
                 * If NSS is enabled, rx_desc_pool is already filled.
                 * Hence, just disable desc_pool frag flag.
                 */
                dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

                dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
                           soc, mac_for_pdev);
                return QDF_STATUS_SUCCESS;
        }

        if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
                return QDF_STATUS_E_NOMEM;

        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
        rxdma_entries = dp_rxdma_srng->num_entries;

        soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

        rx_sw_desc_num =
                wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

        rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
        rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
        rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
        /* Disable monitor dest processing via frag */
        dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

        dp_rx_desc_pool_init(soc, mac_for_pdev,
                             rx_sw_desc_num, rx_desc_pool);
        return QDF_STATUS_SUCCESS;
}

/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
        int mac_for_pdev = pdev->lmac_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

        dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}

/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *                      QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
        int mac_for_pdev = pdev->lmac_id;
        struct dp_soc *soc = pdev->soc;
        struct dp_srng *dp_rxdma_srng;
        struct rx_desc_pool *rx_desc_pool;
        uint32_t rxdma_entries;

        dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
        rxdma_entries = dp_rxdma_srng->num_entries;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

        /* Initialize RX buffer pool which will be
         * used during low memory conditions
         */
        dp_rx_buffer_pool_init(soc, mac_for_pdev);

        return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
                                                dp_rxdma_srng,
                                                rx_desc_pool,
                                                rxdma_entries - 1);
}

/*
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
        int mac_for_pdev = pdev->lmac_id;
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

        dp_rx_desc_nbuf_free(soc, rx_desc_pool, false);
        dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}

#ifdef DP_RX_SPECIAL_FRAME_NEED
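/**
 * dp_rx_deliver_special_frame() - deliver frames matching @frame_mask to
 *                                 the network stack as exception frames
 * @soc: core txrx main context
 * @txrx_peer: peer on which the frame was received
 * @nbuf: received frame
 * @frame_mask: mask of frame types to be checked against
 * @rx_tlv_hdr: start of the RX TLV header of the frame
 *
 * Return: true if the frame matched @frame_mask and was delivered to the
 *         stack, false otherwise
 */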
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
                                 struct dp_txrx_peer *txrx_peer,
                                 qdf_nbuf_t nbuf, uint32_t frame_mask,
                                 uint8_t *rx_tlv_hdr)
{
        uint32_t l2_hdr_offset = 0;
        uint16_t msdu_len = 0;
        uint32_t skip_len;

        l2_hdr_offset =
                hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
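
        /*
         * Fragmented nbufs skip only the L3 header padding; for linear
         * nbufs the RX TLV header is pulled as well and the packet length
         * is set from the MSDU length stored in the nbuf control block.
         */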
        if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
                skip_len = l2_hdr_offset;
        } else {
                msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
                skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
                qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
        }

        QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
        dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
        qdf_nbuf_pull_head(nbuf, skip_len);

        if (txrx_peer->vdev) {
                dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
                                  QDF_TX_RX_STATUS_OK);
        }

        if (dp_rx_is_special_frame(nbuf, frame_mask)) {
                dp_info("special frame, mpdu sn 0x%x",
                        hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
                qdf_nbuf_set_exc_frame(nbuf, 1);
                dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
                                       nbuf, NULL);
                return true;
        }

        return false;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
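/**
 * dp_rx_mark_first_packet_after_wow_wakeup() - mark the nbuf carrying the
 *                                              first packet received after
 *                                              a WoW wakeup
 * @pdev: core txrx pdev context
 * @rx_tlv: start of the RX TLV header of the frame
 * @nbuf: received frame
 *
 * Return: None
 */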
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
                                              uint8_t *rx_tlv,
                                              qdf_nbuf_t nbuf)
{
        struct dp_soc *soc;

        if (!pdev->is_first_wakeup_packet)
                return;

        soc = pdev->soc;
        if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
                qdf_nbuf_mark_wakeup_frame(nbuf);
                dp_info("First packet after WOW Wakeup rcvd");
        }
}
#endif