dp_rx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include "enet.h"

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */

#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);

	qdf_assert_always(0);
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			   return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#endif

uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}

#ifdef RX_DESC_DEBUG_CHECK
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
 *				      out of bound access from H.W
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from H.W
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since its an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif

#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#endif /* WLAN_SOFTUMAC_SUPPORT */
/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).vaddr =
		qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
		dp_err("Frag alloc failed");
		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       (nbuf_frag_info_t->virt_addr).vaddr,
			       QDF_DMA_FROM_DEVICE,
			       rx_desc_pool->buf_size,
			       &nbuf_frag_info_t->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
		dp_err("Frag map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: HAL ring handle
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
	struct dp_refill_info_record *record;
	uint32_t idx;
	uint32_t tp;
	uint32_t hp;

	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
			 !soc->rx_refill_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
					DP_RX_REFILL_HIST_MAX);

	/* No NULL check needed for record since its an array */
	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);

	record->timestamp = qdf_get_log_timestamp();
	record->num_req = num_req;
	record->num_refill = num_refill;
	record->hp = hp;
	record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif

/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and
 *					    map
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
				     uint32_t mac_id,
				     uint32_t num_entries_avail,
				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
				     struct dp_pdev *dp_pdev,
				     struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
					     mac_id,
					     rx_desc_pool,
					     num_entries_avail);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
					 nbuf_frag_info_t);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		dp_rx_buffer_pool_nbuf_free(dp_soc,
			(nbuf_frag_info_t->virt_addr).nbuf, mac_id);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	if (qdf_atomic_read(&dp_soc->ipa_mapped))
		dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
					(nbuf_frag_info_t->virt_addr).nbuf),
					rx_desc_pool->buf_size,
					true, __func__, __LINE__);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return QDF_STATUS_E_ADDRNOTAVAIL;
	}

	return QDF_STATUS_SUCCESS;
}
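
/*
 * Note on the replenish variants below: they are compiled only when
 * QCA_DP_RX_NBUF_NO_MAP_UNMAP is defined (and not on x86 builds). Instead of
 * performing a full DMA map/unmap per buffer, they obtain the physical
 * address via dp_rx_nbuf_sync_no_dsb() and issue a single qdf_dsb() barrier
 * after the ring has been populated.
 */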
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);

	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);

	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_nbuf_t nbuf_next;
	qdf_nbuf_t nbuf_head = NULL;
	qdf_nbuf_t nbuf_tail = NULL;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	/* Allocate required number of nbufs */
	for (count = 0; count < num_req_buffers; count++) {
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			/* Update num_req_buffers to nbufs allocated count */
			num_req_buffers = count;
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		QDF_NBUF_CB_PADDR(nbuf) = paddr;
		DP_RX_LIST_APPEND(nbuf_head,
				  nbuf_tail,
				  nbuf);
	}
	qdf_dsb();

	nbuf = nbuf_head;
	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		nbuf_next = nbuf->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&(*desc_list)->rx_desc, nbuf);
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.paddr_buf_start = QDF_NBUF_CB_PADDR(nbuf);
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     QDF_NBUF_CB_PADDR(nbuf),
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
		nbuf = nbuf_next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	while (nbuf) {
		nbuf_next = nbuf->next;
		dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
		qdf_nbuf_free(nbuf);
		nbuf = nbuf_next;
	}

	return QDF_STATUS_SUCCESS;
}
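
/*
 * __dp_rx_comp2refill_replenish() (WLAN_SUPPORT_PPEDS) re-arms the refill
 * ring directly from descriptors whose original buffer is still held in
 * rx_desc.reuse_nbuf, so no fresh nbuf allocation is needed. Descriptors
 * that do not fit back into the ring have their reuse nbufs unmapped
 * (unless marked recycled_for_ds) and freed, and the descriptors themselves
 * are returned to the free list.
 */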
#ifdef WLAN_SUPPORT_PPEDS
QDF_STATUS
__dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
			      struct dp_srng *dp_rxdma_srng,
			      struct rx_desc_pool *rx_desc_pool,
			      uint32_t num_req_buffers,
			      union dp_rx_desc_list_elem_t **desc_list,
			      union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	union dp_rx_desc_list_elem_t *cur;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		qdf_prefetch(next);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		(*desc_list)->rx_desc.nbuf = (*desc_list)->rx_desc.reuse_nbuf;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     (*desc_list)->rx_desc.paddr_buf_start,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	cur = *desc_list;
	for ( ; count < num_req_buffers; count++) {
		next = cur->next;
		qdf_prefetch(next);

		nbuf = cur->rx_desc.reuse_nbuf;

		cur->rx_desc.nbuf = NULL;
		cur->rx_desc.in_use = 0;
		cur->rx_desc.has_reuse_nbuf = false;
		cur->rx_desc.reuse_nbuf = NULL;

		if (!nbuf->recycled_for_ds)
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);

		nbuf->recycled_for_ds = 0;
		nbuf->fast_recycled = 0;

		qdf_nbuf_free(nbuf);
		cur = next;
	}

	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
#endif

QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);
		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);
		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		desc_list->rx_desc.nbuf = nbuf;
		dp_rx_set_reuse_nbuf(&desc_list->rx_desc, nbuf);
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.paddr_buf_start = paddr;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

	return QDF_STATUS_SUCCESS;
}
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif

/**
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  struct dp_rx_desc *rx_desc)
{
	void *rxdma_srng;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	/* No one else should be accessing the srng at this point */
	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

	qdf_assert_always(rxdma_ring_entry);
	rx_desc->in_err_state = 0;

	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
					 rx_desc_pool->buf_size);
	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
				     rx_desc->cookie, rx_desc_pool->owner);

	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}
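
/*
 * dp_rx_desc_reuse() below walks every in-use descriptor of each pool during
 * a UMAC HW reset: descriptors that fit in the refill ring are re-posted via
 * dp_rx_desc_replenish(), the overflow is unmapped and its nbufs are chained
 * onto *nbuf_list for the caller to free, and the ring is then topped up to
 * roughly one third of its size with freshly allocated buffers.
 */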
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int mac_id, i, j;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
					&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
		/* Only fill up 1/3 of the ring size */
		uint32_t num_req_decs;

		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
		    !rx_desc_pool->array)
			continue;

		num_req_decs = dp_rxdma_srng->num_entries / 3;

		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
			struct dp_rx_desc *rx_desc =
				(struct dp_rx_desc *)&rx_desc_pool->array[i];

			if (rx_desc->in_use) {
				if (j < (dp_rxdma_srng->num_entries - 1)) {
					dp_rx_desc_replenish(soc, dp_rxdma_srng,
							     rx_desc_pool,
							     rx_desc);
				} else {
					dp_rx_nbuf_unmap(soc, rx_desc, 0);
					rx_desc->unmapped = 0;

					rx_desc->nbuf->next = *nbuf_list;
					*nbuf_list = rx_desc->nbuf;

					dp_rx_add_to_free_desc_list(&head,
								    &tail,
								    rx_desc);
				}
				j++;
			}
		}

		if (head)
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If num of descs in use were less, then we need to replenish
		 * the ring with some buffers
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif

QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	uint32_t extra_buffers;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_verbose_debug("%pK: requested %d buffers for replenish",
			 dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
			 dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
		DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* set extra buffers to CRITICAL_BUFFER_THRESHOLD only if
		 * total buff requested after adding extra buffers is less
		 * than or equal to num entries available, else set it to max
		 * possible additional buffers available at that moment
		 */
		extra_buffers =
			((num_req_buffers + CRITICAL_BUFFER_THRESHOLD) > num_entries_avail) ?
			(num_entries_avail - num_req_buffers) :
			CRITICAL_BUFFER_THRESHOLD;
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 extra_buffers,
						 &desc_list_append,
						 &tail_append);

		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
				 num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
								   mac_id,
								   num_entries_avail, &nbuf_frag_info,
								   dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);
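
/*
 * Illustrative call sketch (not part of the driver): callers normally go
 * through the dp_rx_buffers_replenish() wrapper, which is assumed here to
 * forward the caller name as func_name (see the call in dp_rx_desc_reuse()
 * above). Passing empty desc_list/tail makes the function pull descriptors
 * from the per-pool freelist:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *	dp_rx_buffers_replenish(soc, mac_id, &soc->rx_refill_buf_ring[mac_id],
 *				&soc->rx_desc_buf[mac_id], num_to_refill,
 *				&desc_list, &tail, false);
 */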
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer, uint8_t link_id)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf), link_id);

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}

#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/**
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf.
 * Code for marking which frames are CRITICAL is accessed via callback.
 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif
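
/*
 * Intra-BSS forwarding helpers: dp_rx_intrabss_mcbc_fwd() transmits a copy of
 * a multicast/broadcast frame back onto the same vdev and always returns
 * false, so the caller can still deliver the original frame to the stack;
 * dp_rx_intrabss_ucast_fwd() hands the original unicast nbuf to dp_tx_send()
 * on the destination vdev and returns true only when the frame was consumed
 * by the TX path.
 */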
  1009. bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
  1010. uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
  1011. struct cdp_tid_rx_stats *tid_stats,
  1012. uint8_t link_id)
  1013. {
  1014. uint16_t len;
  1015. qdf_nbuf_t nbuf_copy;
  1016. if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
  1017. nbuf))
  1018. return true;
  1019. if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf, link_id))
  1020. return false;
  1021. /* If the source peer in the isolation list
  1022. * then dont forward instead push to bridge stack
  1023. */
  1024. if (dp_get_peer_isolation(ta_peer))
  1025. return false;
  1026. nbuf_copy = qdf_nbuf_copy(nbuf);
  1027. if (!nbuf_copy)
  1028. return false;
  1029. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1030. qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
  1031. dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
  1032. if (soc->arch_ops.dp_rx_intrabss_mcast_handler(soc, ta_peer,
  1033. nbuf_copy,
  1034. tid_stats,
  1035. link_id))
  1036. return false;
  1037. /* Don't send packets if tx is paused */
  1038. if (!soc->is_tx_pause &&
  1039. !dp_tx_send((struct cdp_soc_t *)soc,
  1040. ta_peer->vdev->vdev_id, nbuf_copy)) {
  1041. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  1042. len, link_id);
  1043. tid_stats->intrabss_cnt++;
  1044. } else {
  1045. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  1046. len, link_id);
  1047. tid_stats->fail_cnt[INTRABSS_DROP]++;
  1048. dp_rx_nbuf_free(nbuf_copy);
  1049. }
  1050. return false;
  1051. }
  1052. bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
  1053. uint8_t tx_vdev_id,
  1054. uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
  1055. struct cdp_tid_rx_stats *tid_stats,
  1056. uint8_t link_id)
  1057. {
  1058. uint16_t len;
  1059. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1060. /* linearize the nbuf just before we send to
  1061. * dp_tx_send()
  1062. */
  1063. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
  1064. if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
  1065. return false;
  1066. nbuf = qdf_nbuf_unshare(nbuf);
  1067. if (!nbuf) {
  1068. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
  1069. rx.intra_bss.fail,
  1070. 1, len, link_id);
  1071. /* return true even though the pkt is
  1072. * not forwarded. Basically skb_unshare
  1073. * failed and we want to continue with
  1074. * next nbuf.
  1075. */
  1076. tid_stats->fail_cnt[INTRABSS_DROP]++;
  1077. return false;
  1078. }
  1079. }
  1080. qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
  1081. dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);
  1082. /* Don't send packets if tx is paused */
  1083. if (!soc->is_tx_pause && !dp_tx_send((struct cdp_soc_t *)soc,
  1084. tx_vdev_id, nbuf)) {
  1085. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  1086. len, link_id);
  1087. } else {
  1088. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  1089. len, link_id);
  1090. tid_stats->fail_cnt[INTRABSS_DROP]++;
  1091. return false;
  1092. }
  1093. return true;
  1094. }
  1095. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1096. #ifdef MESH_MODE_SUPPORT
  1097. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1098. uint8_t *rx_tlv_hdr,
  1099. struct dp_txrx_peer *txrx_peer)
  1100. {
  1101. struct mesh_recv_hdr_s *rx_info = NULL;
  1102. uint32_t pkt_type;
  1103. uint32_t nss;
  1104. uint32_t rate_mcs;
  1105. uint32_t bw;
  1106. uint8_t primary_chan_num;
  1107. uint32_t center_chan_freq;
  1108. struct dp_soc *soc = vdev->pdev->soc;
  1109. struct dp_peer *peer;
  1110. struct dp_peer *primary_link_peer;
  1111. struct dp_soc *link_peer_soc;
  1112. cdp_peer_stats_param_t buf = {0};
  1113. /* fill recv mesh stats */
  1114. rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
  1115. /* upper layers are responsible to free this memory */
  1116. if (!rx_info) {
  1117. dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
  1118. vdev->pdev->soc);
  1119. DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
  1120. return;
  1121. }
  1122. rx_info->rs_flags = MESH_RXHDR_VER1;
  1123. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  1124. rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
  1125. if (qdf_nbuf_is_rx_chfrag_end(nbuf))
  1126. rx_info->rs_flags |= MESH_RX_LAST_MSDU;
  1127. peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
  1128. if (peer) {
  1129. if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
  1130. rx_info->rs_flags |= MESH_RX_DECRYPTED;
  1131. rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
  1132. rx_tlv_hdr);
  1133. if (vdev->osif_get_key)
  1134. vdev->osif_get_key(vdev->osif_vdev,
  1135. &rx_info->rs_decryptkey[0],
  1136. &peer->mac_addr.raw[0],
  1137. rx_info->rs_keyix);
  1138. }
  1139. dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
  1140. }
  1141. primary_link_peer = dp_get_primary_link_peer_by_id(soc,
  1142. txrx_peer->peer_id,
  1143. DP_MOD_ID_MESH);
  1144. if (qdf_likely(primary_link_peer)) {
  1145. link_peer_soc = primary_link_peer->vdev->pdev->soc;
  1146. dp_monitor_peer_get_stats_param(link_peer_soc,
  1147. primary_link_peer,
  1148. cdp_peer_rx_snr, &buf);
  1149. rx_info->rs_snr = buf.rx_snr;
  1150. dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
  1151. }
  1152. rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
  1153. soc = vdev->pdev->soc;
  1154. primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
  1155. center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;
  1156. if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
  1157. rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
  1158. soc->ctrl_psoc,
  1159. vdev->pdev->pdev_id,
  1160. center_chan_freq);
  1161. }
  1162. rx_info->rs_channel = primary_chan_num;
  1163. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  1164. rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  1165. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  1166. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  1167. /*
  1168. * The MCS index does not start with 0 when NSS>1 in HT mode.
  1169. * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1):
  1170. * ------------------------------------------------------
  1171. * NSS | 1 | 2 | 3 | 4
  1172. * ------------------------------------------------------
  1173. * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
  1174. * ------------------------------------------------------
  1175. * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
  1176. * ------------------------------------------------------
  1177. * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1)
  1178. */
  1179. if ((pkt_type == DOT11_N) && (nss == 2))
  1180. rate_mcs += 8;
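/*
 * Pack the rate info into rs_ratephy1: MCS in bits 0-7, NSS in bits 8-15,
 * pkt_type (preamble) in bits 16-23 and BW in bits 24-31.
 */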
  1181. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
  1182. (bw << 24);
  1183. qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  1184. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
  1185. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
  1186. rx_info->rs_flags,
  1187. rx_info->rs_rssi,
  1188. rx_info->rs_channel,
  1189. rx_info->rs_ratephy1,
  1190. rx_info->rs_keyix,
  1191. rx_info->rs_snr);
  1192. }
  1193. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1194. uint8_t *rx_tlv_hdr)
  1195. {
  1196. union dp_align_mac_addr mac_addr;
  1197. struct dp_soc *soc = vdev->pdev->soc;
  1198. if (qdf_unlikely(vdev->mesh_rx_filter)) {
  1199. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
  1200. if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  1201. rx_tlv_hdr))
  1202. return QDF_STATUS_SUCCESS;
  1203. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
  1204. if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
  1205. rx_tlv_hdr))
  1206. return QDF_STATUS_SUCCESS;
  1207. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
  1208. if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  1209. rx_tlv_hdr) &&
  1210. !hal_rx_mpdu_get_to_ds(soc->hal_soc,
  1211. rx_tlv_hdr))
  1212. return QDF_STATUS_SUCCESS;
  1213. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
  1214. if (hal_rx_mpdu_get_addr1(soc->hal_soc,
  1215. rx_tlv_hdr,
  1216. &mac_addr.raw[0]))
  1217. return QDF_STATUS_E_FAILURE;
  1218. if (!qdf_mem_cmp(&mac_addr.raw[0],
  1219. &vdev->mac_addr.raw[0],
  1220. QDF_MAC_ADDR_SIZE))
  1221. return QDF_STATUS_SUCCESS;
  1222. }
  1223. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
  1224. if (hal_rx_mpdu_get_addr2(soc->hal_soc,
  1225. rx_tlv_hdr,
  1226. &mac_addr.raw[0]))
  1227. return QDF_STATUS_E_FAILURE;
  1228. if (!qdf_mem_cmp(&mac_addr.raw[0],
  1229. &vdev->mac_addr.raw[0],
  1230. QDF_MAC_ADDR_SIZE))
  1231. return QDF_STATUS_SUCCESS;
  1232. }
  1233. }
  1234. return QDF_STATUS_E_FAILURE;
  1235. }
  1236. #else
  1237. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1238. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
  1239. {
  1240. }
  1241. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1242. uint8_t *rx_tlv_hdr)
  1243. {
  1244. return QDF_STATUS_E_FAILURE;
  1245. }
  1246. #endif
  1247. #ifdef RX_PEER_INVALID_ENH
  1248. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1249. uint8_t mac_id)
  1250. {
  1251. struct dp_invalid_peer_msg msg;
  1252. struct dp_vdev *vdev = NULL;
  1253. struct dp_pdev *pdev = NULL;
  1254. struct ieee80211_frame *wh;
  1255. qdf_nbuf_t curr_nbuf, next_nbuf;
  1256. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1257. uint8_t *rx_pkt_hdr = NULL;
  1258. int i = 0;
  1259. uint32_t nbuf_len;
  1260. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  1261. dp_rx_debug("%pK: Drop decapped frames", soc);
  1262. goto free;
  1263. }
  1264. /* In RAW packet, packet header will be part of data */
  1265. rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
  1266. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1267. if (!DP_FRAME_IS_DATA(wh)) {
  1268. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  1269. goto free;
  1270. }
  1271. nbuf_len = qdf_nbuf_len(mpdu);
  1272. if (nbuf_len < sizeof(struct ieee80211_frame)) {
  1273. dp_rx_err("%pK: Invalid nbuf length: %u", soc, nbuf_len);
  1274. goto free;
  1275. }
/* In the DMAC case the rx_desc_pools are common across PDEVs,
 * so the PDEV cannot be derived from the pool_id.
 *
 * The link_id needs to be derived from the TLV tag word, which is
 * disabled by default. For now add a WAR that finds the vdev by
 * brute force; this needs to be fixed once word-based subscription
 * support is added by enabling the TLV tag word.
 */
  1284. if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
  1285. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1286. pdev = soc->pdev_list[i];
  1287. if (!pdev || qdf_unlikely(pdev->is_pdev_down))
  1288. continue;
  1289. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1290. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1291. QDF_MAC_ADDR_SIZE) == 0) {
  1292. goto out;
  1293. }
  1294. }
  1295. }
  1296. } else {
  1297. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1298. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  1299. dp_rx_err("%pK: PDEV %s",
  1300. soc, !pdev ? "not found" : "down");
  1301. goto free;
  1302. }
  1303. if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
  1304. QDF_STATUS_SUCCESS)
  1305. return 0;
  1306. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1307. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1308. QDF_MAC_ADDR_SIZE) == 0) {
  1309. goto out;
  1310. }
  1311. }
  1312. }
  1313. if (!vdev) {
  1314. dp_rx_err("%pK: VDEV not found", soc);
  1315. goto free;
  1316. }
  1317. out:
  1318. msg.wh = wh;
  1319. qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
  1320. msg.nbuf = mpdu;
  1321. msg.vdev_id = vdev->vdev_id;
/*
 * NOTE: Only valid for HKv1.
 * If smart monitor mode is enabled on the RE, we receive invalid
 * peer frames with the RA set to the STA mac of the RE and the TA
 * not matching any NAC list entry or the BSSID. Such frames need to
 * be dropped in order to avoid false HM_WDS addition.
 */
  1329. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
  1330. if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
  1331. dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
  1332. soc, wh->i_addr1);
  1333. goto free;
  1334. }
  1335. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
  1336. (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
  1337. pdev->pdev_id, &msg);
  1338. }
  1339. free:
  1340. /* Drop and free packet */
  1341. curr_nbuf = mpdu;
  1342. while (curr_nbuf) {
  1343. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1344. dp_rx_nbuf_free(curr_nbuf);
  1345. curr_nbuf = next_nbuf;
  1346. }
  1347. return 0;
  1348. }
  1349. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1350. qdf_nbuf_t mpdu, bool mpdu_done,
  1351. uint8_t mac_id)
  1352. {
  1353. /* Only trigger the process when mpdu is completed */
  1354. if (mpdu_done)
  1355. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1356. }
  1357. #else
  1358. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1359. uint8_t mac_id)
  1360. {
  1361. qdf_nbuf_t curr_nbuf, next_nbuf;
  1362. struct dp_pdev *pdev;
  1363. struct dp_vdev *vdev = NULL;
  1364. struct ieee80211_frame *wh;
  1365. struct dp_peer *peer = NULL;
  1366. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1367. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
  1368. uint32_t nbuf_len;
  1369. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1370. if (!DP_FRAME_IS_DATA(wh)) {
  1371. QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
  1372. "only for data frames");
  1373. goto free;
  1374. }
  1375. nbuf_len = qdf_nbuf_len(mpdu);
  1376. if (nbuf_len < sizeof(struct ieee80211_frame)) {
  1377. dp_rx_info_rl("%pK: Invalid nbuf length: %u", soc, nbuf_len);
  1378. goto free;
  1379. }
  1380. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1381. if (!pdev) {
  1382. dp_rx_info_rl("%pK: PDEV not found", soc);
  1383. goto free;
  1384. }
  1385. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1386. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1387. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1388. QDF_MAC_ADDR_SIZE) == 0) {
  1389. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1390. goto out;
  1391. }
  1392. }
  1393. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1394. if (!vdev) {
  1395. dp_rx_info_rl("%pK: VDEV not found", soc);
  1396. goto free;
  1397. }
  1398. out:
  1399. if (vdev->opmode == wlan_op_mode_ap) {
  1400. peer = dp_peer_find_hash_find(soc, wh->i_addr2, 0,
  1401. vdev->vdev_id,
  1402. DP_MOD_ID_RX_ERR);
  1403. /* If SA is a valid peer in vdev,
  1404. * don't send disconnect
  1405. */
  1406. if (peer) {
  1407. dp_peer_unref_delete(peer, DP_MOD_ID_RX_ERR);
  1408. DP_STATS_INC(soc, rx.err.decrypt_err_drop, 1);
  1409. dp_err_rl("invalid peer frame with correct SA/RA is freed");
  1410. goto free;
  1411. }
  1412. }
  1413. if (soc->cdp_soc.ol_ops->rx_invalid_peer)
  1414. soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
  1415. free:
  1416. /* Drop and free packet */
  1417. curr_nbuf = mpdu;
  1418. while (curr_nbuf) {
  1419. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1420. dp_rx_nbuf_free(curr_nbuf);
  1421. curr_nbuf = next_nbuf;
  1422. }
  1423. /* Reset the head and tail pointers */
  1424. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1425. if (pdev) {
  1426. pdev->invalid_peer_head_msdu = NULL;
  1427. pdev->invalid_peer_tail_msdu = NULL;
  1428. }
  1429. return 0;
  1430. }
  1431. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1432. qdf_nbuf_t mpdu, bool mpdu_done,
  1433. uint8_t mac_id)
  1434. {
  1435. /* Process the nbuf */
  1436. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1437. }
  1438. #endif
  1439. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1440. #ifdef RECEIVE_OFFLOAD
  1441. /**
  1442. * dp_rx_print_offload_info() - Print offload info from RX TLV
  1443. * @soc: dp soc handle
  1444. * @msdu: MSDU for which the offload info is to be printed
  1445. * @ofl_info: offload info saved in hal_offload_info structure
  1446. *
  1447. * Return: None
  1448. */
  1449. static void dp_rx_print_offload_info(struct dp_soc *soc,
  1450. qdf_nbuf_t msdu,
  1451. struct hal_offload_info *ofl_info)
  1452. {
  1453. dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
  1454. dp_verbose_debug("lro_eligible 0x%x",
  1455. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
  1456. dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
  1457. dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
  1458. dp_verbose_debug("TCP seq num 0x%x", ofl_info->tcp_seq_num);
  1459. dp_verbose_debug("TCP ack num 0x%x", ofl_info->tcp_ack_num);
  1460. dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
  1461. dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
  1462. dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
  1463. dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
  1464. dp_verbose_debug("---------------------------------------------------------");
  1465. }
  1466. void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  1467. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  1468. {
  1469. struct hal_offload_info offload_info;
  1470. if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
  1471. return;
  1472. if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
  1473. return;
  1474. *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
  1475. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
  1476. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
  1477. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  1478. hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  1479. rx_tlv);
  1480. QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
  1481. QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
  1482. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
  1483. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
  1484. QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
  1485. dp_rx_print_offload_info(soc, msdu, &offload_info);
  1486. }
  1487. #endif /* RECEIVE_OFFLOAD */
  1488. /**
  1489. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  1490. *
  1491. * @soc: DP soc handle
  1492. * @nbuf: pointer to msdu.
  1493. * @mpdu_len: mpdu length
  1494. * @l3_pad_len: L3 padding length by HW
  1495. *
  1496. * Return: returns true if nbuf is last msdu of mpdu else returns false.
  1497. */
  1498. static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
  1499. qdf_nbuf_t nbuf,
  1500. uint16_t *mpdu_len,
  1501. uint32_t l3_pad_len)
  1502. {
  1503. bool last_nbuf;
  1504. uint32_t pkt_hdr_size;
  1505. uint16_t buf_size;
  1506. buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
  1507. pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
  1508. if ((*mpdu_len + pkt_hdr_size) > buf_size) {
  1509. qdf_nbuf_set_pktlen(nbuf, buf_size);
  1510. last_nbuf = false;
  1511. *mpdu_len -= (buf_size - pkt_hdr_size);
  1512. } else {
  1513. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
  1514. last_nbuf = true;
  1515. *mpdu_len = 0;
  1516. }
  1517. return last_nbuf;
  1518. }
  1519. /**
  1520. * dp_get_l3_hdr_pad_len() - get L3 header padding length.
  1521. *
  1522. * @soc: DP soc handle
  1523. * @nbuf: pointer to msdu.
  1524. *
  1525. * Return: returns padding length in bytes.
  1526. */
  1527. static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
  1528. qdf_nbuf_t nbuf)
  1529. {
  1530. uint32_t l3_hdr_pad = 0;
  1531. uint8_t *rx_tlv_hdr;
  1532. struct hal_rx_msdu_metadata msdu_metadata;
  1533. while (nbuf) {
  1534. if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
/* the last nbuf of a scattered msdu has the continuation bit cleared */
  1536. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1537. hal_rx_msdu_metadata_get(soc->hal_soc,
  1538. rx_tlv_hdr,
  1539. &msdu_metadata);
  1540. l3_hdr_pad = msdu_metadata.l3_hdr_pad;
  1541. break;
  1542. }
  1543. nbuf = nbuf->next;
  1544. }
  1545. return l3_hdr_pad;
  1546. }
  1547. qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1548. {
  1549. qdf_nbuf_t parent, frag_list, next = NULL;
  1550. uint16_t frag_list_len = 0;
  1551. uint16_t mpdu_len;
  1552. bool last_nbuf;
  1553. uint32_t l3_hdr_pad_offset = 0;
/*
 * Use the msdu length from the REO entry descriptor instead, since
 * there are cases where the RX PKT TLV is corrupted while the
 * msdu_len from the REO descriptor is correct for non-raw RX
 * scatter msdus.
 */
  1559. mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
/*
 * This is the case where the complete msdu fits in a single nbuf.
 * Here HW sets both the start and end bits and we only need to
 * reset these bits for the RAW mode simulator to decap the pkt.
 */
  1565. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  1566. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1567. qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
  1568. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1569. return nbuf;
  1570. }
  1571. l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
/*
 * This is a case where we have multiple msdus (A-MSDU) spread across
 * multiple nbufs. Here we create a fraglist out of these nbufs.
 *
 * The moment we encounter an nbuf with the continuation bit set we
 * know for sure we have an MSDU which is spread across multiple
 * nbufs. We loop through and reap nbufs till we reach the last nbuf.
 */
  1580. parent = nbuf;
  1581. frag_list = nbuf->next;
  1582. nbuf = nbuf->next;
  1583. /*
  1584. * set the start bit in the first nbuf we encounter with continuation
  1585. * bit set. This has the proper mpdu length set as it is the first
  1586. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  1587. * nbufs will form the frag_list of the parent nbuf.
  1588. */
  1589. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  1590. /*
  1591. * L3 header padding is only needed for the 1st buffer
  1592. * in a scattered msdu
  1593. */
  1594. last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
  1595. l3_hdr_pad_offset);
/*
 * MSDU continuation bit is set but the reported MPDU length fits
 * into a single buffer.
 *
 * Increment error stats and avoid SG list creation.
 */
  1602. if (last_nbuf) {
  1603. DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
  1604. qdf_nbuf_pull_head(parent,
  1605. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1606. return parent;
  1607. }
/*
 * This is where we set the length of the fragments which are
 * associated with the parent nbuf. We iterate through the frag_list
 * till we hit the last_nbuf of the list.
 */
  1613. do {
  1614. last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
  1615. qdf_nbuf_pull_head(nbuf,
  1616. soc->rx_pkt_tlv_size);
  1617. frag_list_len += qdf_nbuf_len(nbuf);
  1618. if (last_nbuf) {
  1619. next = nbuf->next;
  1620. nbuf->next = NULL;
  1621. break;
  1622. } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1623. dp_err("Invalid packet length");
  1624. qdf_assert_always(0);
  1625. }
  1626. nbuf = nbuf->next;
  1627. } while (!last_nbuf);
  1628. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  1629. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  1630. parent->next = next;
  1631. qdf_nbuf_pull_head(parent,
  1632. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1633. return parent;
  1634. }
  1635. #ifdef DP_RX_SG_FRAME_SUPPORT
  1636. bool dp_rx_is_sg_supported(void)
  1637. {
  1638. return true;
  1639. }
  1640. #else
  1641. bool dp_rx_is_sg_supported(void)
  1642. {
  1643. return false;
  1644. }
  1645. #endif
  1646. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1647. #ifdef QCA_PEER_EXT_STATS
  1648. void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  1649. qdf_nbuf_t nbuf)
  1650. {
  1651. struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
  1652. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1653. dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
  1654. }
  1655. #endif /* QCA_PEER_EXT_STATS */
  1656. void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1657. {
  1658. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  1659. int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
  1660. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1661. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1662. uint32_t interframe_delay =
  1663. (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
  1664. struct cdp_tid_rx_stats *rstats =
  1665. &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1666. dp_update_delay_stats(NULL, rstats, to_stack, tid,
  1667. CDP_DELAY_STATS_REAP_STACK, ring_id, false);
/*
 * Update interframe delay stats calculated at the deliver_data_ol point.
 * vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so the
 * interframe delay will not be calculated correctly for the 1st frame.
 * On the other hand, this avoids an extra per-packet check of
 * vdev->prev_rx_deliver_tstamp.
 */
  1675. dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
  1676. CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
  1677. vdev->prev_rx_deliver_tstamp = current_ts;
  1678. }
  1679. /**
  1680. * dp_rx_drop_nbuf_list() - drop an nbuf list
  1681. * @pdev: dp pdev reference
* @buf_list: buffer list to be dropped
  1683. *
  1684. * Return: int (number of bufs dropped)
  1685. */
  1686. static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  1687. qdf_nbuf_t buf_list)
  1688. {
  1689. struct cdp_tid_rx_stats *stats = NULL;
  1690. uint8_t tid = 0, ring_id = 0;
  1691. int num_dropped = 0;
  1692. qdf_nbuf_t buf, next_buf;
  1693. buf = buf_list;
  1694. while (buf) {
  1695. ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
  1696. next_buf = qdf_nbuf_queue_next(buf);
  1697. tid = qdf_nbuf_get_tid_val(buf);
  1698. if (qdf_likely(pdev)) {
  1699. stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1700. stats->fail_cnt[INVALID_PEER_VDEV]++;
  1701. stats->delivered_to_stack--;
  1702. }
  1703. dp_rx_nbuf_free(buf);
  1704. buf = next_buf;
  1705. num_dropped++;
  1706. }
  1707. return num_dropped;
  1708. }
  1709. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1710. /**
  1711. * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  1712. * @soc: core txrx main context
  1713. * @vdev: vdev
  1714. * @txrx_peer: txrx peer
  1715. * @nbuf_head: skb list head
  1716. *
  1717. * Return: true if packet is delivered to netdev per STA.
  1718. */
  1719. static inline bool
  1720. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1721. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1722. {
  1723. /*
  1724. * When extended WDS is disabled, frames are sent to AP netdevice.
  1725. */
  1726. if (qdf_likely(!vdev->wds_ext_enabled))
  1727. return false;
/*
 * There can be 2 cases:
 * 1. Send the frame to the parent netdev if it is not meant for a netdev
 *    per STA.
 * 2. If the frame is meant for a netdev per STA:
 *    a. Send the frame to the appropriate netdev using the registered fp.
 *    b. If the fp is NULL, drop the frames.
 */
  1735. if (!txrx_peer->wds_ext.init)
  1736. return false;
  1737. if (txrx_peer->osif_rx)
  1738. txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
  1739. else
  1740. dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1741. return true;
  1742. }
  1743. #else
  1744. static inline bool
  1745. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1746. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1747. {
  1748. return false;
  1749. }
  1750. #endif
  1751. #ifdef PEER_CACHE_RX_PKTS
  1752. #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
  1753. /**
  1754. * dp_set_nbuf_band() - Set band in nbuf cb
  1755. * @peer: dp_peer
  1756. * @nbuf: nbuf
  1757. *
  1758. * Return: None
  1759. */
  1760. static inline void
  1761. dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf)
  1762. {
  1763. uint8_t link_id = 0;
  1764. link_id = dp_rx_get_stats_arr_idx_from_link_id(nbuf, peer->txrx_peer);
  1765. dp_rx_set_nbuf_band(nbuf, peer->txrx_peer, link_id);
  1766. }
  1767. #else
  1768. static inline void
  1769. dp_set_nbuf_band(struct dp_peer *peer, qdf_nbuf_t nbuf)
  1770. {
  1771. }
  1772. #endif
  1773. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  1774. {
  1775. struct dp_peer_cached_bufq *bufqi;
  1776. struct dp_rx_cached_buf *cache_buf = NULL;
  1777. ol_txrx_rx_fp data_rx = NULL;
  1778. int num_buff_elem;
  1779. QDF_STATUS status;
  1780. /*
  1781. * Flush dp cached frames only for mld peers and legacy peers, as
  1782. * link peers don't store cached frames
  1783. */
  1784. if (IS_MLO_DP_LINK_PEER(peer))
  1785. return;
  1786. if (!peer->txrx_peer) {
  1787. dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
  1788. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1789. return;
  1790. }
  1791. if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
  1792. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1793. return;
  1794. }
  1795. qdf_spin_lock_bh(&peer->peer_info_lock);
  1796. if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
  1797. data_rx = peer->vdev->osif_rx;
  1798. else
  1799. drop = true;
  1800. qdf_spin_unlock_bh(&peer->peer_info_lock);
  1801. bufqi = &peer->txrx_peer->bufq_info;
  1802. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1803. qdf_list_remove_front(&bufqi->cached_bufq,
  1804. (qdf_list_node_t **)&cache_buf);
  1805. while (cache_buf) {
  1806. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
  1807. cache_buf->buf);
  1808. bufqi->entries -= num_buff_elem;
  1809. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1810. if (drop) {
  1811. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1812. cache_buf->buf);
  1813. } else {
  1814. dp_set_nbuf_band(peer, cache_buf->buf);
  1815. /* Flush the cached frames to OSIF DEV */
  1816. status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
  1817. if (status != QDF_STATUS_SUCCESS)
  1818. bufqi->dropped = dp_rx_drop_nbuf_list(
  1819. peer->vdev->pdev,
  1820. cache_buf->buf);
  1821. }
  1822. qdf_mem_free(cache_buf);
  1823. cache_buf = NULL;
  1824. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1825. qdf_list_remove_front(&bufqi->cached_bufq,
  1826. (qdf_list_node_t **)&cache_buf);
  1827. }
  1828. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1829. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1830. }
  1831. /**
  1832. * dp_rx_enqueue_rx() - cache rx frames
  1833. * @peer: peer
  1834. * @txrx_peer: DP txrx_peer
  1835. * @rx_buf_list: cache buffer list
  1836. *
  1837. * Return: None
  1838. */
  1839. static QDF_STATUS
  1840. dp_rx_enqueue_rx(struct dp_peer *peer,
  1841. struct dp_txrx_peer *txrx_peer,
  1842. qdf_nbuf_t rx_buf_list)
  1843. {
  1844. struct dp_rx_cached_buf *cache_buf;
  1845. struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
  1846. int num_buff_elem;
  1847. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  1848. struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
  1849. struct dp_peer *ta_peer = NULL;
/*
 * If the peer id is invalid, which likely means peer map has not
 * completed, the caller needs to provide the dp_peer pointer;
 * otherwise it is fine to use txrx_peer->peer_id to get the dp_peer.
 */
  1855. if (peer) {
  1856. if (QDF_STATUS_SUCCESS ==
  1857. dp_peer_get_ref(soc, peer, DP_MOD_ID_RX))
  1858. ta_peer = peer;
  1859. } else {
  1860. ta_peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
  1861. DP_MOD_ID_RX);
  1862. }
  1863. if (!ta_peer) {
  1864. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1865. rx_buf_list);
  1866. return QDF_STATUS_E_INVAL;
  1867. }
  1868. dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
  1869. bufqi->dropped);
  1870. if (!ta_peer->valid) {
  1871. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1872. rx_buf_list);
  1873. ret = QDF_STATUS_E_INVAL;
  1874. goto fail;
  1875. }
  1876. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1877. if (bufqi->entries >= bufqi->thresh) {
  1878. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1879. rx_buf_list);
  1880. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1881. ret = QDF_STATUS_E_RESOURCES;
  1882. goto fail;
  1883. }
  1884. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1885. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
  1886. cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
  1887. if (!cache_buf) {
  1888. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1889. "Failed to allocate buf to cache rx frames");
  1890. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1891. rx_buf_list);
  1892. ret = QDF_STATUS_E_NOMEM;
  1893. goto fail;
  1894. }
  1895. cache_buf->buf = rx_buf_list;
  1896. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1897. qdf_list_insert_back(&bufqi->cached_bufq,
  1898. &cache_buf->node);
  1899. bufqi->entries += num_buff_elem;
  1900. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1901. fail:
  1902. dp_peer_unref_delete(ta_peer, DP_MOD_ID_RX);
  1903. return ret;
  1904. }
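/**
 * dp_rx_is_peer_cache_bufq_supported() - check if caching of rx frames
 *                                        in the peer bufq is supported
 *
 * Return: true when PEER_CACHE_RX_PKTS is enabled, false otherwise
 */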
  1905. static inline
  1906. bool dp_rx_is_peer_cache_bufq_supported(void)
  1907. {
  1908. return true;
  1909. }
  1910. #else
  1911. static inline
  1912. bool dp_rx_is_peer_cache_bufq_supported(void)
  1913. {
  1914. return false;
  1915. }
  1916. static inline QDF_STATUS
  1917. dp_rx_enqueue_rx(struct dp_peer *peer,
  1918. struct dp_txrx_peer *txrx_peer,
  1919. qdf_nbuf_t rx_buf_list)
  1920. {
  1921. return QDF_STATUS_SUCCESS;
  1922. }
  1923. #endif
  1924. #ifndef DELIVERY_TO_STACK_STATUS_CHECK
  1925. /**
  1926. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1927. * using the appropriate call back functions.
  1928. * @soc: soc
  1929. * @vdev: vdev
  1930. * @txrx_peer: peer
  1931. * @nbuf_head: skb list head
  1932. *
  1933. * Return: None
  1934. */
  1935. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1936. struct dp_vdev *vdev,
  1937. struct dp_txrx_peer *txrx_peer,
  1938. qdf_nbuf_t nbuf_head)
  1939. {
  1940. if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
  1941. txrx_peer, nbuf_head)))
  1942. return;
  1943. /* Function pointer initialized only when FISA is enabled */
  1944. if (vdev->osif_fisa_rx)
  1945. /* on failure send it via regular path */
  1946. vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1947. else
  1948. vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1949. }
  1950. #else
  1951. /**
  1952. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1953. * using the appropriate call back functions.
  1954. * @soc: soc
  1955. * @vdev: vdev
  1956. * @txrx_peer: txrx peer
  1957. * @nbuf_head: skb list head
  1958. *
  1959. * Check the return status of the call back function and drop
  1960. * the packets if the return status indicates a failure.
  1961. *
  1962. * Return: None
  1963. */
  1964. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1965. struct dp_vdev *vdev,
  1966. struct dp_txrx_peer *txrx_peer,
  1967. qdf_nbuf_t nbuf_head)
  1968. {
  1969. int num_nbuf = 0;
  1970. QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
  1971. /* Function pointer initialized only when FISA is enabled */
  1972. if (vdev->osif_fisa_rx)
  1973. /* on failure send it via regular path */
  1974. ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1975. else if (vdev->osif_rx)
  1976. ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1977. if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
  1978. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1979. DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
  1980. if (txrx_peer)
  1981. DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
  1982. num_nbuf);
  1983. }
  1984. }
  1985. #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
  1986. /**
  1987. * dp_rx_validate_rx_callbacks() - validate rx callbacks
  1988. * @soc: DP soc
  1989. * @vdev: DP vdev handle
  1990. * @txrx_peer: pointer to the txrx peer object
  1991. * @nbuf_head: skb list head
  1992. *
  1993. * Return: QDF_STATUS - QDF_STATUS_SUCCESS
  1994. * QDF_STATUS_E_FAILURE
  1995. */
  1996. static inline QDF_STATUS
  1997. dp_rx_validate_rx_callbacks(struct dp_soc *soc,
  1998. struct dp_vdev *vdev,
  1999. struct dp_txrx_peer *txrx_peer,
  2000. qdf_nbuf_t nbuf_head)
  2001. {
  2002. int num_nbuf;
  2003. if (qdf_unlikely(!vdev || vdev->delete.pending)) {
  2004. num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
  2005. /*
  2006. * This is a special case where vdev is invalid,
  2007. * so we cannot know the pdev to which this packet
  2008. * belonged. Hence we update the soc rx error stats.
  2009. */
  2010. DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
  2011. return QDF_STATUS_E_FAILURE;
  2012. }
/*
 * It is highly unlikely to have a vdev without a registered rx
 * callback function; if so, free the nbuf_list.
 */
  2017. if (qdf_unlikely(!vdev->osif_rx)) {
  2018. if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
  2019. dp_rx_enqueue_rx(NULL, txrx_peer, nbuf_head);
  2020. } else {
  2021. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
  2022. nbuf_head);
  2023. DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
  2024. vdev->pdev->enhanced_stats_en);
  2025. }
  2026. return QDF_STATUS_E_FAILURE;
  2027. }
  2028. return QDF_STATUS_SUCCESS;
  2029. }
  2030. #if defined(WLAN_FEATURE_11BE_MLO) && defined(RAW_PKT_MLD_ADDR_CONVERSION)
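/**
 * dp_rx_raw_pkt_mld_addr_conv() - convert link addresses to MLD addresses
 *                                 in raw rx frames
 * @soc: core txrx main context
 * @vdev: vdev on which the frames were received
 * @txrx_peer: txrx peer handle
 * @nbuf_head: skb list head
 *
 * For an MLD peer, rewrite addr1 of each 802.11 header with the vdev
 * MLD mac address and addr2 with the MLD peer mac address. Frames with
 * native wifi decap are left untouched.
 *
 * Return: None
 */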
  2031. static void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc,
  2032. struct dp_vdev *vdev,
  2033. struct dp_txrx_peer *txrx_peer,
  2034. qdf_nbuf_t nbuf_head)
  2035. {
  2036. qdf_nbuf_t nbuf, next;
  2037. struct dp_peer *peer = NULL;
  2038. struct ieee80211_frame *wh = NULL;
  2039. if (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)
  2040. return;
  2041. peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
  2042. DP_MOD_ID_RX);
  2043. if (!peer)
  2044. return;
  2045. if (!IS_MLO_DP_MLD_PEER(peer)) {
  2046. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2047. return;
  2048. }
  2049. nbuf = nbuf_head;
  2050. while (nbuf) {
  2051. next = nbuf->next;
  2052. wh = (struct ieee80211_frame *)qdf_nbuf_data(nbuf);
  2053. qdf_mem_copy(wh->i_addr1, vdev->mld_mac_addr.raw,
  2054. QDF_MAC_ADDR_SIZE);
  2055. qdf_mem_copy(wh->i_addr2, peer->mac_addr.raw,
  2056. QDF_MAC_ADDR_SIZE);
  2057. nbuf = next;
  2058. }
  2059. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2060. }
  2061. #else
  2062. static inline
  2063. void dp_rx_raw_pkt_mld_addr_conv(struct dp_soc *soc,
  2064. struct dp_vdev *vdev,
  2065. struct dp_txrx_peer *txrx_peer,
  2066. qdf_nbuf_t nbuf_head)
  2067. { }
  2068. #endif
  2069. QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
  2070. struct dp_vdev *vdev,
  2071. struct dp_txrx_peer *txrx_peer,
  2072. qdf_nbuf_t nbuf_head,
  2073. qdf_nbuf_t nbuf_tail)
  2074. {
  2075. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  2076. QDF_STATUS_SUCCESS)
  2077. return QDF_STATUS_E_FAILURE;
  2078. if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
  2079. (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
  2080. dp_rx_raw_pkt_mld_addr_conv(soc, vdev, txrx_peer, nbuf_head);
  2081. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
  2082. &nbuf_tail);
  2083. }
  2084. dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);
  2085. return QDF_STATUS_SUCCESS;
  2086. }
  2087. #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
  2088. QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
  2089. struct dp_vdev *vdev,
  2090. struct dp_txrx_peer *txrx_peer,
  2091. qdf_nbuf_t nbuf_head,
  2092. qdf_nbuf_t nbuf_tail)
  2093. {
  2094. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  2095. QDF_STATUS_SUCCESS)
  2096. return QDF_STATUS_E_FAILURE;
  2097. vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);
  2098. return QDF_STATUS_SUCCESS;
  2099. }
  2100. #endif
  2101. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  2102. #ifdef VDEV_PEER_PROTOCOL_COUNT
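/**
 * dp_rx_msdu_stats_update_prot_cnts() - update per-protocol peer stats
 *                                       for a received msdu
 * @vdev_hdl: DP vdev handle
 * @nbuf: received msdu buffer
 * @txrx_peer: datapath txrx_peer handle
 *
 * No-op when protocol count tracking is disabled on the vdev or when
 * the msdu is a fragment or a raw frame.
 */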
  2103. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
  2104. { \
  2105. qdf_nbuf_t nbuf_local; \
  2106. struct dp_txrx_peer *txrx_peer_local; \
  2107. struct dp_vdev *vdev_local = vdev_hdl; \
  2108. do { \
  2109. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  2110. break; \
  2111. nbuf_local = nbuf; \
  2112. txrx_peer_local = txrx_peer; \
  2113. if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
  2114. break; \
  2115. else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
  2116. break; \
  2117. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  2118. (nbuf_local), \
  2119. (txrx_peer_local), 0, 1); \
  2120. } while (0); \
  2121. }
  2122. #else
  2123. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
  2124. #endif
  2125. #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
  2126. /**
  2127. * dp_rx_rates_stats_update() - update rate stats
  2128. * from rx msdu.
  2129. * @soc: datapath soc handle
  2130. * @nbuf: received msdu buffer
  2131. * @rx_tlv_hdr: rx tlv header
  2132. * @txrx_peer: datapath txrx_peer handle
  2133. * @sgi: Short Guard Interval
  2134. * @mcs: Modulation and Coding Set
  2135. * @nss: Number of Spatial Streams
  2136. * @bw: BandWidth
  2137. * @pkt_type: Corresponds to preamble
  2138. * @link_id: Link Id on which packet is received
  2139. *
* To record rates precisely, the following factors are considered:
* specific frames (ARP, DHCP, SSDP, etc.) are excluded, and the
* impact on rx throughput is kept as small as possible.
  2143. *
  2144. * Return: void
  2145. */
  2146. static void
  2147. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2148. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  2149. uint32_t sgi, uint32_t mcs,
  2150. uint32_t nss, uint32_t bw, uint32_t pkt_type,
  2151. uint8_t link_id)
  2152. {
  2153. uint32_t rix;
  2154. uint16_t ratecode;
  2155. uint32_t avg_rx_rate;
  2156. uint32_t ratekbps;
  2157. enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
  2158. if (soc->high_throughput ||
  2159. dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
  2160. return;
  2161. }
  2162. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs, link_id);
  2163. /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
  2164. if (qdf_unlikely(pkt_type == DOT11_B))
  2165. nss = 1;
  2166. /* here pkt_type corresponds to preamble */
  2167. ratekbps = dp_getrateindex(sgi,
  2168. mcs,
  2169. nss - 1,
  2170. pkt_type,
  2171. bw,
  2172. punc_mode,
  2173. &rix,
  2174. &ratecode);
  2175. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps, link_id);
  2176. avg_rx_rate =
  2177. dp_ath_rate_lpf(
  2178. txrx_peer->stats[link_id].extd_stats.rx.avg_rx_rate,
  2179. ratekbps);
  2180. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate, link_id);
  2181. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss, link_id);
  2182. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs, link_id);
  2183. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw, link_id);
  2184. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi, link_id);
  2185. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type, link_id);
  2186. }
  2187. #else
  2188. static inline void
  2189. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2190. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  2191. uint32_t sgi, uint32_t mcs,
  2192. uint32_t nss, uint32_t bw, uint32_t pkt_type,
  2193. uint8_t link_id)
  2194. {
  2195. }
  2196. #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
  2197. #ifndef QCA_ENHANCED_STATS_SUPPORT
  2198. /**
  2199. * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
  2200. *
  2201. * @soc: datapath soc handle
  2202. * @nbuf: received msdu buffer
  2203. * @rx_tlv_hdr: rx tlv header
  2204. * @txrx_peer: datapath txrx_peer handle
  2205. * @link_id: link id on which the packet is received
  2206. *
  2207. * Return: void
  2208. */
  2209. static inline
  2210. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2211. uint8_t *rx_tlv_hdr,
  2212. struct dp_txrx_peer *txrx_peer,
  2213. uint8_t link_id)
  2214. {
  2215. bool is_ampdu;
  2216. uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
  2217. uint8_t dst_mcs_idx;
  2218. /*
  2219. * TODO - For KIWI this field is present in ring_desc
  2220. * Try to use ring desc instead of tlv.
  2221. */
  2222. is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
  2223. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu, link_id);
  2224. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu),
  2225. link_id);
  2226. sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
  2227. mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  2228. tid = qdf_nbuf_get_tid_val(nbuf);
  2229. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  2230. reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
  2231. rx_tlv_hdr);
  2232. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  2233. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  2234. /* do HW to SW pkt type conversion */
  2235. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  2236. hal_2_dp_pkt_type_map[pkt_type]);
  2237. /*
  2238. * The MCS index does not start with 0 when NSS>1 in HT mode.
  2239. * MCS params for optional 20/40MHz, NSS=1~3, EQM(NSS>1):
  2240. * ------------------------------------------------------
  2241. * NSS | 1 | 2 | 3 | 4
  2242. * ------------------------------------------------------
  2243. * MCS index: HT20 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
  2244. * ------------------------------------------------------
  2245. * MCS index: HT40 | 0 ~ 7 | 8 ~ 15 | 16 ~ 23 | 24 ~ 31
  2246. * ------------------------------------------------------
  2247. * Currently, the MAX_NSS=2. If NSS>2, MCS index = 8 * (NSS-1)
  2248. */
  2249. if ((pkt_type == DOT11_N) && (nss == 2))
  2250. mcs += 8;
  2251. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
  2252. ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
  2253. link_id);
  2254. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
  2255. ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)),
  2256. link_id);
  2257. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1, link_id);
/*
 * Only if nss > 0 and pkt_type is 11N/AC/AX, increment
 * index [nss - 1] of the array counter.
 */
  2262. if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
  2263. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1, link_id);
  2264. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1, link_id);
  2265. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
  2266. hal_rx_tlv_mic_err_get(soc->hal_soc,
  2267. rx_tlv_hdr), link_id);
  2268. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
  2269. hal_rx_tlv_decrypt_err_get(soc->hal_soc,
  2270. rx_tlv_hdr), link_id);
  2271. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1,
  2272. link_id);
  2273. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1,
  2274. link_id);
  2275. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  2276. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  2277. DP_PEER_EXTD_STATS_INC(txrx_peer,
  2278. rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  2279. 1, link_id);
  2280. dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
  2281. sgi, mcs, nss, bw, pkt_type, link_id);
  2282. }
  2283. #else
  2284. static inline
  2285. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2286. uint8_t *rx_tlv_hdr,
  2287. struct dp_txrx_peer *txrx_peer,
  2288. uint8_t link_id)
  2289. {
  2290. }
  2291. #endif
  2292. #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
  2293. static inline void
  2294. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2295. qdf_nbuf_t nbuf, uint8_t link_id)
  2296. {
  2297. uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
  2298. if (qdf_unlikely(lmac_id >= CDP_MAX_LMACS)) {
  2299. dp_err_rl("Invalid lmac_id: %u vdev_id: %u",
  2300. lmac_id, QDF_NBUF_CB_RX_VDEV_ID(nbuf));
  2301. if (qdf_likely(txrx_peer))
  2302. dp_err_rl("peer_id: %u", txrx_peer->peer_id);
  2303. return;
  2304. }
/* only count stats per lmac for MLO connections */
  2306. DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
  2307. QDF_NBUF_CB_RX_PKT_LEN(nbuf),
  2308. txrx_peer->is_mld_peer, link_id);
  2309. }
  2310. #else
  2311. static inline void
  2312. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2313. qdf_nbuf_t nbuf, uint8_t link_id)
  2314. {
  2315. }
  2316. #endif
  2317. void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2318. uint8_t *rx_tlv_hdr,
  2319. struct dp_txrx_peer *txrx_peer,
  2320. uint8_t ring_id,
  2321. struct cdp_tid_rx_stats *tid_stats,
  2322. uint8_t link_id)
  2323. {
  2324. bool is_not_amsdu;
  2325. struct dp_vdev *vdev = txrx_peer->vdev;
  2326. uint8_t enh_flag;
  2327. qdf_ether_header_t *eh;
  2328. uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2329. dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
  2330. is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
  2331. qdf_nbuf_is_rx_chfrag_end(nbuf);
  2332. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
  2333. msdu_len, link_id);
  2334. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
  2335. is_not_amsdu, link_id);
  2336. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1,
  2337. !is_not_amsdu, link_id);
  2338. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
  2339. qdf_nbuf_is_rx_retry_flag(nbuf), link_id);
  2340. dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf, link_id);
  2341. tid_stats->msdu_cnt++;
  2342. enh_flag = vdev->pdev->enhanced_stats_en;
  2343. if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
  2344. (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
  2345. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2346. DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag, link_id);
  2347. tid_stats->mcast_msdu_cnt++;
  2348. if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  2349. DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len,
  2350. enh_flag, link_id);
  2351. tid_stats->bcast_msdu_cnt++;
  2352. }
  2353. } else {
  2354. DP_PEER_UC_INCC_PKT(txrx_peer, 1, msdu_len,
  2355. enh_flag, link_id);
  2356. }
  2357. txrx_peer->stats[link_id].per_pkt_stats.rx.last_rx_ts =
  2358. qdf_system_ticks();
  2359. dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr,
  2360. txrx_peer, link_id);
  2361. }
  2362. #ifndef WDS_VENDOR_EXTENSION
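/* Without WDS vendor extension support, all frames pass the rx policy check */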
  2363. int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
  2364. struct dp_vdev *vdev,
  2365. struct dp_txrx_peer *txrx_peer)
  2366. {
  2367. return 1;
  2368. }
  2369. #endif
  2370. #ifdef DP_RX_PKT_NO_PEER_DELIVER
  2371. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  2372. /**
  2373. * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
  2374. * during roaming
  2375. * @vdev: dp_vdev pointer
  2376. * @rx_tlv_hdr: rx tlv header
  2377. * @nbuf: pkt skb pointer
  2378. *
* This function checks whether rx udp data is received from an authorised
* roamed peer before the peer map indication is received from FW after
* roaming. This is needed for VoIP scenarios, where the packet loss
* expected during roaming must be minimal.
  2383. *
  2384. * Return: bool
  2385. */
  2386. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2387. uint8_t *rx_tlv_hdr,
  2388. qdf_nbuf_t nbuf)
  2389. {
  2390. char *hdr_desc;
  2391. struct ieee80211_frame *wh = NULL;
  2392. hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
  2393. rx_tlv_hdr);
  2394. wh = (struct ieee80211_frame *)hdr_desc;
  2395. if (vdev->roaming_peer_status ==
  2396. WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
  2397. !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
  2398. QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
  2399. qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
  2400. return true;
  2401. return false;
  2402. }
  2403. #else
  2404. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2405. uint8_t *rx_tlv_hdr,
  2406. qdf_nbuf_t nbuf)
  2407. {
  2408. return false;
  2409. }
  2410. #endif
  2411. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2412. {
  2413. uint16_t peer_id;
  2414. uint8_t vdev_id;
  2415. struct dp_vdev *vdev = NULL;
  2416. uint32_t l2_hdr_offset = 0;
  2417. uint16_t msdu_len = 0;
  2418. uint32_t pkt_len = 0;
  2419. uint8_t *rx_tlv_hdr;
  2420. uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
  2421. FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
  2422. bool is_special_frame = false;
  2423. struct dp_peer *peer = NULL;
  2424. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  2425. if (peer_id > soc->max_peer_id)
  2426. goto deliver_fail;
  2427. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  2428. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
  2429. if (!vdev || vdev->delete.pending)
  2430. goto deliver_fail;
  2431. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
  2432. goto deliver_fail;
  2433. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  2434. l2_hdr_offset =
  2435. hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
  2436. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2437. pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
  2438. QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
  2439. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  2440. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
  2441. is_special_frame = dp_rx_is_special_frame(nbuf, frame_mask);
  2442. if (qdf_likely(vdev->osif_rx)) {
  2443. if (is_special_frame ||
  2444. dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr,
  2445. nbuf)) {
  2446. qdf_nbuf_set_exc_frame(nbuf, 1);
  2447. if (QDF_STATUS_SUCCESS !=
  2448. vdev->osif_rx(vdev->osif_vdev, nbuf))
  2449. goto deliver_fail;
  2450. DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
  2451. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2452. return;
  2453. }
  2454. } else if (is_special_frame) {
/*
 * For an MLO connection the txrx_peer for the link peer does not
 * exist, so try to store these RX packets in the MLD peer's
 * txrx_peer bufq until vdev->osif_rx is registered from CP, and
 * then flush them to the stack.
 */
  2461. peer = dp_peer_get_tgt_peer_by_id(soc, peer_id,
  2462. DP_MOD_ID_RX);
  2463. if (!peer)
  2464. goto deliver_fail;
  2465. /* only check for MLO connection */
  2466. if (IS_MLO_DP_MLD_PEER(peer) && peer->txrx_peer &&
  2467. dp_rx_is_peer_cache_bufq_supported()) {
  2468. qdf_nbuf_set_exc_frame(nbuf, 1);
  2469. if (QDF_STATUS_SUCCESS ==
  2470. dp_rx_enqueue_rx(peer, peer->txrx_peer, nbuf)) {
  2471. DP_STATS_INC(soc,
  2472. rx.err.pkt_delivered_no_peer,
  2473. 1);
  2474. } else {
  2475. DP_STATS_INC(soc,
  2476. rx.err.rx_invalid_peer.num,
  2477. 1);
  2478. }
  2479. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2480. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2481. return;
  2482. }
  2483. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2484. }
  2485. deliver_fail:
  2486. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2487. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2488. dp_rx_nbuf_free(nbuf);
  2489. if (vdev)
  2490. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2491. }
  2492. #else
  2493. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2494. {
  2495. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2496. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2497. dp_rx_nbuf_free(nbuf);
  2498. }
  2499. #endif
  2500. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2501. #ifdef WLAN_SUPPORT_RX_FISA
  2502. QDF_STATUS dp_fisa_config(ol_txrx_soc_handle cdp_soc, uint8_t pdev_id,
  2503. enum cdp_fisa_config_id config_id,
  2504. union cdp_fisa_config *cfg)
  2505. {
  2506. struct dp_soc *soc = (struct dp_soc *)cdp_soc;
  2507. struct dp_pdev *pdev;
  2508. QDF_STATUS status;
  2509. pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
  2510. if (!pdev) {
  2511. dp_err("pdev is NULL for pdev_id %u", pdev_id);
  2512. return QDF_STATUS_E_INVAL;
  2513. }
  2514. switch (config_id) {
  2515. case CDP_FISA_HTT_RX_FISA_CFG:
  2516. status = dp_htt_rx_fisa_config(pdev, cfg->fisa_config);
  2517. break;
  2518. case CDP_FISA_HTT_RX_FSE_OP_CFG:
  2519. status = dp_htt_rx_flow_fse_operation(pdev, cfg->fse_op_cmd);
  2520. break;
  2521. case CDP_FISA_HTT_RX_FSE_SETUP_CFG:
  2522. status = dp_htt_rx_flow_fst_setup(pdev, cfg->fse_setup_info);
  2523. break;
  2524. default:
  2525. status = QDF_STATUS_E_INVAL;
  2526. }
  2527. return status;
  2528. }
  2529. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2530. {
  2531. QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
  2532. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2533. }
  2534. #else
  2535. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2536. {
  2537. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2538. }
  2539. #endif
  2540. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  2541. #ifdef DP_RX_DROP_RAW_FRM
  2542. bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
  2543. {
  2544. if (qdf_nbuf_is_raw_frame(nbuf)) {
  2545. dp_rx_nbuf_free(nbuf);
  2546. return true;
  2547. }
  2548. return false;
  2549. }
  2550. #endif
  2551. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  2552. void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2553. {
  2554. DP_STATS_INC_PKT(soc, rx.ingress, 1,
  2555. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2556. }
  2557. #endif
  2558. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  2559. void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
  2560. uint16_t peer_id, uint32_t is_offload,
  2561. qdf_nbuf_t netbuf)
  2562. {
  2563. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2564. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
  2565. peer_id, is_offload, pdev->pdev_id);
  2566. }
  2567. void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2568. uint32_t is_offload)
  2569. {
  2570. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2571. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
  2572. soc, nbuf, HTT_INVALID_VDEV,
  2573. is_offload, 0);
  2574. }
  2575. #endif
  2576. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2577. QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
  2578. {
  2579. QDF_STATUS ret;
  2580. if (vdev->osif_rx_flush) {
  2581. ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
  2582. if (!QDF_IS_STATUS_SUCCESS(ret)) {
  2583. dp_err("Failed to flush rx pkts for vdev %d",
  2584. vdev->vdev_id);
  2585. return ret;
  2586. }
  2587. }
  2588. return QDF_STATUS_SUCCESS;
  2589. }
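/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an rx nbuf and map it for DMA
 * @dp_soc: DP soc handle
 * @nbuf_frag_info_t: frag info to be filled with the allocated nbuf and paddr
 * @dp_pdev: DP pdev handle, used for replenish error stats
 * @rx_desc_pool: rx descriptor pool providing buffer size and alignment
 * @dp_buf_page_frag_alloc_enable: true to use page-frag based nbuf allocation
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on alloc/map failure
 */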
  2590. static QDF_STATUS
  2591. dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
  2592. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  2593. struct dp_pdev *dp_pdev,
  2594. struct rx_desc_pool *rx_desc_pool,
  2595. bool dp_buf_page_frag_alloc_enable)
  2596. {
  2597. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  2598. if (dp_buf_page_frag_alloc_enable) {
  2599. (nbuf_frag_info_t->virt_addr).nbuf =
  2600. qdf_nbuf_frag_alloc(dp_soc->osdev,
  2601. rx_desc_pool->buf_size,
  2602. RX_BUFFER_RESERVATION,
  2603. rx_desc_pool->buf_alignment, FALSE);
  2604. } else {
  2605. (nbuf_frag_info_t->virt_addr).nbuf =
  2606. qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
  2607. RX_BUFFER_RESERVATION,
  2608. rx_desc_pool->buf_alignment, FALSE);
  2609. }
  2610. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  2611. dp_err("nbuf alloc failed");
  2612. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  2613. return ret;
  2614. }
  2615. ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
  2616. (nbuf_frag_info_t->virt_addr).nbuf,
  2617. QDF_DMA_FROM_DEVICE,
  2618. rx_desc_pool->buf_size);
  2619. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  2620. qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
  2621. dp_err("nbuf map failed");
  2622. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  2623. return ret;
  2624. }
  2625. nbuf_frag_info_t->paddr =
  2626. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
  2627. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  2628. &nbuf_frag_info_t->paddr,
  2629. rx_desc_pool);
  2630. if (ret == QDF_STATUS_E_FAILURE) {
  2631. dp_err("nbuf check x86 failed");
  2632. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  2633. return ret;
  2634. }
  2635. return QDF_STATUS_SUCCESS;
  2636. }
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;
	bool dp_buf_page_frag_alloc_enable;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	dp_buf_page_frag_alloc_enable =
		wlan_cfg_is_dp_buf_page_frag_alloc_enable(dp_soc->wlan_cfg_ctx);

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	/*
	 * Allocate the nbuf bookkeeping entries one page of memory at a
	 * time: take as many entries as fit in a single page and iterate
	 * over the total descriptors page by page, reusing the same page
	 * of entries on each iteration to index into the nbufs.
	 */
	total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;
	/*
	 * Add an extra page to store the remainder, if any.
	 */
	if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
		total_pages++;
	nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}
	nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);

	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer entries may not be needed
			 * in full, depending on the number of descriptors.
			 * This check ensures only the required number of
			 * buffers is allocated.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;

			/* Flag is set during pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool,
						dp_buf_page_frag_alloc_enable);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}
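
		/*
		 * Program the buffers prepared above into the RXDMA refill
		 * ring: one ring entry per nbuf, carrying the buffer's
		 * physical address and the Rx descriptor cookie.
		 */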
		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set during pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);

			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
						     rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			if (qdf_atomic_read(&dp_soc->ipa_mapped))
				dp_ipa_handle_rx_buf_smmu_mapping(
						dp_soc, nbuf,
						rx_desc_pool->buf_size, true,
						__func__, __LINE__);

			dp_audio_smmu_map(dp_soc->osdev,
					  qdf_mem_paddr_from_dmaaddr(dp_soc->osdev,
								     QDF_NBUF_CB_PADDR(nbuf)),
					  QDF_NBUF_CB_PADDR(nbuf),
					  rx_desc_pool->buf_size);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}

	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
	qdf_mem_free(nf_info);

	if (!nr_nbuf_total) {
		dp_err("No nbufs allocated");
		QDF_BUG(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes to 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);
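
/*
 * dp_rx_enable_mon_dest_frag() - set or clear the monitor-destination frag
 * mode flag on a Rx descriptor pool. When DP_RX_MON_MEM_FRAG is not
 * compiled in, the flag is always forced to false.
 */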
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);
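
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate the software Rx descriptor pool
 * for this pdev's lmac. Skipped when the pdev is NSS offload enabled.
 */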
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = QDF_DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return status;
}

void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
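
/*
 * dp_rx_pdev_desc_pool_init() - initialize the pdev Rx descriptor pool:
 * owner, buffer size, alignment and the monitor-destination frag mode
 * (enabled only for QCN9160 targets).
 */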
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t target_type = hal_get_target_type(soc->hal_soc);
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);
	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable the desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
	rx_desc_pool->buf_size = buf_size;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/*
	 * For QCN9160, use the monitor buffer size/alignment and enable
	 * frag mode for monitor destination; otherwise disable it.
	 */
	if (target_type == TARGET_TYPE_QCN9160) {
		rx_desc_pool->buf_size = RX_MONITOR_BUFFER_SIZE;
		rx_desc_pool->buf_alignment = RX_MONITOR_BUFFER_ALIGNMENT;
		dp_rx_enable_mon_dest_frag(rx_desc_pool, true);
	} else {
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
	}

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}

void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}
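
/*
 * dp_rx_pdev_buffers_alloc() - allocate and replenish Rx buffers for the
 * pdev refill ring. QCN9160 targets use the paged attach path
 * (dp_pdev_rx_buffers_attach); all other targets use the simple path.
 */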
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;
	uint32_t target_type = hal_get_target_type(soc->hal_soc);

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	if (target_type == TARGET_TYPE_QCN9160)
		return dp_pdev_rx_buffers_attach(soc, mac_for_pdev,
						 dp_rxdma_srng,
						 rx_desc_pool,
						 rxdma_entries - 1);
	else
		return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
							dp_rxdma_srng,
							rx_desc_pool,
							rxdma_entries - 1);
}

void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t target_type = hal_get_target_type(soc->hal_soc);

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	if (target_type == TARGET_TYPE_QCN9160)
		dp_rx_desc_frag_free(soc, rx_desc_pool);
	else
		dp_rx_desc_nbuf_free(soc, rx_desc_pool, false);

	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}

#ifdef DP_RX_SPECIAL_FRAME_NEED
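/*
 * dp_rx_deliver_special_frame() - pull the Rx TLV / L3 padding bytes from
 * the head of @nbuf and, if the frame matches one of the types in
 * @frame_mask, deliver it to the stack as an exception frame.
 * Returns true when the frame was delivered, false otherwise.
 */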
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (txrx_peer->vdev) {
		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
				  QDF_TX_RX_STATUS_OK);
	}

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif

#ifdef QCA_MULTIPASS_SUPPORT
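/*
 * dp_rx_multipass_process() - for a peer with a non-zero vlan_id, verify
 * that the frame carries the expected zero-TCI 802.1Q tag and rewrite the
 * TCI with the peer's vlan_id and the TID-derived priority. Returns false
 * when the expected tag is missing so the caller can drop the frame.
 */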
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	struct vlan_ethhdr *vethhdrp;

	if (qdf_unlikely(!txrx_peer->vlan_id))
		return true;

	vethhdrp = (struct vlan_ethhdr *)qdf_nbuf_data(nbuf);
	/*
	 * h_vlan_proto and h_vlan_TCI should be 0x8100 and zero
	 * respectively, since the tag is expected to be zero-padded.
	 * Return false if the frame does not carry this tag so that
	 * the caller drops it.
	 */
	if (qdf_unlikely(vethhdrp->h_vlan_proto != htons(QDF_ETH_TYPE_8021Q)) ||
	    qdf_unlikely(vethhdrp->h_vlan_TCI != 0))
		return false;

	vethhdrp->h_vlan_TCI = htons(((tid & 0x7) << VLAN_PRIO_SHIFT) |
				     (txrx_peer->vlan_id & VLAN_VID_MASK));

	if (vethhdrp->h_vlan_encapsulated_proto == htons(ETHERTYPE_PAE))
		dp_tx_remove_vlan_tag(txrx_peer->vdev, nbuf);

	return true;
}
#endif /* QCA_MULTIPASS_SUPPORT */