
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_nbuf.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
#include "dp_internal.h"
#include "dp_ipa.h"
#include "dp_hist.h"
#include "dp_rx_buffer_pool.h"
#ifdef WIFI_MONITOR_SUPPORT
#include "dp_htt.h"
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#ifdef DUP_RX_DESC_WAR
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	void *hal_soc = soc->hal_soc;

	hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
	dp_rx_desc_dump(rx_desc);
}
#else
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
				hal_ring_handle_t hal_ring_hdl,
				hal_ring_desc_t ring_desc,
				struct dp_rx_desc *rx_desc)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;

	dp_rx_desc_dump(rx_desc);
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	hal_srng_dump_ring(hal_soc, hal_ring_hdl);
	qdf_assert_always(0);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
			     hal_ring_handle_t hal_ring_hdl,
			     hal_ring_desc_t ring_desc,
			     struct dp_rx_desc *rx_desc)
{
	uint8_t return_buffer_manager;

	if (qdf_unlikely(!rx_desc)) {
		/*
		 * This is an unlikely case where the cookie obtained
		 * from the ring_desc is invalid and hence we are not
		 * able to find the corresponding rx_desc
		 */
		goto fail;
	}

	return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
	if (qdf_unlikely(!(return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
			   return_buffer_manager ==
			   HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
		goto fail;
	}

	return QDF_STATUS_SUCCESS;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	dp_err("Ring Desc:");
	hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
	return QDF_STATUS_E_NULL_VALUE;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
 * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
 *
 * @dp_soc: struct dp_soc *
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).vaddr =
		qdf_frag_alloc(NULL, rx_desc_pool->buf_size);

	if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
		dp_err("Frag alloc failed");
		DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_mem_map_page(dp_soc->osdev,
			       (nbuf_frag_info_t->virt_addr).vaddr,
			       QDF_DMA_FROM_DEVICE,
			       rx_desc_pool->buf_size,
			       &nbuf_frag_info_t->paddr);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
		dp_err("Frag map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* DP_RX_MON_MEM_FRAG */
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
 * @soc: Datapath soc structure
 * @ring_num: Refill ring number
 * @hal_ring_hdl: Refill ring handle
 * @num_req: number of buffers requested for refill
 * @num_refill: number of buffers refilled
 *
 * Return: None
 */
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
	struct dp_refill_info_record *record;
	uint32_t idx;
	uint32_t tp;
	uint32_t hp;

	if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
			 !soc->rx_refill_ring_history[ring_num]))
		return;

	idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
					DP_RX_REFILL_HIST_MAX);

	/* No NULL check needed for record since it's an array */
	record = &soc->rx_refill_ring_history[ring_num]->entry[idx];

	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
	record->timestamp = qdf_get_log_timestamp();
	record->num_req = num_req;
	record->num_refill = num_refill;
	record->hp = hp;
	record->tp = tp;
}
#else
static inline void
dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			       hal_ring_handle_t hal_ring_hdl,
			       uint32_t num_req, uint32_t num_refill)
{
}
#endif
/**
 * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer
 *					    and map
 *
 * @dp_soc: struct dp_soc *
 * @mac_id: Mac id
 * @num_entries_avail: num_entries_avail
 * @nbuf_frag_info_t: nbuf frag info
 * @dp_pdev: struct dp_pdev *
 * @rx_desc_pool: Rx desc pool
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
				     uint32_t mac_id,
				     uint32_t num_entries_avail,
				     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
				     struct dp_pdev *dp_pdev,
				     struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		dp_rx_buffer_pool_nbuf_alloc(dp_soc,
					     mac_id,
					     rx_desc_pool,
					     num_entries_avail);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return QDF_STATUS_E_NOMEM;
	}

	ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
					 nbuf_frag_info_t);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		dp_rx_buffer_pool_nbuf_free(dp_soc,
					    (nbuf_frag_info_t->virt_addr).nbuf,
					    mac_id);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return QDF_STATUS_E_FAULT;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
					  (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
					  rx_desc_pool->buf_size,
					  true);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return QDF_STATUS_E_ADDRNOTAVAIL;
	}

	return QDF_STATUS_SUCCESS;
}
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next = NULL;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	uint16_t num_entries_avail = 0;
	uint16_t num_alloc_desc = 0;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 0;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		return QDF_STATUS_E_FAILURE;
	}

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    soc, num_entries_avail);

	if (qdf_unlikely(num_entries_avail <
			 ((dp_rxdma_srng->num_entries * 3) / 4))) {
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_FAILURE;
	}

	DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);

	num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
						  rx_desc_pool,
						  num_entries_avail,
						  &desc_list,
						  &tail);
	if (!num_alloc_desc) {
		dp_rx_err("%pK: no free rx_descs in freelist", soc);
		DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
			     num_entries_avail);
		hal_srng_access_end(soc->hal_soc, rxdma_srng);
		return QDF_STATUS_E_NOMEM;
	}

	for (count = 0; count < num_alloc_desc; count++) {
		next = desc_list->next;
		qdf_prefetch(next);

		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (desc_list)
		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
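
/*
 * Illustration of the low-threshold gate above (the ring size here is an
 * assumed example value, not a driver constant): with
 * dp_rxdma_srng->num_entries == 4096, the expression
 * (num_entries * 3) / 4 evaluates to 3072, so the function bails out
 * with QDF_STATUS_E_FAILURE unless at least 3072 entries (three quarters
 * of the ring) are free. This keeps the low-threshold path from doing
 * small, frequent refills.
 */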
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < num_req_buffers; count++) {
		next = (*desc_list)->next;
		qdf_prefetch(next);

		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		(*desc_list)->rx_desc.nbuf = nbuf;
		(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
		(*desc_list)->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	uint32_t count;
	uint32_t nr_descs = 0;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	qdf_nbuf_t nbuf;
	qdf_dma_addr_t paddr;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    soc, num_req_buffers);

	nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list,
					    &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);

	hal_srng_access_start(soc->hal_soc, rxdma_srng);

	for (count = 0; count < nr_descs; count++) {
		next = desc_list->next;
		qdf_prefetch(next);

		nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
		if (qdf_unlikely(!nbuf)) {
			DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
			break;
		}

		paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
					       rx_desc_pool->buf_size);

		rxdma_ring_entry = (struct dp_buffer_addr_info *)
			hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
		if (!rxdma_ring_entry)
			break;

		desc_list->rx_desc.nbuf = nbuf;
		desc_list->rx_desc.rx_buf_start = nbuf->data;
		desc_list->rx_desc.unmapped = 0;

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always(desc_list->rx_desc.in_use == 0);

		desc_list->rx_desc.in_use = 1;
		desc_list->rx_desc.in_err_state = 0;

		hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
					     paddr,
					     desc_list->rx_desc.cookie,
					     rx_desc_pool->owner);

		desc_list = next;
	}
	qdf_dsb();
	hal_srng_access_end(soc->hal_soc, rxdma_srng);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);

	return QDF_STATUS_SUCCESS;
}
#endif
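
/*
 * Division of labor among the QCA_DP_RX_NBUF_NO_MAP_UNMAP variants above
 * (a summary based only on the code in this block):
 * __dp_pdev_rx_buffers_no_map_attach() performs the one-shot initial fill
 * at driver attach, pulling descriptors from the freelist itself;
 * __dp_rx_buffers_no_map_replenish() refills using a caller-supplied
 * desc_list during normal rx processing; and
 * __dp_rx_buffers_no_map_lt_replenish() is the opportunistic
 * low-threshold path that only runs once the ring has drained past the
 * three-quarters mark.
 */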
#ifdef DP_UMAC_HW_RESET_SUPPORT
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
}
#else
static inline
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
					uint32_t buf_size)
{
	return qdf_nbuf_get_frag_paddr(nbuf, 0);
}
#endif
/*
 * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
 *
 * @soc: core txrx main context
 * @dp_rxdma_srng: rxdma ring
 * @rx_desc_pool: rx descriptor pool
 * @rx_desc: rx descriptor
 *
 * Return: void
 */
static inline
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  struct dp_rx_desc *rx_desc)
{
	void *rxdma_srng;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;

	rxdma_srng = dp_rxdma_srng->hal_srng;
	/* No one else should be accessing the srng at this point */
	hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);

	rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);

	qdf_assert_always(rxdma_ring_entry);
	rx_desc->in_err_state = 0;

	paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
					 rx_desc_pool->buf_size);
	hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
				     rx_desc->cookie, rx_desc_pool->owner);

	hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
}
/*
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 *
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
{
	int mac_id, i, j;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
		struct dp_srng *dp_rxdma_srng =
					&soc->rx_refill_buf_ring[mac_id];
		struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
		uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
		/* Only fill up 1/3 of the ring size */
		uint32_t num_req_decs;

		if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
		    !rx_desc_pool->array)
			continue;

		num_req_decs = dp_rxdma_srng->num_entries / 3;

		for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
			struct dp_rx_desc *rx_desc =
				(struct dp_rx_desc *)&rx_desc_pool->array[i];

			if (rx_desc->in_use) {
				if (j < dp_rxdma_srng->num_entries) {
					dp_rx_desc_replenish(soc, dp_rxdma_srng,
							     rx_desc_pool,
							     rx_desc);
				} else {
					dp_rx_nbuf_unmap(soc, rx_desc, 0);
					rx_desc->unmapped = 0;

					rx_desc->nbuf->next = *nbuf_list;
					*nbuf_list = rx_desc->nbuf;

					dp_rx_add_to_free_desc_list(&head,
								    &tail,
								    rx_desc);
				}
				j++;
			}
		}

		if (head)
			dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
							 mac_id, rx_desc_pool);

		/* If the number of descs in use was less, then we need to
		 * replenish the ring with some buffers
		 */
		head = NULL;
		tail = NULL;

		if (j < (num_req_decs - 1))
			dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
						rx_desc_pool,
						((num_req_decs - 1) - j),
						&head, &tail, true);
	}
}
#endif
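
/*
 * Summary of dp_rx_desc_reuse() above: during UMAC reset recovery, up to
 * num_entries in-use descriptors are written straight back into the
 * refill ring via dp_rx_desc_replenish(); any surplus in-use descriptors
 * have their nbufs chained onto *nbuf_list for delayed free and the
 * descriptors returned to the freelist. If fewer than roughly one third
 * of the ring (num_entries / 3) was in use, dp_rx_buffers_replenish()
 * then tops the ring back up to that level with fresh buffers.
 */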
/*
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: return success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only, const char *func_name)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	uint32_t num_entries_avail;
	uint32_t count;
	int sync_hw_ptr = 1;
	struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	QDF_STATUS ret;
	void *rxdma_srng;
	union dp_rx_desc_list_elem_t *desc_list_append = NULL;
	union dp_rx_desc_list_elem_t *tail_append = NULL;
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	rxdma_srng = dp_rxdma_srng->hal_srng;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_rx_debug("%pK: requested %d buffers for replenish",
		    dp_soc, num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);

	dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
		    dp_soc, num_entries_avail);

	if (!req_only && !(*desc_list) && (num_entries_avail >
		((dp_rxdma_srng->num_entries * 3) / 4))) {
		num_req_buffers = num_entries_avail;
	} else if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	} else if ((*desc_list) &&
		   dp_rxdma_srng->num_entries - num_entries_avail <
		   CRITICAL_BUFFER_THRESHOLD) {
		/* Append some free descriptors to tail */
		num_alloc_desc =
			dp_rx_get_free_desc_list(dp_soc, mac_id,
						 rx_desc_pool,
						 CRITICAL_BUFFER_THRESHOLD,
						 &desc_list_append,
						 &tail_append);
		if (num_alloc_desc) {
			temp_list = *desc_list;
			*desc_list = desc_list_append;
			tail_append->next = temp_list;
			num_req_buffers += num_alloc_desc;

			DP_STATS_DEC(dp_pdev,
				     replenish.free_list,
				     num_alloc_desc);
		} else
			dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
	}

	if (qdf_unlikely(!num_req_buffers)) {
		num_desc_to_free = num_req_buffers;
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
		goto free_descs;
	}

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);
		if (!num_alloc_desc) {
			dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
			DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
				     num_req_buffers);
			hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
			return QDF_STATUS_E_NOMEM;
		}

		dp_rx_debug("%pK: %d rx desc allocated", dp_soc,
			    num_alloc_desc);
		num_req_buffers = num_alloc_desc;
	}

	count = 0;

	while (count < num_req_buffers) {
		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			ret = dp_pdev_frag_alloc_and_map(dp_soc,
							 &nbuf_frag_info,
							 dp_pdev,
							 rx_desc_pool);
		else
			ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
								   mac_id,
					num_entries_avail, &nbuf_frag_info,
					dp_pdev, rx_desc_pool);

		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
			if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
				continue;
			break;
		}

		count++;

		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		qdf_assert_always(rxdma_ring_entry);

		next = (*desc_list)->next;

		/* Flag is set while pdev rx_desc_pool initialization */
		if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
			dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
					     &nbuf_frag_info);
		else
			dp_rx_desc_prep(&((*desc_list)->rx_desc),
					&nbuf_frag_info);

		/* rx_desc.in_use should be zero at this time */
		qdf_assert_always((*desc_list)->rx_desc.in_use == 0);

		(*desc_list)->rx_desc.in_use = 1;
		(*desc_list)->rx_desc.in_err_state = 0;
		dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
					   func_name, RX_DESC_REPLENISHED);
		dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
				 nbuf_frag_info.virt_addr.nbuf,
				 (unsigned long long)(nbuf_frag_info.paddr),
				 (*desc_list)->rx_desc.cookie);

		hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
					     nbuf_frag_info.paddr,
					     (*desc_list)->rx_desc.cookie,
					     rx_desc_pool->owner);

		*desc_list = next;
	}

	dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
				       num_req_buffers, count);

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	dp_rx_schedule_refill_thread(dp_soc);

	dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
			 count, num_desc_to_free);

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
	DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);

free_descs:
	DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list)
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(__dp_rx_buffers_replenish);
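
/*
 * Typical call pattern (a sketch, not driver code): callers normally
 * reach __dp_rx_buffers_replenish() through dp_rx_buffers_replenish()
 * (see dp_rx_desc_reuse() above), which is assumed here to be a wrapper
 * in dp_rx.h that appends the caller's __func__ as the func_name
 * argument used for the descriptor debug info:
 *
 *	dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng, rx_desc_pool,
 *				num_buffers, &desc_list, &tail, false);
 *
 * Passing desc_list/tail as NULL-initialized list heads makes the
 * function pull descriptors from the per-pool freelist itself.
 */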
/*
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 *
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @txrx_peer: peer object from which the pkt is rx
 *
 * Return: void
 */
void
dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		  struct dp_txrx_peer *txrx_peer)
{
	qdf_nbuf_t deliver_list_head = NULL;
	qdf_nbuf_t deliver_list_tail = NULL;
	qdf_nbuf_t nbuf;

	nbuf = nbuf_list;
	while (nbuf) {
		qdf_nbuf_t next = qdf_nbuf_next(nbuf);

		DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);

		DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
		DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
					      qdf_nbuf_len(nbuf));
		/*
		 * reset the chfrag_start and chfrag_end bits in nbuf cb
		 * as this is a non-amsdu pkt and RAW mode simulation expects
		 * these bits to be 0 for non-amsdu pkt.
		 */
		if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
		    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
			qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
		}

		nbuf = next;
	}

	vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
				 &deliver_list_tail);

	vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
/*
 * dp_classify_critical_pkts() - API for marking critical packets
 * @soc: dp_soc context
 * @vdev: vdev on which packet is to be sent
 * @nbuf: nbuf that has to be classified
 *
 * The function parses the packet, identifies whether it is a critical frame
 * and marks the QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the
 * nbuf. Code for marking which frames are CRITICAL is accessed via callback.
 * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
 *
 * Return: None
 */
static
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
	if (vdev->tx_classify_critical_pkt_cb)
		vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
}
#else
static inline
void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
			       qdf_nbuf_t nbuf)
{
}
#endif

#ifdef QCA_OL_TX_MULTIQ_SUPPORT
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
}
#else
static inline
void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
{
}
#endif
/*
 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;
	qdf_nbuf_t nbuf_copy;

	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
					    nbuf))
		return true;

	if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
		return false;

	/* If the source peer is in the isolation list, don't forward;
	 * instead push the frame to the bridge stack
	 */
	if (dp_get_peer_isolation(ta_peer))
		return false;

	nbuf_copy = qdf_nbuf_copy(nbuf);
	if (!nbuf_copy)
		return false;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);

	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
						      tid_stats))
		return false;

	if (dp_tx_send((struct cdp_soc_t *)soc,
		       ta_peer->vdev->vdev_id, nbuf_copy)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		dp_rx_nbuf_free(nbuf_copy);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
		tid_stats->intrabss_cnt++;
	}
	return false;
}
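
/*
 * Note on the return convention above: for multicast/broadcast frames
 * the function duplicates the frame with qdf_nbuf_copy() and transmits
 * the copy back into the BSS, yet it returns false on every path except
 * the EAPOL-drop check. Returning false tells the caller the original
 * nbuf was not consumed, so it is still delivered up the local stack;
 * this is how one mcast frame ends up both forwarded and received.
 */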
/*
 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
 *
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @tx_vdev_id: VDEV ID for Intra-BSS TX
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats)
{
	uint16_t len;

	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/* linearize the nbuf just before we send to
	 * dp_tx_send()
	 */
	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
			return false;

		nbuf = qdf_nbuf_unshare(nbuf);
		if (!nbuf) {
			DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
						      rx.intra_bss.fail,
						      1, len);
			/* return true even though the pkt is
			 * not forwarded. Basically skb_unshare
			 * failed and we want to continue with
			 * next nbuf.
			 */
			tid_stats->fail_cnt[INTRABSS_DROP]++;
			return true;
		}
	}

	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);

	if (!dp_tx_send((struct cdp_soc_t *)soc,
			tx_vdev_id, nbuf)) {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
					      len);
	} else {
		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
					      len);
		tid_stats->fail_cnt[INTRABSS_DROP]++;
		return false;
	}

	return true;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#ifdef MESH_MODE_SUPPORT
/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills the
 * required stats. Stores the memory address in skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer)
{
	struct mesh_recv_hdr_s *rx_info = NULL;
	uint32_t pkt_type;
	uint32_t nss;
	uint32_t rate_mcs;
	uint32_t bw;
	uint8_t primary_chan_num;
	uint32_t center_chan_freq;
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_peer *peer;
	struct dp_peer *primary_link_peer;
	struct dp_soc *link_peer_soc;
	cdp_peer_stats_param_t buf = {0};

	/* fill recv mesh stats */
	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));

	/* upper layers are responsible to free this memory */
	if (!rx_info) {
		dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
			  vdev->pdev->soc);
		DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
		return;
	}

	rx_info->rs_flags = MESH_RXHDR_VER1;
	if (qdf_nbuf_is_rx_chfrag_start(nbuf))
		rx_info->rs_flags |= MESH_RX_FIRST_MSDU;

	if (qdf_nbuf_is_rx_chfrag_end(nbuf))
		rx_info->rs_flags |= MESH_RX_LAST_MSDU;

	peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
	if (peer) {
		if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
			rx_info->rs_flags |= MESH_RX_DECRYPTED;
			rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
								  rx_tlv_hdr);
			if (vdev->osif_get_key)
				vdev->osif_get_key(vdev->osif_vdev,
						   &rx_info->rs_decryptkey[0],
						   &peer->mac_addr.raw[0],
						   rx_info->rs_keyix);
		}

		dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
	}

	primary_link_peer = dp_get_primary_link_peer_by_id(soc,
							   txrx_peer->peer_id,
							   DP_MOD_ID_MESH);
	if (qdf_likely(primary_link_peer)) {
		link_peer_soc = primary_link_peer->vdev->pdev->soc;
		dp_monitor_peer_get_stats_param(link_peer_soc,
						primary_link_peer,
						cdp_peer_rx_snr, &buf);
		rx_info->rs_snr = buf.rx_snr;
		dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
	}

	rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;

	soc = vdev->pdev->soc;
	primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
	center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;

	if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
		rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
							soc->ctrl_psoc,
							vdev->pdev->pdev_id,
							center_chan_freq);
	}
	rx_info->rs_channel = primary_chan_num;
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
	rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
				(bw << 24);

	qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
		  FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
		  rx_info->rs_flags,
		  rx_info->rs_rssi,
		  rx_info->rs_channel,
		  rx_info->rs_ratephy1,
		  rx_info->rs_keyix,
		  rx_info->rs_snr);
}
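
/*
 * Layout of rs_ratephy1 as packed above: a 32-bit word assembled as
 *
 *	bits  0..7	rate_mcs  (MCS/rate code from the rx TLV)
 *	bits  8..15	nss	  (number of spatial streams)
 *	bits 16..23	pkt_type  (PHY mode code)
 *	bits 24..31	bw	  (bandwidth code)
 *
 * For illustration only (values assumed): rate_mcs=7, nss=2, pkt_type=8,
 * bw=1 yields 0x01080207.
 */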
/**
 * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
 *
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter out
 * category and drops the packet if it matches.
 *
 * Return: status (0 indicates drop, 1 indicates no drop)
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	union dp_align_mac_addr mac_addr;
	struct dp_soc *soc = vdev->pdev->soc;

	if (qdf_unlikely(vdev->mesh_rx_filter)) {
		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
			if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
			if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
						  rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
			if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
						   rx_tlv_hdr) &&
			    !hal_rx_mpdu_get_to_ds(soc->hal_soc,
						   rx_tlv_hdr))
				return QDF_STATUS_SUCCESS;

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
			if (hal_rx_mpdu_get_addr1(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}

		if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
			if (hal_rx_mpdu_get_addr2(soc->hal_soc,
						  rx_tlv_hdr,
						  &mac_addr.raw[0]))
				return QDF_STATUS_E_FAILURE;

			if (!qdf_mem_cmp(&mac_addr.raw[0],
					 &vdev->mac_addr.raw[0],
					 QDF_MAC_ADDR_SIZE))
				return QDF_STATUS_SUCCESS;
		}
	}

	return QDF_STATUS_E_FAILURE;
}
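
/*
 * Worked example for the filter above (filter value assumed for
 * illustration): with vdev->mesh_rx_filter set to
 * MESH_FILTER_OUT_FROMDS | MESH_FILTER_OUT_TA, a frame with the FromDS
 * bit set is dropped immediately, and a frame whose transmitter address
 * (addr2) equals the vdev MAC is dropped as well; everything else falls
 * through and is delivered. Note the inverted return convention:
 * QDF_STATUS_SUCCESS means "filter matched, drop", while
 * QDF_STATUS_E_FAILURE means "no match, keep the frame".
 */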
#else
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *peer)
{
}

QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	return QDF_STATUS_E_FAILURE;
}
#endif
  1120. #ifdef FEATURE_NAC_RSSI
  1121. /**
  1122. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  1123. * @soc: DP SOC handle
  1124. * @mpdu: mpdu for which peer is invalid
  1125. * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
  1126. * pool_id has same mapping)
  1127. *
  1128. * return: integer type
  1129. */
  1130. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1131. uint8_t mac_id)
  1132. {
  1133. struct dp_invalid_peer_msg msg;
  1134. struct dp_vdev *vdev = NULL;
  1135. struct dp_pdev *pdev = NULL;
  1136. struct ieee80211_frame *wh;
  1137. qdf_nbuf_t curr_nbuf, next_nbuf;
  1138. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1139. uint8_t *rx_pkt_hdr = NULL;
  1140. int i = 0;
  1141. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  1142. dp_rx_debug("%pK: Drop decapped frames", soc);
  1143. goto free;
  1144. }
  1145. /* In RAW packet, packet header will be part of data */
  1146. rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
  1147. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1148. if (!DP_FRAME_IS_DATA(wh)) {
  1149. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  1150. goto free;
  1151. }
  1152. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  1153. dp_rx_err("%pK: Invalid nbuf length", soc);
  1154. goto free;
  1155. }
  1156. /* In DMAC case the rx_desc_pools are common across PDEVs
  1157. * so PDEV cannot be derived from the pool_id.
  1158. *
  1159. * link_id need to derived from the TLV tag word which is
  1160. * disabled by default. For now adding a WAR to get vdev
  1161. * with brute force this need to fixed with word based subscription
  1162. * support is added by enabling TLV tag word
  1163. */
  1164. if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
  1165. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1166. pdev = soc->pdev_list[i];
  1167. if (!pdev || qdf_unlikely(pdev->is_pdev_down))
  1168. continue;
  1169. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1170. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1171. QDF_MAC_ADDR_SIZE) == 0) {
  1172. goto out;
  1173. }
  1174. }
  1175. }
  1176. } else {
  1177. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1178. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  1179. dp_rx_err("%pK: PDEV %s",
  1180. soc, !pdev ? "not found" : "down");
  1181. goto free;
  1182. }
  1183. if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
  1184. QDF_STATUS_SUCCESS)
  1185. return 0;
  1186. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1187. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1188. QDF_MAC_ADDR_SIZE) == 0) {
  1189. goto out;
  1190. }
  1191. }
  1192. }
  1193. if (!vdev) {
  1194. dp_rx_err("%pK: VDEV not found", soc);
  1195. goto free;
  1196. }
  1197. out:
  1198. msg.wh = wh;
  1199. qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
  1200. msg.nbuf = mpdu;
  1201. msg.vdev_id = vdev->vdev_id;
	/*
	 * NOTE: Only valid for HKv1.
	 * If smart monitor mode is enabled on RE, we are getting invalid
	 * peer frames with RA as the STA mac of the RE and the TA not
	 * matching any NAC list entry or the BSSID. Such frames need to be
	 * dropped in order to avoid HM_WDS false addition.
	 */
	if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
		if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
			dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
				   soc, wh->i_addr1);
			goto free;
		}
		pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
				(struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
				pdev->pdev_id, &msg);
	}

free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	return 0;
}
/**
 * dp_rx_process_invalid_peer_wrapper() - Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Only trigger the process when mpdu is completed */
	if (mpdu_done)
		dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#else
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
				   uint8_t mac_id)
{
	qdf_nbuf_t curr_nbuf, next_nbuf;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = NULL;
	struct ieee80211_frame *wh;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
	uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);

	wh = (struct ieee80211_frame *)rx_pkt_hdr;

	if (!DP_FRAME_IS_DATA(wh)) {
		QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
				   "only for data frames");
		goto free;
	}

	if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
		dp_rx_info_rl("%pK: Invalid nbuf length", soc);
		goto free;
	}

	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (!pdev) {
		dp_rx_info_rl("%pK: PDEV not found", soc);
		goto free;
	}

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
				QDF_MAC_ADDR_SIZE) == 0) {
			qdf_spin_unlock_bh(&pdev->vdev_list_lock);
			goto out;
		}
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	if (!vdev) {
		dp_rx_info_rl("%pK: VDEV not found", soc);
		goto free;
	}

out:
	if (soc->cdp_soc.ol_ops->rx_invalid_peer)
		soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
free:
	/* Drop and free packet */
	curr_nbuf = mpdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}

	/* Reset the head and tail pointers */
	pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	if (pdev) {
		pdev->invalid_peer_head_msdu = NULL;
		pdev->invalid_peer_tail_msdu = NULL;
	}

	return 0;
}

void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id)
{
	/* Process the nbuf */
	dp_rx_process_invalid_peer(soc, mpdu, mac_id);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_print_offload_info() - Print offload info from RX TLV
 * @soc: dp soc handle
 * @msdu: MSDU for which the offload info is to be printed
 *
 * Return: None
 */
static void dp_rx_print_offload_info(struct dp_soc *soc,
				     qdf_nbuf_t msdu)
{
	dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
	dp_verbose_debug("lro_eligible 0x%x",
			 QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
	dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
	dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
	dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
	dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
	dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
	dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
	dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
	dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
	dp_verbose_debug("---------------------------------------------------------");
}
/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
	struct hal_offload_info offload_info;

	if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
		return;

	if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
		return;

	*rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;

	QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
	QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
	QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
		hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
					  rx_tlv);
	QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
	QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
	QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
	QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
	QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
	QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
	QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;

	dp_rx_print_offload_info(soc, msdu);
}
#endif /* RECEIVE_OFFLOAD */
/**
 * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 * @mpdu_len: mpdu length
 * @l3_pad_len: L3 padding length by HW
 *
 * Return: returns true if nbuf is last msdu of mpdu else returns false.
 */
static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
					 qdf_nbuf_t nbuf,
					 uint16_t *mpdu_len,
					 uint32_t l3_pad_len)
{
	bool last_nbuf;
	uint32_t pkt_hdr_size;

	pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
	if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
		qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
		last_nbuf = false;
		*mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
	} else {
		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
		last_nbuf = true;
		*mpdu_len = 0;
	}

	return last_nbuf;
}
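
/*
 * Worked example (added commentary; RX_DATA_BUFFER_SIZE of 2048 bytes and
 * an rx_pkt_tlv_size of 128 bytes are assumed values, not taken from this
 * file): for a 4000-byte scattered msdu with l3_pad_len 0, call 1 sets
 * pktlen to 2048 and leaves *mpdu_len = 4000 - (2048 - 128) = 2080
 * (last_nbuf false); call 2 again consumes 1920 payload bytes, leaving
 * *mpdu_len = 160 (last_nbuf false); call 3 sees 160 + 128 <= 2048, sets
 * pktlen to 288 and returns last_nbuf true. The msdu thus spans three
 * buffers, each of which still carries its own TLV header.
 */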
/**
 * dp_get_l3_hdr_pad_len() - get L3 header padding length.
 *
 * @soc: DP soc handle
 * @nbuf: pointer to msdu.
 *
 * Return: returns padding length in bytes.
 */
static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint32_t l3_hdr_pad = 0;
	uint8_t *rx_tlv_hdr;
	struct hal_rx_msdu_metadata msdu_metadata;

	while (nbuf) {
		if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
			/* A scattered msdu ends where the continuation bit is 0 */
			rx_tlv_hdr = qdf_nbuf_data(nbuf);
			hal_rx_msdu_metadata_get(soc->hal_soc,
						 rx_tlv_hdr,
						 &msdu_metadata);
			l3_hdr_pad = msdu_metadata.l3_hdr_pad;
			break;
		}
		nbuf = nbuf->next;
	}

	return l3_hdr_pad;
}
/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: DP SOC handle
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: returns the head nbuf which contains complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	qdf_nbuf_t parent, frag_list, next = NULL;
	uint16_t frag_list_len = 0;
	uint16_t mpdu_len;
	bool last_nbuf;
	uint32_t l3_hdr_pad_offset = 0;
	/*
	 * Use the msdu length from the REO entry descriptor instead, since
	 * there are cases where the RX PKT TLV is corrupted while the
	 * msdu_len from the REO descriptor is correct for non-raw RX
	 * scatter msdus.
	 */
	mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	/*
	 * this is a case where the complete msdu fits in one single nbuf.
	 * in this case HW sets both start and end bit and we only need to
	 * reset these bits for RAW mode simulator to decap the pkt
	 */
	if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
	    qdf_nbuf_is_rx_chfrag_end(nbuf)) {
		qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
		qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
		return nbuf;
	}

	l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);

	/*
	 * This is a case where we have multiple msdus (A-MSDU) spread across
	 * multiple nbufs. here we create a fraglist out of these nbufs.
	 *
	 * the moment we encounter a nbuf with continuation bit set we
	 * know for sure we have an MSDU which is spread across multiple
	 * nbufs. We loop through and reap nbufs till we reach last nbuf.
	 */
	parent = nbuf;
	frag_list = nbuf->next;
	nbuf = nbuf->next;

	/*
	 * set the start bit in the first nbuf we encounter with continuation
	 * bit set. This has the proper mpdu length set as it is the first
	 * msdu of the mpdu. this becomes the parent nbuf and the subsequent
	 * nbufs will form the frag_list of the parent nbuf.
	 */
	qdf_nbuf_set_rx_chfrag_start(parent, 1);

	/*
	 * L3 header padding is only needed for the 1st buffer
	 * in a scattered msdu
	 */
	last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
					  l3_hdr_pad_offset);

	/*
	 * MSDU cont bit is set but reported MPDU length can fit
	 * in to single buffer
	 *
	 * Increment error stats and avoid SG list creation
	 */
	if (last_nbuf) {
		DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
		qdf_nbuf_pull_head(parent,
				   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
		return parent;
	}

	/*
	 * this is where we set the length of the fragments which are
	 * associated to the parent nbuf. We iterate through the frag_list
	 * till we hit the last_nbuf of the list.
	 */
	do {
		last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
		qdf_nbuf_pull_head(nbuf,
				   soc->rx_pkt_tlv_size);
		frag_list_len += qdf_nbuf_len(nbuf);

		if (last_nbuf) {
			next = nbuf->next;
			nbuf->next = NULL;
			break;
		} else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
			dp_err("Invalid packet length\n");
			qdf_assert_always(0);
		}
		nbuf = nbuf->next;
	} while (!last_nbuf);

	qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
	qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
	parent->next = next;

	qdf_nbuf_pull_head(parent,
			   soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
	return parent;
}
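
/*
 * Illustrative sketch (added commentary, not from the original source):
 * for a 3-buffer scattered msdu the chain returned above looks like
 *
 *	parent (first buffer; TLVs + L3 pad pulled)
 *	  +-- frag_list: buf2 -> buf3	(ext list, TLVs pulled)
 *	  +-- next: first nbuf of the following MPDU, if any
 *
 * i.e. the parent nbuf carries the head of the msdu while the remaining
 * buffers hang off its extension list, with their byte count accounted
 * in frag_list_len.
 */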
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef QCA_PEER_EXT_STATS
/**
 * dp_rx_compute_tid_delay() - Compute per-TID delay stats
 * @stats: cdp_delay_tid_stats struct for the TID
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf)
{
	struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);

	dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
}
#endif /* QCA_PEER_EXT_STATS */
/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   to pass in correct fields
 * @vdev: vdev handle
 * @nbuf: rx buffer
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
	uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	uint32_t interframe_delay =
		(uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
	struct cdp_tid_rx_stats *rstats =
		&vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];

	dp_update_delay_stats(NULL, rstats, to_stack, tid,
			      CDP_DELAY_STATS_REAP_STACK, ring_id, false);
	/*
	 * Update interframe delay stats calculated at the deliver_data_ol
	 * point. vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame,
	 * so the interframe delay will not be calculated correctly for it.
	 * On the other hand, this avoids an extra per-packet check of
	 * vdev->prev_rx_deliver_tstamp.
	 */
	dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
			      CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
	vdev->prev_rx_deliver_tstamp = current_ts;
}
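
/*
 * Note (added commentary, not from the original source): the two histogram
 * updates above capture different things. CDP_DELAY_STATS_REAP_STACK is
 * roughly the per-packet latency from ring reap to stack delivery, derived
 * from the nbuf timestamp, while CDP_DELAY_STATS_RX_INTERFRAME is the gap
 * between consecutive deliveries on the same vdev.
 */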
/**
 * dp_rx_drop_nbuf_list() - drop an nbuf list
 * @pdev: dp pdev reference
 * @buf_list: buffer list to be dropped
 *
 * Return: int (number of bufs dropped)
 */
static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
				       qdf_nbuf_t buf_list)
{
	struct cdp_tid_rx_stats *stats = NULL;
	uint8_t tid = 0, ring_id = 0;
	int num_dropped = 0;
	qdf_nbuf_t buf, next_buf;

	buf = buf_list;
	while (buf) {
		ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
		next_buf = qdf_nbuf_queue_next(buf);
		tid = qdf_nbuf_get_tid_val(buf);
		if (qdf_likely(pdev)) {
			stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
			stats->fail_cnt[INVALID_PEER_VDEV]++;
			stats->delivered_to_stack--;
		}
		dp_rx_nbuf_free(buf);
		buf = next_buf;
		num_dropped++;
	}

	return num_dropped;
}
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
 * @soc: core txrx main context
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: true if packet is delivered to netdev per STA.
 */
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	/*
	 * When extended WDS is disabled, frames are sent to the AP netdevice.
	 */
	if (qdf_likely(!vdev->wds_ext_enabled))
		return false;

	/*
	 * There can be 2 cases:
	 * 1. Send frame to parent netdev if it's not for netdev per STA
	 * 2. If frame is meant for netdev per STA:
	 *    a. Send frame to appropriate netdev using registered fp.
	 *    b. If fp is NULL, drop the frames.
	 */
	if (!txrx_peer->wds_ext.init)
		return false;

	if (txrx_peer->osif_rx)
		txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
	else
		dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);

	return true;
}
#else
static inline bool
dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
{
	return false;
}
#endif
#ifdef PEER_CACHE_RX_PKTS
/**
 * dp_rx_flush_rx_cached() - flush cached rx frames
 * @peer: peer
 * @drop: flag to drop frames or forward to net stack
 *
 * Return: None
 */
void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
{
	struct dp_peer_cached_bufq *bufqi;
	struct dp_rx_cached_buf *cache_buf = NULL;
	ol_txrx_rx_fp data_rx = NULL;
	int num_buff_elem;
	QDF_STATUS status;

	/*
	 * Flush dp cached frames only for mld peers and legacy peers, as
	 * link peers don't store cached frames
	 */
	if (IS_MLO_DP_LINK_PEER(peer))
		return;

	if (!peer->txrx_peer) {
		dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}

	if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
		qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
		return;
	}

	qdf_spin_lock_bh(&peer->peer_info_lock);
	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
		data_rx = peer->vdev->osif_rx;
	else
		drop = true;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	bufqi = &peer->txrx_peer->bufq_info;
	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_remove_front(&bufqi->cached_bufq,
			      (qdf_list_node_t **)&cache_buf);
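	/*
	 * Descriptive note (added commentary): entries are popped one at a
	 * time under bufq_lock, and the lock is released around the delivery
	 * or drop of each entry so the OSIF rx callback is never invoked
	 * with the bufq lock held; the lock is re-acquired before fetching
	 * the next cached entry.
	 */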
	while (cache_buf) {
		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
								cache_buf->buf);
		bufqi->entries -= num_buff_elem;
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		if (drop) {
			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
							      cache_buf->buf);
		} else {
			/* Flush the cached frames to OSIF DEV */
			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
			if (status != QDF_STATUS_SUCCESS)
				bufqi->dropped = dp_rx_drop_nbuf_list(
							peer->vdev->pdev,
							cache_buf->buf);
		}
		qdf_mem_free(cache_buf);
		cache_buf = NULL;
		qdf_spin_lock_bh(&bufqi->bufq_lock);
		qdf_list_remove_front(&bufqi->cached_bufq,
				      (qdf_list_node_t **)&cache_buf);
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);
	qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
}
/**
 * dp_rx_enqueue_rx() - cache rx frames
 * @txrx_peer: txrx peer handle
 * @rx_buf_list: cache buffer list
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	struct dp_rx_cached_buf *cache_buf;
	struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
	int num_buff_elem;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
						     DP_MOD_ID_RX);

	if (!peer) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		return QDF_STATUS_E_INVAL;
	}

	dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
		    bufqi->dropped);
	if (!peer->valid) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_INVAL;
		goto fail;
	}

	qdf_spin_lock_bh(&bufqi->bufq_lock);
	if (bufqi->entries >= bufqi->thresh) {
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		qdf_spin_unlock_bh(&bufqi->bufq_lock);
		ret = QDF_STATUS_E_RESOURCES;
		goto fail;
	}
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
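	/*
	 * Descriptive note (added commentary): the atomic allocation below
	 * is used because this path can run from the rx softirq context
	 * while reaping the destination ring, where a sleeping allocation
	 * cannot be used.
	 */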
	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
	if (!cache_buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "Failed to allocate buf to cache rx frames");
		bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
						      rx_buf_list);
		ret = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	cache_buf->buf = rx_buf_list;
	qdf_spin_lock_bh(&bufqi->bufq_lock);
	qdf_list_insert_back(&bufqi->cached_bufq,
			     &cache_buf->node);
	bufqi->entries += num_buff_elem;
	qdf_spin_unlock_bh(&bufqi->bufq_lock);

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_RX);
	return ret;
}
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return true;
}
#else
static inline
bool dp_rx_is_peer_cache_bufq_supported(void)
{
	return false;
}

static inline QDF_STATUS
dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifndef DELIVERY_TO_STACK_STATUS_CHECK
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
						    txrx_peer, nbuf_head)))
		return;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else
		vdev->osif_rx(vdev->osif_vdev, nbuf_head);
}
#else
/**
 * dp_rx_check_delivery_to_stack() - Deliver pkts to network
 *				     using the appropriate call back functions.
 * @soc: soc
 * @vdev: vdev
 * @txrx_peer: txrx peer
 * @nbuf_head: skb list head
 *
 * Check the return status of the call back function and drop
 * the packets if the return status indicates a failure.
 *
 * Return: None
 */
static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
					  struct dp_vdev *vdev,
					  struct dp_txrx_peer *txrx_peer,
					  qdf_nbuf_t nbuf_head)
{
	int num_nbuf = 0;
	QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;

	/* Function pointer initialized only when FISA is enabled */
	if (vdev->osif_fisa_rx)
		/* on failure send it via regular path */
		ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
	else if (vdev->osif_rx)
		ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);

	if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
		num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
		DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
		if (txrx_peer)
			DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
					       num_nbuf);
	}
}
#endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
/**
 * dp_rx_validate_rx_callbacks() - validate rx callbacks
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx peer object
 * @nbuf_head: skb list head
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_FAILURE
 */
static inline QDF_STATUS
dp_rx_validate_rx_callbacks(struct dp_soc *soc,
			    struct dp_vdev *vdev,
			    struct dp_txrx_peer *txrx_peer,
			    qdf_nbuf_t nbuf_head)
{
	int num_nbuf;

	if (qdf_unlikely(!vdev || vdev->delete.pending)) {
		num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
		/*
		 * This is a special case where vdev is invalid,
		 * so we cannot know the pdev to which this packet
		 * belonged. Hence we update the soc rx error stats.
		 */
		DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * highly unlikely to have a vdev without a registered rx
	 * callback function. if so let us free the nbuf_list.
	 */
	if (qdf_unlikely(!vdev->osif_rx)) {
		if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
			dp_rx_enqueue_rx(txrx_peer, nbuf_head);
		} else {
			num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
							nbuf_head);
			DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
					      vdev->pdev->enhanced_stats_en);
		}
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *txrx_peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
	    (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
		vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
					 &nbuf_tail);
	}

	dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);

	return QDF_STATUS_SUCCESS;
}
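
/*
 * Illustrative call flow (a sketch, not code from this file): the REO reap
 * loop typically batches nbufs per (vdev, peer) and hands the whole list
 * over in one call, e.g.
 *
 *	dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
 *			       deliver_list_head, deliver_list_tail);
 *
 * where deliver_list_head/tail name the batched list in the caller. Raw
 * and native-wifi decap types take the rsim decap detour above before the
 * list reaches the OSIF layer.
 */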
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *txrx_peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail)
{
	if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);

	return QDF_STATUS_SUCCESS;
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
{ \
	qdf_nbuf_t nbuf_local; \
	struct dp_txrx_peer *txrx_peer_local; \
	struct dp_vdev *vdev_local = vdev_hdl; \
	do { \
		if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
			break; \
		nbuf_local = nbuf; \
		txrx_peer_local = txrx_peer; \
		if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
			break; \
		else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
			break; \
		dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
						       (nbuf_local), \
						       (txrx_peer_local), 0, 1); \
	} while (0); \
}
#else
#define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_rates_stats_update() - update rate stats
 *				from rx msdu.
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 * @sgi: Short Guard Interval
 * @mcs: Modulation and Coding Set
 * @nss: Number of Spatial Streams
 * @bw: BandWidth
 * @pkt_type: Corresponds to preamble
 *
 * To record rates precisely, the following factors are considered:
 * exclude specific frames (ARP, DHCP, ssdp, etc.) and make sure to
 * affect rx throughput as little as possible.
 *
 * Return: void
 */
static void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
	uint32_t rix;
	uint16_t ratecode;
	uint32_t avg_rx_rate;
	uint32_t ratekbps;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	if (soc->high_throughput ||
	    dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
		return;
	}

	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);

	/* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
	if (pkt_type == DOT11_B)
		nss = 1;

	/* here pkt_type corresponds to preamble */
	ratekbps = dp_getrateindex(sgi,
				   mcs,
				   nss - 1,
				   pkt_type,
				   bw,
				   punc_mode,
				   &rix,
				   &ratecode);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
	avg_rx_rate =
		dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
				ratekbps);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
	DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
}
#else
static inline void
dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
			 uint32_t sgi, uint32_t mcs,
			 uint32_t nss, uint32_t bw, uint32_t pkt_type)
{
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
#ifndef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_rx_msdu_extd_stats_update() - Update Rx extended path stats for peer
 *
 * @soc: datapath soc handle
 * @nbuf: received msdu buffer
 * @rx_tlv_hdr: rx tlv header
 * @txrx_peer: datapath txrx_peer handle
 *
 * Return: void
 */
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
	bool is_ampdu;
	uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
	uint8_t dst_mcs_idx;
	/*
	 * TODO - For KIWI this field is present in the ring_desc;
	 * try to use the ring desc instead of the tlv.
	 */
	is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));

	sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
	mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
	tid = qdf_nbuf_get_tid_val(nbuf);
	bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
	reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
							      rx_tlv_hdr);
	nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
	pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);

	/* do HW to SW pkt type conversion */
	pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
		    hal_2_dp_pkt_type_map[pkt_type]);

	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
		      ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
	/*
	 * only if nss > 0 and pkt_type is 11N/AC/AX,
	 * then increase index [nss - 1] in array counter.
	 */
	if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
		DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);

	DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
				   hal_rx_tlv_mic_err_get(soc->hal_soc,
							  rx_tlv_hdr));
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
				   hal_rx_tlv_decrypt_err_get(soc->hal_soc,
							      rx_tlv_hdr));

	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
	DP_PEER_EXTD_STATS_INC(txrx_peer,
			       rx.reception_type[reception_type], 1);

	dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
	if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
		DP_PEER_EXTD_STATS_INC(txrx_peer,
				       rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
				       1);

	dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
				 sgi, mcs, nss, bw, pkt_type);
}
#else
static inline
void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
				  uint8_t *rx_tlv_hdr,
				  struct dp_txrx_peer *txrx_peer)
{
}
#endif
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
	uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);

	/* only count stats per lmac for MLO connection */
	DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
				       QDF_NBUF_CB_RX_PKT_LEN(nbuf),
				       txrx_peer->mld_peer);
}
#else
static inline void
dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
			       qdf_nbuf_t nbuf)
{
}
#endif
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @txrx_peer: pointer to the txrx peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats)
{
	bool is_not_amsdu;
	struct dp_vdev *vdev = txrx_peer->vdev;
	bool enh_flag;
	qdf_ether_header_t *eh;
	uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);

	dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
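	/*
	 * Descriptive note (added commentary): a standalone msdu has both
	 * the chfrag start and end bits set, while A-MSDU subframes carry
	 * at most one of them, so the bitwise AND below identifies the
	 * non-A-MSDU case.
	 */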
	is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
		       qdf_nbuf_is_rx_chfrag_end(nbuf);
	DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
				      msdu_len);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
				   is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
	DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
				   qdf_nbuf_is_rx_retry_flag(nbuf));
	dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
	tid_stats->msdu_cnt++;
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
			 (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		enh_flag = vdev->pdev->enhanced_stats_en;
		DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
		tid_stats->mcast_msdu_cnt++;
		if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
			DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
			tid_stats->bcast_msdu_cnt++;
		}
	}

	txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();

	dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
}

#ifndef WDS_VENDOR_EXTENSION
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
			   struct dp_vdev *vdev,
			   struct dp_txrx_peer *txrx_peer)
{
	return 1;
}
#endif
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check() - Add sanity check to catch REO rx_desc
 *				    paddr corruption
 * @soc: DP SOC handle
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
					hal_ring_desc_t ring_desc,
					struct dp_rx_desc *rx_desc)
{
	struct hal_buf_info hbi;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
	/* Sanity check for possible buffer paddr corruption */
	if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
		return QDF_STATUS_SUCCESS;

	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_rx_desc_nbuf_len_sanity_check() - Add sanity check to catch Rx buffer
 *					out of bound access from H.W
 *
 * @soc: DP soc
 * @pkt_len: Packet length received from H.W
 *
 * Return: NONE
 */
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
				 uint32_t pkt_len)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[0];
	qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
}
#else
static inline void
dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
#endif
#ifdef DP_RX_PKT_NO_PEER_DELIVER
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
 *					   during roaming
 * @vdev: dp_vdev pointer
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: pkt skb pointer
 *
 * This function checks if rx udp data has been received from an authorised
 * roamed peer before the peer map indication arrives from FW after roaming.
 * This is needed for VoIP scenarios, where the packet loss expected during
 * roaming has to be minimal.
 *
 * Return: bool
 */
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	char *hdr_desc;
	struct ieee80211_frame *wh = NULL;

	hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
					     rx_tlv_hdr);
	wh = (struct ieee80211_frame *)hdr_desc;

	if (vdev->roaming_peer_status ==
	    WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
	    !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
	    QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
	    qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
		return true;

	return false;
}
#else
static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
						uint8_t *rx_tlv_hdr,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif
/**
 * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
 *				      no corresponding peer is found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function tries to deliver certain special RX frames to the stack
 * even when no matching peer is found. For instance, in the LFR case some
 * eapol data will be sent to the host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	uint16_t peer_id;
	uint8_t vdev_id;
	struct dp_vdev *vdev = NULL;
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t pkt_len = 0;
	uint8_t *rx_tlv_hdr;
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		goto deliver_fail;

	vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
	if (!vdev || vdev->delete.pending || !vdev->osif_rx)
		goto deliver_fail;

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
		goto deliver_fail;

	rx_tlv_hdr = qdf_nbuf_data(nbuf);
	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
	pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;

	qdf_nbuf_set_pktlen(nbuf, pkt_len);
	qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);

	if (dp_rx_is_special_frame(nbuf, frame_mask) ||
	    dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		if (QDF_STATUS_SUCCESS !=
		    vdev->osif_rx(vdev->osif_vdev, nbuf))
			goto deliver_fail;

		DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
		return;
	}

deliver_fail:
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
}
#else
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
	dp_rx_nbuf_free(nbuf);
}
#endif
/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked() to get the number of
 * valid entries. It should not be called within a SRNG lock. The HW pointer
 * value is synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
				    hal_ring_handle_t hal_ring_hdl,
				    uint32_t num_entries,
				    bool *near_full)
{
	uint32_t num_pending = 0;

	num_pending = hal_srng_dst_num_valid_locked(hal_soc,
						    hal_ring_hdl,
						    true);

	if (num_entries && (num_pending >= num_entries >> 1))
		*near_full = true;
	else
		*near_full = false;

	return num_pending;
}
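
/*
 * Worked example (illustrative numbers, not taken from this file): for a
 * destination ring with num_entries = 1024 the near-full threshold is
 * 1024 >> 1 = 512. With 600 valid-but-unreaped entries the function
 * returns 600 and sets *near_full = true; with 100 pending entries it
 * returns 100 and sets *near_full = false.
 */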
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef WLAN_SUPPORT_RX_FISA
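/*
 * Note (added commentary, not from the original source): in the FISA build
 * the L3 padding is stashed in the nbuf control block before the TLVs are
 * pulled, presumably so the FISA flow-aggregation path can later recover
 * where the IP header starts; the non-FISA variant below only strips the
 * header.
 */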
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#else
void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_raw_frame(nbuf)) {
		dp_rx_nbuf_free(nbuf);
		return true;
	}

	return false;
}
#endif
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Return: None
 */
void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
			hal_ring_desc_t ring_desc)
{
	struct dp_buf_info_record *record;
	struct hal_buf_info hbi;
	uint32_t idx;

	if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
		return;

	hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);

	/* buffer_addr_info is the first element of ring_desc */
	hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
				  &hbi);

	idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
					DP_RX_HIST_MAX);

	/* No NULL check needed for record since its an array */
	record = &soc->rx_ring_history[ring_num]->entry[idx];

	record->timestamp = qdf_get_log_timestamp();
	record->hbi.paddr = hbi.paddr;
	record->hbi.sw_cookie = hbi.sw_cookie;
	record->hbi.rbm = hbi.rbm;
}
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Return: none
 */
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
	DP_STATS_INC_PKT(soc, rx.ingress, 1,
			 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
}
#endif
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating offload packets
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver rx packet to packet capture
 *
 * Return: none
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
				     peer_id, is_offload, pdev->pdev_id);
}

void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload)
{
	if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
		dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
				     soc, nbuf, HTT_INVALID_VDEV,
				     is_offload, 0);
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
{
	QDF_STATUS ret;

	if (vdev->osif_rx_flush) {
		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
		if (!QDF_IS_STATUS_SUCCESS(ret)) {
			dp_err("Failed to flush rx pkts for vdev %d\n",
			       vdev->vdev_id);
			return ret;
		}
	}

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
			       RX_BUFFER_RESERVATION,
			       rx_desc_pool->buf_alignment, FALSE);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return ret;
	}

	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
					 (nbuf_frag_info_t->virt_addr).nbuf,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);

	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return ret;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		dp_err("nbuf check x86 failed");
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return ret;
	}

	return QDF_STATUS_SUCCESS;
}
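
/*
 * Summary of the helper above (added commentary): allocation proceeds in
 * three steps - nbuf alloc, DMA map, and an address-range check via
 * dp_check_paddr() - and each failure path increments a dedicated
 * replenish stat (nbuf_alloc_fail, map_err, x86_fail) so the drop reason
 * remains visible in the pdev stats.
 */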
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;

	if (qdf_unlikely(!dp_pdev)) {
		dp_rx_err("%pK: pdev is null for mac_id = %d",
			  dp_soc, mac_id);
		return QDF_STATUS_E_FAILURE;
	}

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);
	/*
	 * Try to allocate pointers to the nbuf one page at a time.
	 * Take pointers that can fit in one page of memory and
	 * iterate through the total descriptors that need to be
	 * allocated in order of pages. Reuse the pointers that
	 * have been allocated to fit in one page across each
	 * iteration to index into the nbuf.
	 */
	total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;

	/*
	 * Add an extra page to store the remainder if any
	 */
	if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
		total_pages++;
	nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}
	nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);
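	/*
	 * Worked example (illustrative; the actual DP_BLOCKMEM_SIZE and
	 * struct size depend on the build): with a 4096-byte block and a
	 * 16-byte dp_rx_nbuf_frag_info, one block holds 256 entries, so
	 * 1024 requested descriptors are processed in 4 page iterations,
	 * reusing the same nf_info array each time.
	 */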
	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer pointers may not be required
			 * completely based on the number of descriptors. Below
			 * check will ensure we are allocating only the
			 * required number of descriptors.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;
			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}

		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set while pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(dp_soc->hal_soc,
						     rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			dp_ipa_handle_rx_buf_smmu_mapping(
						dp_soc, nbuf,
						rx_desc_pool->buf_size,
						true);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}

	dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
	qdf_mem_free(nf_info);

	if (!nr_nbuf_total) {
		dp_err("No nbuf's allocated");
		QDF_BUG(0);
		return QDF_STATUS_E_RESOURCES;
	}

	/* No need to count the number of bytes received during replenish.
	 * Therefore set replenish.pkts.bytes as 0.
	 */
	DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);

	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_pdev_rx_buffers_attach);
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *				  monitor destination ring via frag.
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Enable this flag only for monitor destination buffer processing
 * if DP_RX_MON_MEM_FRAG feature is enabled.
 * If the flag is set then the frag based functions will be called for
 * alloc, map, prep desc and free ops for desc buffers; otherwise the
 * normal nbuf based functions will be called.
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif

qdf_export_symbol(dp_rx_enable_mon_dest_frag);
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}
/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
						dp_rxdma_srng,
						rx_desc_pool,
						rxdma_entries - 1);
}
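
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller would
 * order the pdev-level Rx entry points in this file as below -- allocate
 * descriptor-pool memory, initialize the pool, then allocate and replenish
 * the actual buffers.
 */
static inline QDF_STATUS example_pdev_rx_attach(struct dp_pdev *pdev)
{
	QDF_STATUS status;

	status = dp_rx_pdev_desc_pool_alloc(pdev);	/* pool memory only */
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = dp_rx_pdev_desc_pool_init(pdev);	/* freelist and flags */
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	return dp_rx_pdev_buffers_alloc(pdev);		/* nbufs + replenish */
}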
/*
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
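
/*
 * Illustrative sketch, not part of the driver: the matching detach order
 * for a hypothetical caller mirrors attach in reverse -- free the nbufs
 * first, then de-initialize and free the descriptor pool.
 */
static inline void example_pdev_rx_detach(struct dp_pdev *pdev)
{
	dp_rx_pdev_buffers_free(pdev);		/* unmap and free nbufs */
	dp_rx_pdev_desc_pool_deinit(pdev);	/* reset freelist, destroy locks */
	dp_rx_pdev_desc_pool_free(pdev);	/* release pool memory */
}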
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (txrx_peer->vdev) {
		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
				  QDF_TX_RX_STATUS_OK);
	}

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif
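
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller builds
 * frame_mask from the FRAME_MASK_* bits (assumed to be defined in dp_rx.h)
 * so that only the selected frame types are flagged as exception frames and
 * delivered to the stack.
 */
#ifdef DP_RX_SPECIAL_FRAME_NEED
static inline bool example_deliver_eapol_or_arp(struct dp_soc *soc,
						struct dp_txrx_peer *txrx_peer,
						qdf_nbuf_t nbuf,
						uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV4_ARP;

	return dp_rx_deliver_special_frame(soc, txrx_peer, nbuf,
					   frame_mask, rx_tlv_hdr);
}
#endif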
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif
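
/*
 * Illustrative sketch, not part of the driver: the per-MSDU Rx delivery
 * path would invoke the WOW marker once per nbuf; this hypothetical call
 * site assumes rx_tlv_hdr points at the start of the MSDU's Rx TLVs.
 */
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
static inline void example_mark_wow_wakeup(struct dp_vdev *vdev,
					   uint8_t *rx_tlv_hdr,
					   qdf_nbuf_t nbuf)
{
	dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr,
						 nbuf);
}
#endif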