dp_rx.h
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_RX_H
#define _DP_RX_H

#include "hal_rx.h"
#include "dp_peer.h"
#include "dp_internal.h"
#include <qdf_tracepoint.h>
#include "dp_ipa.h"

#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
#define RX_DATA_BUFFER_ALIGNMENT	128
#endif
#ifndef RX_MONITOR_BUFFER_ALIGNMENT
#define RX_MONITOR_BUFFER_ALIGNMENT	128
#endif
#else /* RXDMA_OPTIMIZATION */
#define RX_DATA_BUFFER_ALIGNMENT	4
#define RX_MONITOR_BUFFER_ALIGNMENT	4
#endif /* RXDMA_OPTIMIZATION */

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
#define DP_WBM2SW_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW1_BM(sw0_bm_id)
/* RBM value used for re-injecting defragmented packets into REO */
#define DP_DEFRAG_RBM(sw0_bm_id)	HAL_RX_BUF_RBM_SW3_BM(sw0_bm_id)
#endif

/* Max buffers in the invalid peer SG list */
#define DP_MAX_INVALID_BUFFERS 10

#ifdef DP_INVALID_PEER_ASSERT
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) \
	do { \
		qdf_assert_always(!(head)); \
		qdf_assert_always(!(tail)); \
	} while (0)
#else
#define DP_PDEV_INVALID_PEER_MSDU_CHECK(head, tail) /* no op */
#endif

#define RX_BUFFER_RESERVATION	0
#define DP_DEFAULT_NOISEFLOOR	(-96)

#define DP_RX_DESC_MAGIC 0xdec0de

#define dp_rx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_info_rl(params...) \
	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_RX, ## params)
#define dp_rx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_RX, params)
#define dp_rx_err_err(params...) \
	QDF_TRACE_ERROR(QDF_MODULE_ID_DP_RX_ERROR, params)

/**
 * enum dp_rx_desc_state
 *
 * @RX_DESC_REPLENISHED: rx desc replenished
 * @RX_DESC_IN_FREELIST: rx desc in freelist
 */
enum dp_rx_desc_state {
	RX_DESC_REPLENISHED,
	RX_DESC_IN_FREELIST,
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_desc_dbg_info
 *
 * @freelist_caller: name of the function that put the rx desc in freelist
 * @freelist_ts: timestamp when the rx desc was put in a freelist
 * @replenish_caller: name of the function that last replenished the rx desc
 * @replenish_ts: last replenish timestamp
 * @prev_nbuf: previous nbuf info
 * @prev_nbuf_data_addr: previous nbuf data address
 */
struct dp_rx_desc_dbg_info {
	char freelist_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t freelist_ts;
	char replenish_caller[QDF_MEM_FUNC_NAME_SIZE];
	uint64_t replenish_ts;
	qdf_nbuf_t prev_nbuf;
	uint8_t *prev_nbuf_data_addr;
};
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * struct dp_rx_desc
 *
 * @nbuf: VA of the "skb" posted
 * @rx_buf_start: VA of the original Rx buffer, before
 *		  movement of any skb->data pointer
 * @paddr_buf_start: PA of the original Rx buffer, before
 *		     movement of any frag pointer
 * @cookie: index into the sw array which holds the sw Rx descriptors.
 *	    Cookie space is 21 bits:
 *	    lower 18 bits -- index
 *	    upper 3 bits  -- pool_id
 * @pool_id: pool ID for which this descriptor was allocated.
 *	     Can only be used if there is no flow steering
 * @chip_id: chip_id of the MLO chip; valid and used only for multi-chip MLO
 * @reuse_nbuf: VA of the "skb" which is being reused
 * @magic:
 * @nbuf_data_addr: VA of nbuf data posted
 * @dbg_info:
 * @in_use: rx_desc is in use
 * @unmapped: used to mark rx_desc as unmapped if the corresponding
 *	      nbuf is already unmapped
 * @in_err_state: nbuf sanity failed for this descriptor
 * @has_reuse_nbuf: the nbuf associated with this desc is also saved in
 *		    the reuse_nbuf field
 */
struct dp_rx_desc {
	qdf_nbuf_t nbuf;
#ifdef WLAN_SUPPORT_PPEDS
	qdf_nbuf_t reuse_nbuf;
#endif
	uint8_t *rx_buf_start;
	qdf_dma_addr_t paddr_buf_start;
	uint32_t cookie;
	uint8_t pool_id;
	uint8_t chip_id;
#ifdef RX_DESC_DEBUG_CHECK
	uint32_t magic;
	uint8_t *nbuf_data_addr;
	struct dp_rx_desc_dbg_info *dbg_info;
#endif
	uint8_t	in_use:1,
		unmapped:1,
		in_err_state:1,
		has_reuse_nbuf:1;
};

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_RX_PRI_SAVE
#define DP_RX_TID_SAVE(_nbuf, _tid) \
	(qdf_nbuf_set_priority(_nbuf, _tid))
#else
#define DP_RX_TID_SAVE(_nbuf, _tid)
#endif

/* RX Descriptor multi-page memory alloc related */
#define DP_RX_DESC_OFFSET_NUM_BITS 8
#define DP_RX_DESC_PAGE_ID_NUM_BITS 8
#define DP_RX_DESC_POOL_ID_NUM_BITS 4

#define DP_RX_DESC_PAGE_ID_SHIFT DP_RX_DESC_OFFSET_NUM_BITS
#define DP_RX_DESC_POOL_ID_SHIFT \
	(DP_RX_DESC_OFFSET_NUM_BITS + DP_RX_DESC_PAGE_ID_NUM_BITS)
#define RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK \
	(((1 << DP_RX_DESC_POOL_ID_NUM_BITS) - 1) << DP_RX_DESC_POOL_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK \
	(((1 << DP_RX_DESC_PAGE_ID_NUM_BITS) - 1) << \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK \
	((1 << DP_RX_DESC_OFFSET_NUM_BITS) - 1)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_POOL_ID_MASK) >> \
	 DP_RX_DESC_POOL_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(_cookie) \
	(((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_PAGE_ID_MASK) >> \
	 DP_RX_DESC_PAGE_ID_SHIFT)
#define DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(_cookie) \
	((_cookie) & RX_DESC_MULTI_PAGE_COOKIE_OFFSET_MASK)
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#define RX_DESC_COOKIE_INDEX_SHIFT	0
#define RX_DESC_COOKIE_INDEX_MASK	0x3ffff /* 18 bits */
#define RX_DESC_COOKIE_POOL_ID_SHIFT	18
#define RX_DESC_COOKIE_POOL_ID_MASK	0x1c0000

#define DP_RX_DESC_COOKIE_MAX \
	(RX_DESC_COOKIE_INDEX_MASK | RX_DESC_COOKIE_POOL_ID_MASK)

#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie) \
	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >> \
	 RX_DESC_COOKIE_POOL_ID_SHIFT)

#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie) \
	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >> \
	 RX_DESC_COOKIE_INDEX_SHIFT)
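
/*
 * Worked example (illustrative only, not part of the driver API): with the
 * 21-bit cookie layout above, pool_id 2 and index 5 encode/decode as:
 *
 *	uint32_t cookie = (2 << RX_DESC_COOKIE_POOL_ID_SHIFT) | 5;
 *						// cookie == 0x80005
 *	DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);	// (0x80005 & 0x1c0000) >> 18 == 2
 *	DP_RX_DESC_COOKIE_INDEX_GET(cookie);	//  0x80005 & 0x3ffff == 5
 */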
#define dp_rx_add_to_free_desc_list(head, tail, new) \
	__dp_rx_add_to_free_desc_list(head, tail, new, __func__)

#define dp_rx_add_to_free_desc_list_reuse(head, tail, new) \
	__dp_rx_add_to_free_desc_list_reuse(head, tail, new, __func__)

#define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				num_buffers, desc_list, tail, req_only) \
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
				  num_buffers, desc_list, tail, req_only, \
				  __func__)

#ifdef WLAN_SUPPORT_RX_FISA
/**
 * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
 * @nbuf: pkt skb pointer
 * @l3_padding: l3 padding
 *
 * Return: None
 */
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
	QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
}
#else
static inline
void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
{
}
#endif

#ifdef DP_RX_SPECIAL_FRAME_NEED
/**
 * dp_rx_is_special_frame() - check whether the RX frame is of a "special"
 *			      type needed by the host
 * @nbuf: RX skb pointer
 * @frame_mask: the mask for the special frame types of interest
 *
 * Check whether the received frame matches any type enabled in @frame_mask
 *
 * Return: true - special frame matched, false - not
 */
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	if (((frame_mask & FRAME_MASK_IPV4_ARP) &&
	     qdf_nbuf_is_ipv4_arp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_DHCP) &&
	     qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV4_EAPOL) &&
	     qdf_nbuf_is_ipv4_eapol_pkt(nbuf)) ||
	    ((frame_mask & FRAME_MASK_IPV6_DHCP) &&
	     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))
		return true;

	return false;
}
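
/*
 * Usage sketch (hypothetical caller, for illustration only): an rx error
 * path that wants to deliver only ARP and EAPOL frames up the stack might
 * combine the two helpers like this:
 *
 *	uint32_t mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (dp_rx_is_special_frame(nbuf, mask))
 *		dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, mask,
 *					    rx_tlv_hdr);
 *	else
 *		qdf_nbuf_free(nbuf);
 */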
/**
 * dp_rx_deliver_special_frame() - Deliver the RX special frame to stack
 *				   if it matches the mask
 * @soc: Datapath soc handler
 * @peer: pointer to DP peer
 * @nbuf: pointer to the skb of RX frame
 * @frame_mask: the mask for special frame needed
 * @rx_tlv_hdr: start of rx tlv header
 *
 * Note: msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and
 * a single nbuf is expected.
 *
 * Return: true - nbuf has been delivered to stack, false - not.
 */
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr);
#else
static inline
bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask)
{
	return false;
}

static inline
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif

#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_rx_data_is_specific() - Used to exclude specific frames that are not
 *			      suitable for deriving rx stats such as rate,
 *			      mcs and nss.
 * @hal_soc_hdl: soc handler
 * @rx_tlv_hdr: rx tlv header
 * @nbuf: RX skb pointer
 *
 * Return: true - a specific frame, not suitable for deriving rx stats.
 *	   false - a common frame, suitable for deriving rx stats.
 */
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf)))
		return true;

	if (!hal_rx_tlv_first_mpdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	if (!hal_rx_msdu_end_first_msdu_get(hal_soc_hdl, rx_tlv_hdr))
		return true;

	/* ARP, EAPOL is neither IPV6 ETH nor IPV4 ETH from L3 level */
	if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
		       QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			return true;
	} else if (qdf_likely(hal_rx_tlv_l3_type_get(hal_soc_hdl, rx_tlv_hdr) ==
			      QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
		if (qdf_nbuf_is_ipv6_dhcp_pkt(nbuf))
			return true;
	} else {
		return true;
	}

	return false;
}
#else
static inline
bool dp_rx_data_is_specific(hal_soc_handle_t hal_soc_hdl,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf)
{
	/*
	 * default return is true to make sure that rx stats
	 * will not be handled when this feature is disabled
	 */
	return true;
}
#endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf, uint8_t link_id)
{
	if (ta_txrx_peer->vdev->opmode == wlan_op_mode_ndi &&
	    qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
		DP_PEER_PER_PKT_STATS_INC(ta_txrx_peer,
					  rx.intra_bss.mdns_no_fwd,
					  1, link_id);
		return false;
	}
	return true;
}
#else
static inline
bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer,
				 qdf_nbuf_t nbuf, uint8_t link_id)
{
	return true;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/* DOC: Offset to obtain LLC hdr
 *
 * In the case of a WiFi parse error, to reach the LLC header from the
 * beginning of the VLAN tag we need to skip 8 bytes:
 * vlan_tag(4) + length(2) + length added by HW(2) = 8 bytes.
 */
#define DP_SKIP_VLAN 8

#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * struct dp_rx_cached_buf - rx cached buffer
 * @node: linked list node
 * @buf: skb buffer
 */
struct dp_rx_cached_buf {
	qdf_list_node_t node;
	qdf_nbuf_t buf;
};
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_xor_block() - xor block of data
 * @b: destination data block
 * @a: source data block
 * @len: length of the data to process
 *
 * Return: None
 */
static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
{
	qdf_size_t i;

	for (i = 0; i < len; i++)
		b[i] ^= a[i];
}

/**
 * dp_rx_rotl() - rotate the bits left
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with left rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
{
	return (val << bits) | (val >> (32 - bits));
}

/**
 * dp_rx_rotr() - rotate the bits right
 * @val: unsigned integer input value
 * @bits: number of bits
 *
 * Return: Integer with right rotated by number of 'bits'
 */
static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
{
	return (val >> bits) | (val << (32 - bits));
}

/**
 * dp_set_rx_queue() - set queue_mapping in skb
 * @nbuf: skb
 * @queue_id: rx queue_id
 *
 * Return: void
 */
#ifdef QCA_OL_RX_MULTIQ_SUPPORT
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
	qdf_nbuf_record_rx_queue(nbuf, queue_id);
}
#else
static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id)
{
}
#endif

/**
 * dp_rx_xswap() - swap the two bytes within each 16-bit half of a word
 * @val: unsigned integer input value
 *
 * Return: Integer with the bytes of each halfword swapped
 */
static inline uint32_t dp_rx_xswap(uint32_t val)
{
	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
}

/**
 * dp_rx_get_le32_split() - assemble 32 bits from four little-endian bytes
 * @b0: byte 0
 * @b1: byte 1
 * @b2: byte 2
 * @b3: byte 3
 *
 * Return: Integer assembled from the four bytes, @b0 being least significant
 */
static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
					    uint8_t b3)
{
	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
}
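
/*
 * Worked example (illustrative only): the byte sequence
 * {0x78, 0x56, 0x34, 0x12}, read in little-endian order, assembles to:
 *
 *	dp_rx_get_le32_split(0x78, 0x56, 0x34, 0x12) == 0x12345678
 *
 * dp_rx_get_le32() below is the byte-array form of the same operation,
 * and dp_rx_put_le32() is its inverse.
 */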
/**
 * dp_rx_get_le32() - read 32 bits stored in little-endian order
 * @p: source byte array holding the 32-bit value
 *
 * Return: Integer with the little-endian 32 bits read from @p
 */
static inline uint32_t dp_rx_get_le32(const uint8_t *p)
{
	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
}

/**
 * dp_rx_put_le32() - write 32 bits in little-endian order
 * @p: destination char array
 * @v: source 32-bit integer
 *
 * Return: None
 */
static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
{
	p[0] = (v) & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Michael MIC block function: mixes the two 32-bit MIC halves in place */
#define dp_rx_michael_block(l, r) \
	do { \
		r ^= dp_rx_rotl(l, 17); \
		l += r; \
		r ^= dp_rx_xswap(l); \
		l += r; \
		r ^= dp_rx_rotl(l, 3); \
		l += r; \
		r ^= dp_rx_rotr(l, 2); \
		l += r; \
	} while (0)
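
/*
 * Sketch of how the helpers above combine in a TKIP Michael MIC
 * computation (hypothetical caller, shown only to illustrate the data
 * flow; the actual MIC verification lives in the defrag path):
 *
 *	uint32_t l = dp_rx_get_le32(key);	// MIC key word K0
 *	uint32_t r = dp_rx_get_le32(key + 4);	// MIC key word K1
 *
 *	for (i = 0; i < data_len; i += 4) {
 *		l ^= dp_rx_get_le32(data + i);	// absorb next 32-bit word
 *		dp_rx_michael_block(l, r);	// mix the MIC state
 *	}
 */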
/**
 * union dp_rx_desc_list_elem_t - freelist element for Rx descriptors
 *
 * @next: Next pointer to form free list
 * @rx_desc: DP Rx descriptor
 */
union dp_rx_desc_list_elem_t {
	union dp_rx_desc_list_elem_t *next;
	struct dp_rx_desc rx_desc;
};

#ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_rx_desc_find() - find dp rx descriptor from page ID and offset
 * @page_id: Page ID
 * @offset: Offset of the descriptor element
 * @rx_pool: RX pool
 *
 * Return: RX descriptor element
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_pool);

static inline
struct dp_rx_desc *dp_get_rx_desc_from_cookie(struct dp_soc *soc,
					      struct rx_desc_pool *pool,
					      uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}
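
/*
 * Worked example (illustrative only): with the multi-page cookie layout
 * (offset in bits 0..7, page_id in bits 8..15, pool_id in bits 16..19),
 * a cookie of 0x00010203 decodes as:
 *
 *	pool_id = (0x00010203 & 0x000f0000) >> 16 == 1
 *	page_id = (0x00010203 & 0x0000ff00) >> 8  == 2
 *	offset  =  0x00010203 & 0x000000ff        == 3
 *
 * The descriptor VA is then cacheable_pages[2] + elem_size * 3 within
 * pool 1, which is exactly what dp_get_rx_desc_from_cookie() computes.
 */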
static inline
struct dp_rx_desc *dp_get_rx_mon_status_desc_from_cookie(struct dp_soc *soc,
							 struct rx_desc_pool *pool,
							 uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	if (qdf_unlikely(pool_id >= NUM_RXDMA_STATUS_RINGS_PER_PDEV))
		return NULL;

	rx_desc_pool = &pool[pool_id];
	rx_desc_elem = (union dp_rx_desc_list_elem_t *)
		(rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		 rx_desc_pool->elem_size * offset);

	return &rx_desc_elem->rx_desc;
}

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc,
					       uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_buf[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc,
					     uint32_t cookie)
{
	return dp_get_rx_desc_from_cookie(soc, &soc->rx_desc_mon[0], cookie);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: Pointer to the Rx descriptor
 */
static inline
struct dp_rx_desc *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc,
						uint32_t cookie)
{
	return dp_get_rx_mon_status_desc_from_cookie(soc,
						     &soc->rx_desc_status[0],
						     cookie);
}
#else
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_cookie_2_va_rxdma_buf() - Converts cookie to a virtual address of
 *				   the Rx descriptor on Rx DMA source ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_rxdma_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_RXDESC_POOLS))
		return NULL;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (qdf_unlikely(index >= rx_desc_pool->pool_size))
		return NULL;

	return &rx_desc_pool->array[index].rx_desc;
}

/**
 * dp_rx_cookie_2_va_mon_buf() - Converts cookie to a virtual address of
 *				 the Rx descriptor on monitor ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_buf(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);

	/* TODO: add sanity checks for pool_id and index */
	return &(soc->rx_desc_mon[pool_id].array[index].rx_desc);
}

/**
 * dp_rx_cookie_2_va_mon_status() - Converts cookie to a virtual address of
 *				    the Rx descriptor on monitor status ring buffer
 * @soc: core txrx main context
 * @cookie: cookie used to lookup virtual address
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_va_mon_status(struct dp_soc *soc, uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
	uint16_t index = DP_RX_DESC_COOKIE_INDEX_GET(cookie);

	/* TODO: add sanity checks for pool_id and index */
	return &(soc->rx_desc_status[pool_id].array[index].rx_desc);
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */

#ifndef QCA_HOST_MODE_WIFI_DISABLED
static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
{
	return vdev->ap_bridge_enabled;
}

#ifdef DP_RX_DESC_COOKIE_INVALIDATE
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	if (qdf_unlikely(HAL_RX_REO_BUF_COOKIE_INVALID_GET(ring_desc)))
		return QDF_STATUS_E_FAILURE;

	HAL_RX_REO_BUF_COOKIE_INVALID_SET(ring_desc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_rx_cookie_reset_invalid_bit() - Reset the invalid bit of the cookie
 *				      field in ring descriptor
 * @ring_desc: ring descriptor
 *
 * Return: None
 */
static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
	HAL_RX_REO_BUF_COOKIE_INVALID_RESET(ring_desc);
}
#else
static inline QDF_STATUS
dp_rx_cookie_check_and_invalidate(hal_ring_desc_t ring_desc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_cookie_reset_invalid_bit(hal_ring_desc_t ring_desc)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#if defined(RX_DESC_MULTI_PAGE_ALLOC) && \
	defined(DP_WAR_VALIDATE_RX_ERR_MSDU_COOKIE)
/**
 * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	uint8_t pool_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_POOL_ID(cookie);
	uint16_t page_id = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_PAGE_ID(cookie);
	uint8_t offset = DP_RX_DESC_MULTI_PAGE_COOKIE_GET_OFFSET(cookie);
	struct rx_desc_pool *rx_desc_pool;

	if (qdf_unlikely(pool_id >= MAX_PDEV_CNT))
		goto fail;

	rx_desc_pool = &soc->rx_desc_buf[pool_id];

	if (page_id >= rx_desc_pool->desc_pages.num_pages ||
	    offset >= rx_desc_pool->desc_pages.num_element_per_page)
		goto fail;

	return true;

fail:
	DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
	return false;
}
#else
/**
 * dp_rx_is_sw_cookie_valid() - check whether the SW cookie is valid
 * @soc: dp soc ref
 * @cookie: Rx buf SW cookie value
 *
 * When multi-page alloc is disabled, SW cookie validity is checked while
 * fetching the Rx descriptor, so there is no need to check here.
 *
 * Return: true if cookie is valid else false
 */
static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc,
					    uint32_t cookie)
{
	return true;
}
#endif

/**
 * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the
 *				    rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx
 *			     descriptors
 * @soc: core txrx main context
 * @pool_size: number of rx descriptors (size of the pool)
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: QDF_STATUS  QDF_STATUS_SUCCESS
 *		       QDF_STATUS_E_NOMEM
 *		       QDF_STATUS_E_FAULT
 */
QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @pool_size: size of the rx descriptor pool
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Convert the pool of memory into a list of rx descriptors and create
 * locks to access this list of rx descriptors.
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
 *					freelist.
 * @soc: core txrx main context
 * @local_desc_list: local desc list provided by the caller
 * @tail: pointer to the last desc of the local desc list
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				      union dp_rx_desc_list_elem_t **local_desc_list,
				      union dp_rx_desc_list_elem_t **tail,
				      uint16_t pool_id,
				      struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_get_free_desc_list() - provide a list of descriptors from
 *				the free rx desc pool.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 * @num_descs: number of descs requested from freelist
 * @desc_list: the descs are attached to this list (output parameter)
 * @tail: pointer to the last desc of the free list (output parameter)
 *
 * Return: number of descs allocated from free list.
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				  struct rx_desc_pool *rx_desc_pool,
				  uint16_t num_descs,
				  union dp_rx_desc_list_elem_t **desc_list,
				  union dp_rx_desc_list_elem_t **tail);

/**
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev);

/**
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev);

/**
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev);

/**
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id);

QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);

/**
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev);

/**
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_buffers_free(struct dp_pdev *pdev);

void dp_rx_pdev_detach(struct dp_pdev *pdev);

/**
 * dp_print_napi_stats() - NAPI stats
 * @soc: soc handle
 */
void dp_print_napi_stats(struct dp_soc *soc);

/**
 * dp_rx_vdev_detach() - detach vdev from dp rx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *	   QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev);

#ifndef QCA_HOST_MODE_WIFI_DISABLED
uint32_t
dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
	      uint8_t reo_ring_num,
	      uint32_t quota);

/**
 * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
 *		       multiple nbufs.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 *
 * This function implements the creation of RX frag_list for cases
 * where an MSDU is spread across multiple nbufs.
 *
 * Return: the head nbuf which holds the complete frag_list.
 */
qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf);

/**
 * dp_rx_is_sg_supported() - SG packets processing supported or not.
 *
 * Return: true when SG processing is supported, else false.
 */
bool dp_rx_is_sg_supported(void);

/**
 * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during
 *				     de-initialization of wifi module.
 * @soc: core txrx main context
 * @pool_id: pool_id which is one of 3 mac_ids
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool);
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during
 *			    de-initialization of wifi module.
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 * @is_mon_pool: true if this is a monitor pool
 *
 * Return: None
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool);

#ifdef DP_RX_MON_MEM_FRAG
/**
 * dp_rx_desc_frag_free() - free the sw rx desc frag called during
 *			    de-initialization of wifi module.
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);
#else
static inline
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
}
#endif

/**
 * dp_rx_desc_pool_free() - free the sw rx desc array called during
 *			    de-initialization of wifi module.
 * @soc: core txrx main context
 * @rx_desc_pool: rx descriptor pool pointer
 *
 * Return: None
 */
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool);

/**
 * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
 *			 pkts to RAW mode simulation to
 *			 decapsulate the pkt.
 * @vdev: vdev on which RAW mode is enabled
 * @nbuf_list: list of RAW pkts to process
 * @peer: peer object from which the pkt was received
 * @link_id: link Id on which the packet is received
 *
 * Return: void
 */
void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
		       struct dp_txrx_peer *peer, uint8_t link_id);

#ifdef RX_DESC_LOGGING
/**
 * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug
 *				 structure
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
	rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info));
}

/**
 * dp_rx_desc_free_dbg_info() - Free rx descriptor debug
 *				structure memory
 * @rx_desc: rx descriptor pointer
 *
 * Return: None
 */
static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
	qdf_mem_free(rx_desc->dbg_info);
}

/**
 * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info
 *				  structure memory
 * @rx_desc: rx descriptor pointer
 * @func_name: name of calling function
 * @flag:
 *
 * Return: None
 */
static
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
	struct dp_rx_desc_dbg_info *info = rx_desc->dbg_info;

	if (!info)
		return;

	if (flag == RX_DESC_REPLENISHED) {
		qdf_str_lcopy(info->replenish_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->replenish_ts = qdf_get_log_timestamp();
	} else {
		qdf_str_lcopy(info->freelist_caller, func_name,
			      QDF_MEM_FUNC_NAME_SIZE);
		info->freelist_ts = qdf_get_log_timestamp();
		info->prev_nbuf = rx_desc->nbuf;
		info->prev_nbuf_data_addr = rx_desc->nbuf_data_addr;
		rx_desc->nbuf_data_addr = NULL;
	}
}
#else
static inline
void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc)
{
}

static inline
void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc,
				const char *func_name, uint8_t flag)
{
}
#endif /* RX_DESC_LOGGING */

/**
 * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
				   union dp_rx_desc_list_elem_t **tail,
				   struct dp_rx_desc *new, const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;
	new->in_use = 0;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}
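
/*
 * Usage sketch (hypothetical, for illustration only): error-handling paths
 * typically collect descriptors on a local free list and return them to
 * the pool in one batch:
 *
 *	union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *	dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
 *	...
 *	dp_rx_add_desc_list_to_free_list(soc, &head, &tail, pool_id,
 *					 rx_desc_pool);
 *
 * Note how the union overlays @next on the descriptor storage itself, so
 * the free list needs no extra memory.
 */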
/**
 * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
 * @soc: DP SOC handle
 * @nbuf: network buffer
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: integer type
 */
uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				   uint8_t mac_id);

/**
 * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
 * @soc: DP SOC handle
 * @mpdu: mpdu for which peer is invalid
 * @mpdu_done: if an mpdu is completed
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
 *	    pool_id have the same mapping)
 *
 * Return: None
 */
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
					qdf_nbuf_t mpdu, bool mpdu_done,
					uint8_t mac_id);

#define DP_RX_HEAD_APPEND(head, elem) \
	do {                                                          \
		qdf_nbuf_set_next((elem), (head));                    \
		(head) = (elem);                                      \
	} while (0)

#define DP_RX_LIST_APPEND(head, tail, elem) \
	do {                                                          \
		if (!(head)) {                                        \
			(head) = (elem);                              \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head) = 1;\
		} else {                                              \
			qdf_nbuf_set_next((tail), (elem));            \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(head)++;  \
		}                                                     \
		(tail) = (elem);                                      \
		qdf_nbuf_set_next((tail), NULL);                      \
	} while (0)

#define DP_RX_MERGE_TWO_LIST(phead, ptail, chead, ctail) \
	do {                                                          \
		if (!(phead)) {                                       \
			(phead) = (chead);                            \
		} else {                                              \
			qdf_nbuf_set_next((ptail), (chead));          \
			QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(phead) += \
				QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(chead); \
		}                                                     \
		(ptail) = (ctail);                                    \
		qdf_nbuf_set_next((ptail), NULL);                     \
	} while (0)
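
/*
 * Usage sketch (hypothetical names, for illustration only): building a
 * per-loop delivery list and then merging it into a pending list:
 *
 *	qdf_nbuf_t head = NULL, tail = NULL;
 *
 *	DP_RX_LIST_APPEND(head, tail, nbuf1);	// list: nbuf1, count = 1
 *	DP_RX_LIST_APPEND(head, tail, nbuf2);	// list: nbuf1->nbuf2, count = 2
 *
 *	DP_RX_MERGE_TWO_LIST(pend_head, pend_tail, head, tail);
 *						// pending list absorbs both;
 *						// its element count is summed
 */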
#if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM)
/*
 * on some third-party platforms, the memory below 0x2000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 50
#define DP_PHY_ADDR_RESERVED 0x2000
#elif defined(BUILD_X86)
/*
 * in M2M emulation platforms (x86) the memory below 0x50000000
 * is reserved for target use, so any memory allocated in this
 * region should not be used by host
 */
#define MAX_RETRY 100
#define DP_PHY_ADDR_RESERVED 0x50000000
#endif

#if defined(QCA_PADDR_CHECK_ON_3RD_PARTY_PLATFORM) || defined(BUILD_X86)
/**
 * dp_check_paddr() - check if current phy address is valid or not
 * @dp_soc: core txrx main context
 * @rx_netbuf: skb buffer
 * @paddr: physical address
 * @rx_desc_pool: struct of rx descriptor pool
 *
 * If the physical address of nbuf->data is below DP_PHY_ADDR_RESERVED,
 * set the nbuf aside and try allocating a new one, retrying up to
 * MAX_RETRY times.
 *
 * This is a temp WAR till we fix it properly.
 *
 * Return: success or failure.
 */
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	uint32_t nbuf_retry = 0;
	int32_t ret;

	if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
		return QDF_STATUS_SUCCESS;

	do {
		dp_debug("invalid phy addr 0x%llx, trying again",
			 (uint64_t)(*paddr));
		nbuf_retry++;
		if ((*rx_netbuf)) {
			/* Not freeing buffer intentionally.
			 * Observed that the same buffer keeps getting
			 * re-allocated, resulting in longer load time and
			 * WMI init timeout.
			 * This buffer is anyway not useful, so skip it:
			 * add such buffers to the invalid list and free
			 * them at driver unload.
			 */
			qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
						     *rx_netbuf,
						     QDF_DMA_FROM_DEVICE,
						     rx_desc_pool->buf_size);
			qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
					   *rx_netbuf);
		}

		*rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
					    rx_desc_pool->buf_size,
					    RX_BUFFER_RESERVATION,
					    rx_desc_pool->buf_alignment,
					    FALSE);

		if (qdf_unlikely(!(*rx_netbuf)))
			return QDF_STATUS_E_FAILURE;

		ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
						 *rx_netbuf,
						 QDF_DMA_FROM_DEVICE,
						 rx_desc_pool->buf_size);

		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
			qdf_nbuf_free(*rx_netbuf);
			*rx_netbuf = NULL;
			continue;
		}

		*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);

		if (qdf_likely(*paddr > DP_PHY_ADDR_RESERVED))
			return QDF_STATUS_SUCCESS;
	} while (nbuf_retry < MAX_RETRY);

	if ((*rx_netbuf)) {
		qdf_nbuf_unmap_nbytes_single(dp_soc->osdev,
					     *rx_netbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_queue_add(&dp_soc->invalid_buf_queue,
				   *rx_netbuf);
	}

	return QDF_STATUS_E_FAILURE;
}
#else
static inline
int dp_check_paddr(struct dp_soc *dp_soc,
		   qdf_nbuf_t *rx_netbuf,
		   qdf_dma_addr_t *paddr,
		   struct rx_desc_pool *rx_desc_pool)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
 *				   the MSDU Link Descriptor
 * @soc: core txrx main context
 * @buf_info: buf_info includes the cookie that is used to look up the
 *	      virtual address of the link descriptor, after deriving the
 *	      page id and the offset (or index) of the desc on the
 *	      associated page.
 *
 * This is the VA of the link descriptor, that HAL layer later uses to
 * retrieve the list of MSDUs for a given MPDU.
 *
 * Return: void *: Virtual Address of the Rx descriptor
 */
static inline
void *dp_rx_cookie_2_link_desc_va(struct dp_soc *soc,
				  struct hal_buf_info *buf_info)
{
	void *link_desc_va;
	struct qdf_mem_multi_page_t *pages;
	uint16_t page_id = LINK_DESC_COOKIE_PAGE_ID(buf_info->sw_cookie);

	pages = &soc->link_desc_pages;
	if (!pages)
		return NULL;

	if (qdf_unlikely(page_id >= pages->num_pages))
		return NULL;

	link_desc_va = pages->dma_pages[page_id].page_v_addr_start +
		(buf_info->paddr - pages->dma_pages[page_id].page_p_addr);

	return link_desc_va;
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DISABLE_EAPOL_INTRABSS_FWD
#ifdef WLAN_FEATURE_11BE_MLO
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	struct qdf_mac_addr *self_mld_mac_addr =
		(struct qdf_mac_addr *)vdev->mld_mac_addr.raw;

	return qdf_is_macaddr_equal(self_mld_mac_addr,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}
#else
static inline bool dp_nbuf_dst_addr_is_mld_addr(struct dp_vdev *vdev,
						qdf_nbuf_t nbuf)
{
	return false;
}
#endif

static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev,
						 qdf_nbuf_t nbuf)
{
	return qdf_is_macaddr_equal((struct qdf_mac_addr *)vdev->mac_addr.raw,
				    (struct qdf_mac_addr *)qdf_nbuf_data(nbuf) +
				    QDF_NBUF_DEST_MAC_OFFSET);
}

/**
 * dp_rx_intrabss_eapol_drop_check() - drop intra-BSS EAPOL pkts whose DA
 *				       is neither the vdev MAC address nor
 *				       the MLD address; forwarding such
 *				       pkts is not allowed.
 * @soc: core txrx main context
 * @ta_txrx_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 *
 * Return: true if the pkt is dropped (and freed), else false
 */
static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
			 !(dp_nbuf_dst_addr_is_self_addr(ta_txrx_peer->vdev,
							 nbuf) ||
			   dp_nbuf_dst_addr_is_mld_addr(ta_txrx_peer->vdev,
							nbuf)))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
		return true;
	}

	return false;
}
#else /* DISABLE_EAPOL_INTRABSS_FWD */
static inline
bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc,
				     struct dp_txrx_peer *ta_txrx_peer,
				     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* DISABLE_EAPOL_INTRABSS_FWD */
/**
 * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 * @link_id: link id on which the packet is received
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc,
			     struct dp_txrx_peer *ta_peer,
			     uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			     struct cdp_tid_rx_stats *tid_stats,
			     uint8_t link_id);

/**
 * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
 * @soc: core txrx main context
 * @ta_peer: source peer entry
 * @tx_vdev_id: VDEV ID for Intra-BSS TX
 * @rx_tlv_hdr: start address of rx tlvs
 * @nbuf: nbuf that has to be intrabss forwarded
 * @tid_stats: tid stats pointer
 * @link_id: link id on which the packet is received
 *
 * Return: bool: true if it is forwarded else false
 */
bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc,
			      struct dp_txrx_peer *ta_peer,
			      uint8_t tx_vdev_id,
			      uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
			      struct cdp_tid_rx_stats *tid_stats,
			      uint8_t link_id);
/**
 * dp_rx_defrag_concat() - Concatenate the fragments
 *
 * @dst: destination pointer to the buffer
 * @src: source pointer from where the fragment payload is to be copied
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
{
	/*
	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
	 * to provide space for src, the headroom portion is copied from
	 * the original dst buffer to the larger new dst buffer.
	 * (This is needed, because the headroom of the dst buffer
	 * contains the rx desc.)
	 */
	if (!qdf_nbuf_cat(dst, src)) {
		/*
		 * qdf_nbuf_cat does not free the src memory, so free
		 * the src nbuf before returning.
		 * In the failure case, the caller takes care of freeing
		 * the nbuf.
		 */
		qdf_nbuf_free(src);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_DEFRAG_ERROR;
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
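/*
 * Example (illustrative sketch, not part of the driver API): collapsing a
 * fragment chain into the head nbuf during reassembly. `head`, `frag` and
 * `next` are assumed caller locals; on concat failure the caller still
 * owns `frag` and frees it itself, per the contract above.
 *
 *	while (frag) {
 *		next = qdf_nbuf_next(frag);
 *		if (dp_rx_defrag_concat(head, frag) != QDF_STATUS_SUCCESS) {
 *			qdf_nbuf_free(frag);
 *			break;
 *		}
 *		frag = next;
 *	}
 */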
#ifndef FEATURE_WDS
void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
		    struct dp_txrx_peer *ta_txrx_peer, qdf_nbuf_t nbuf);

static inline QDF_STATUS
dp_rx_ast_set_active(struct dp_soc *soc, uint16_t sa_idx, bool is_active)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_rx_wds_srcport_learn(struct dp_soc *soc,
			uint8_t *rx_tlv_hdr,
			struct dp_txrx_peer *txrx_peer,
			qdf_nbuf_t nbuf,
			struct hal_rx_msdu_metadata msdu_metadata)
{
}

static inline void
dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc,
			    struct dp_peer *ta_peer, qdf_nbuf_t nbuf,
			    struct hal_rx_msdu_metadata msdu_end_info,
			    bool ad4_valid, bool chfrag_start)
{
}
#endif

/**
 * dp_rx_desc_dump() - dump the sw rx descriptor
 *
 * @rx_desc: sw rx descriptor
 */
static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc)
{
	dp_info("rx_desc->nbuf: %pK, rx_desc->cookie: %d, rx_desc->pool_id: %d, rx_desc->in_use: %d, rx_desc->unmapped: %d",
		rx_desc->nbuf, rx_desc->cookie, rx_desc->pool_id,
		rx_desc->in_use, rx_desc->unmapped);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet.
 *				      In qwrap mode, packets originating from
 *				      any vdev should not loop back and
 *				      should be dropped.
 * @vdev: vdev on which rx packet is received
 * @nbuf: rx pkt
 *
 * Return: true if the packet is a loopback packet to be dropped, else false
 */
#if ATH_SUPPORT_WRAP
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	struct dp_vdev *psta_vdev;
	struct dp_pdev *pdev = vdev->pdev;
	uint8_t *data = qdf_nbuf_data(nbuf);

	if (qdf_unlikely(vdev->proxysta_vdev)) {
		/* In qwrap isolation mode, allow loopback packets as all
		 * packets go to RootAP and loop back on the mpsta.
		 */
		if (vdev->isolation_vdev)
			return false;

		TAILQ_FOREACH(psta_vdev, &pdev->vdev_list, vdev_list_elem) {
			if (qdf_unlikely(psta_vdev->proxysta_vdev &&
					 !qdf_mem_cmp(psta_vdev->mac_addr.raw,
						      &data[QDF_MAC_ADDR_SIZE],
						      QDF_MAC_ADDR_SIZE))) {
				/* Drop the packet if the source address is
				 * equal to any of the vdev addresses.
				 */
				return true;
			}
		}
	}

	return false;
}
#else
static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev,
						  qdf_nbuf_t nbuf)
{
	return false;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
#if defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) ||\
	defined(WLAN_SUPPORT_RX_TAG_STATISTICS) ||\
	defined(WLAN_SUPPORT_RX_FLOW_TAG)
#include "dp_rx_tag.h"
#endif

#if !defined(WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG) &&\
	!defined(WLAN_SUPPORT_RX_FLOW_TAG)
/**
 * dp_rx_update_protocol_tag() - Reads CCE metadata from the RX MSDU end TLV
 *				 and sets the corresponding tag in the QDF
 *				 packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @ring_index: REO ring number, not used for error & monitor ring
 * @is_reo_exception: flag to indicate if rx from REO ring or exception ring
 * @is_update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev,
			  qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
			  uint16_t ring_index,
			  bool is_reo_exception, bool is_update_stats)
{
}
#endif

#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_rx_err_cce_drop() - Reads CCE metadata from the RX MSDU end TLV
 *			  and returns whether the cce metadata matches
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 *
 * Return: bool
 */
static inline bool
dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev,
		   qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	return false;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */

#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_rx_update_flow_tag() - Reads FSE metadata from the RX MSDU end TLV
 *			     and sets the corresponding tag in the QDF packet
 * @soc: core txrx main context
 * @vdev: vdev on which the packet is received
 * @nbuf: QDF pkt buffer on which the protocol tag should be set
 * @rx_tlv_hdr: base address where the RX TLVs start
 * @update_stats: flag to indicate whether to update stats or not
 *
 * Return: void
 */
static inline void
dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
		      qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, bool update_stats)
{
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#define CRITICAL_BUFFER_THRESHOLD 64
/**
 * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs;
 *				 called during dp rx initialization
 *				 and at the end of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 * @req_only: If true, don't replenish more than req buffers
 * @func_name: name of the caller function
 *
 * Return: success or failure
 */
QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				     struct dp_srng *dp_rxdma_srng,
				     struct rx_desc_pool *rx_desc_pool,
				     uint32_t num_req_buffers,
				     union dp_rx_desc_list_elem_t **desc_list,
				     union dp_rx_desc_list_elem_t **tail,
				     bool req_only,
				     const char *func_name);
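/*
 * Example (illustrative sketch, not part of the driver API): handing the
 * descriptors reaped in one dp_rx_process() pass back to the rxdma ring.
 * `head`, `tail` and `rx_bufs_reaped` are assumed caller locals built up
 * while reaping.
 *
 *	if (rx_bufs_reaped)
 *		__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng,
 *					  rx_desc_pool, rx_bufs_reaped,
 *					  &head, &tail, false, __func__);
 */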
/**
 * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs,
 *					using direct APIs to invalidate the
 *					nbuf and get its physical address
 *					instead of the map API; called during
 *					dp rx initialization and at the end
 *					of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				 struct dp_srng *dp_rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail);

/**
 * __dp_rx_comp2refill_replenish() - replenish rxdma ring with rx nbufs,
 *				     using direct APIs to invalidate the
 *				     nbuf and get its physical address
 *				     instead of the map API; called during
 *				     dp rx initialization and at the end
 *				     of dp_rx_process.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp_rx_process
 *	       or NULL during dp rx initialization or out of buffer
 *	       interrupt.
 * @tail: tail of descs list
 *
 * Return: success or failure
 */
QDF_STATUS
__dp_rx_comp2refill_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
			      struct dp_srng *dp_rxdma_srng,
			      struct rx_desc_pool *rx_desc_pool,
			      uint32_t num_req_buffers,
			      union dp_rx_desc_list_elem_t **desc_list,
			      union dp_rx_desc_list_elem_t **tail);

/**
 * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs,
 *					   using direct APIs to invalidate the
 *					   nbuf and get its physical address
 *					   instead of the map API; called when
 *					   the low threshold interrupt is
 *					   triggered
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 *
 * Return: success or failure
 */
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
				    struct dp_srng *dp_rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool);

/**
 * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs,
 *					  using direct APIs to invalidate the
 *					  nbuf and get its physical address
 *					  instead of the map API; called during
 *					  dp rx initialization.
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: success or failure
 */
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
					      uint32_t mac_id,
					      struct dp_srng *dp_rxdma_srng,
					      struct rx_desc_pool *rx_desc_pool,
					      uint32_t num_req_buffers);
/**
 * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs,
 *				 called during dp rx initialization
 *
 * @dp_soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp rxdma circular ring
 * @rx_desc_pool: Pointer to free Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 *
 * Return: success or failure
 */
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers);

/**
 * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: pointer to peer
 *
 * This function allocates memory for mesh receive stats and fills in the
 * required stats. It stores the memory address in the skb cb.
 *
 * Return: void
 */
void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			   uint8_t *rx_tlv_hdr,
			   struct dp_txrx_peer *txrx_peer);

/**
 * dp_rx_filter_mesh_packets() - Filters unwanted mesh packets
 * @vdev: DP Virtual device handle
 * @nbuf: Buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 *
 * This checks if the received packet matches any filter-out category
 * and drops the packet if it matches.
 *
 * Return: QDF_STATUS_SUCCESS indicates drop,
 *	   QDF_STATUS_E_FAILURE indicates to not drop
 */
QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr);

int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_txrx_peer *peer);
/**
 * dp_rx_compute_delay() - Compute and fill in all timestamps
 *			   in the correct fields
 * @vdev: vdev handle
 * @nbuf: network buffer
 *
 * Return: none
 */
void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf);

#ifdef QCA_PEER_EXT_STATS
/**
 * dp_rx_compute_tid_delay() - Compute per TID delay stats
 * @stats: TID delay stats to update
 * @nbuf: NBuffer
 *
 * Return: Void
 */
void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
			     qdf_nbuf_t nbuf);
#endif /* QCA_PEER_EXT_STATS */
#ifdef WLAN_SUPPORT_PPEDS
static inline
void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
	rx_desc->reuse_nbuf = nbuf;
	rx_desc->has_reuse_nbuf = true;
}

/**
 * __dp_rx_add_to_free_desc_list_reuse() - Adds a descriptor to a local free
 *					   descriptor list that will be reused
 *
 * @head: pointer to the head of local free list
 * @tail: pointer to the tail of local free list
 * @new: new descriptor that is added to the free list
 * @func_name: caller func name
 *
 * Return: void
 */
static inline
void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
					 union dp_rx_desc_list_elem_t **tail,
					 struct dp_rx_desc *new,
					 const char *func_name)
{
	qdf_assert(head && new);

	dp_rx_desc_update_dbg_info(new, func_name, RX_DESC_IN_FREELIST);

	new->nbuf = NULL;

	((union dp_rx_desc_list_elem_t *)new)->next = *head;
	*head = (union dp_rx_desc_list_elem_t *)new;
	/* reset tail if head->next is NULL */
	if (!*tail || !(*head)->next)
		*tail = *head;
}
#else
static inline
void dp_rx_set_reuse_nbuf(struct dp_rx_desc *rx_desc, qdf_nbuf_t nbuf)
{
}

static inline
void __dp_rx_add_to_free_desc_list_reuse(union dp_rx_desc_list_elem_t **head,
					 union dp_rx_desc_list_elem_t **tail,
					 struct dp_rx_desc *new,
					 const char *func_name)
{
}
#endif
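/*
 * Example (illustrative sketch, not part of the driver API): building the
 * local free list while reaping so the descriptors (and, with PPE-DS,
 * their reuse nbufs) can feed the next replenish. `head` and `tail` are
 * assumed caller locals initialized to NULL.
 *
 *	__dp_rx_add_to_free_desc_list_reuse(&head, &tail, rx_desc,
 *					    __func__);
 */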
#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_check_magic() - check the magic value in dp_rx_desc
 * @rx_desc: rx descriptor pointer
 *
 * Return: true, if magic is correct, else false.
 */
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	if (qdf_unlikely(rx_desc->magic != DP_RX_DESC_MAGIC))
		return false;

	rx_desc->magic = 0;
	return true;
}

/**
 * dp_rx_desc_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that we are associating a nbuf which is mapped
 *
 * Return: none
 */
static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	rx_desc->unmapped = 0;
	rx_desc->nbuf_data_addr = (uint8_t *)qdf_nbuf_data(rx_desc->nbuf);
	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
}

/**
 * dp_rx_desc_frag_prep() - prepare rx desc
 * @rx_desc: rx descriptor pointer to be prepared
 * @nbuf_frag_info_t: struct dp_rx_nbuf_frag_info *
 *
 * Note: assumption is that the frag address is mapped
 *
 * Return: none
 */
#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->magic = DP_RX_DESC_MAGIC;
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

/**
 * dp_rx_desc_paddr_sanity_check() - paddr sanity for ring desc vs rx_desc
 * @rx_desc: rx descriptor
 * @ring_paddr: paddr obtained from the ring
 *
 * Return: true if the ring paddr matches the rx_desc nbuf paddr, else false
 */
static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return (ring_paddr == qdf_nbuf_get_frag_paddr(rx_desc->nbuf, 0));
}
#else
static inline bool dp_rx_desc_check_magic(struct dp_rx_desc *rx_desc)
{
	return true;
}

static inline
void dp_rx_desc_prep(struct dp_rx_desc *rx_desc,
		     struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->nbuf = (nbuf_frag_info_t->virt_addr).nbuf;
	dp_rx_set_reuse_nbuf(rx_desc, rx_desc->nbuf);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}

#ifdef DP_RX_MON_MEM_FRAG
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
	rx_desc->rx_buf_start =
		(uint8_t *)((nbuf_frag_info_t->virt_addr).vaddr);
	rx_desc->paddr_buf_start = nbuf_frag_info_t->paddr;
	rx_desc->unmapped = 0;
}
#else
static inline
void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc,
			  struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
{
}
#endif /* DP_RX_MON_MEM_FRAG */

static inline
bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
				   uint64_t ring_paddr)
{
	return true;
}
#endif /* RX_DESC_DEBUG_CHECK */
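/*
 * Illustrative pairing (a sketch, not lifted from the .c files): the
 * refill path arms the descriptor and the reap path consumes the magic
 * exactly once, so a second reap of the same descriptor is caught.
 *
 *	dp_rx_desc_prep(rx_desc, &nbuf_frag_info);	refill path
 *	...
 *	if (!dp_rx_desc_check_magic(rx_desc))		reap path
 *		dp_rx_desc_dump(rx_desc);
 */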
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *				  monitor destination ring via frag.
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Enable this flag only for monitor destination buffer processing
 * if DP_RX_MON_MEM_FRAG feature is enabled.
 * If the flag is set, frag based functions will be called for alloc,
 * map, prep desc and free ops for the desc buffer; else the normal
 * nbuf based functions will be called.
 *
 * Return: None
 */
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc);

#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
			     uint8_t tid)
{
	return false;
}
#else
/**
 * dp_rx_multipass_process() - insert vlan tag on frames for traffic separation
 * @txrx_peer: DP txrx peer handle
 * @nbuf: skb
 * @tid: traffic priority
 *
 * Return: bool: true in case of success else false
 * Success is considered if:
 *  i.  the frame has a vlan header
 *  ii. the frame comes from a different peer and doesn't need multipass
 *	processing
 * Failure is considered if:
 *  i.  the frame comes from a multipass peer but doesn't contain a vlan
 *	header
 * In the failure case, such frames are dropped.
 */
bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf,
			     uint8_t tid);
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifndef WLAN_RX_PKT_CAPTURE_ENH
static inline
QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev,
					  struct dp_peer *peer_handle,
					  bool value, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: txrx peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
				  struct dp_vdev *vdev,
				  struct dp_txrx_peer *peer,
				  qdf_nbuf_t nbuf_head,
				  qdf_nbuf_t nbuf_tail);

#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_rx_eapol_deliver_to_stack() - deliver pkts to network stack
 * Caller to hold peer refcount and check for valid peer
 * @soc: soc
 * @vdev: vdev
 * @peer: peer
 * @nbuf_head: skb list head
 * @nbuf_tail: skb list tail
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
					struct dp_vdev *vdev,
					struct dp_txrx_peer *peer,
					qdf_nbuf_t nbuf_head,
					qdf_nbuf_t nbuf_tail);
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	do { \
		if (!soc->rx_buff_pool[rx_desc->pool_id].is_initialized) { \
			DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf); \
			break; \
		} \
		DP_RX_LIST_APPEND(ebuf_head, ebuf_tail, rx_desc->nbuf); \
		if (!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)) { \
			if (!dp_rx_buffer_pool_refill(soc, ebuf_head, \
						      rx_desc->pool_id)) \
				DP_RX_MERGE_TWO_LIST(head, tail, \
						     ebuf_head, ebuf_tail); \
			ebuf_head = NULL; \
			ebuf_tail = NULL; \
		} \
	} while (0)
#else
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
	DP_RX_LIST_APPEND(head, tail, rx_desc->nbuf)
#endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
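/*
 * Example (illustrative sketch, not part of the driver API): inside the
 * reap loop each completed descriptor's nbuf goes through the macro, so
 * the emergency buffer-pool bookkeeping stays transparent to the caller.
 * `head`, `tail`, `ebuf_head` and `ebuf_tail` are assumed caller locals
 * initialized to NULL.
 *
 *	DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc);
 */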
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @is_offload: flag indicating an offloaded rx packet
 * @netbuf: Buffer pointer
 *
 * This function is used to deliver an rx packet to packet capture.
 *
 * Return: None
 */
void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
				  uint16_t peer_id, uint32_t is_offload,
				  qdf_nbuf_t netbuf);
void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
					  uint32_t is_offload);
#else
static inline void
dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			     uint16_t peer_id, uint32_t is_offload,
			     qdf_nbuf_t netbuf)
{
}

static inline void
dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
				     uint32_t is_offload)
{
}
#endif

#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef FEATURE_MEC
/**
 * dp_rx_mcast_echo_check() - check if the mcast pkt is looped
 *			      back on the same vap or a different vap.
 * @soc: core DP main context
 * @peer: dp peer handler
 * @rx_tlv_hdr: start of the rx TLV header
 * @nbuf: pkt buffer
 *
 * Return: bool (true if it is a looped back pkt else false)
 */
bool dp_rx_mcast_echo_check(struct dp_soc *soc,
			    struct dp_txrx_peer *peer,
			    uint8_t *rx_tlv_hdr,
			    qdf_nbuf_t nbuf);
#else
static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc,
					  struct dp_txrx_peer *peer,
					  uint8_t *rx_tlv_hdr,
					  qdf_nbuf_t nbuf)
{
	return false;
}
#endif /* FEATURE_MEC */
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

#ifdef RECEIVE_OFFLOAD
/**
 * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
 * @soc: DP SOC handle
 * @rx_tlv: RX TLV received for the msdu
 * @msdu: msdu for which GRO info needs to be filled
 * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
 *
 * Return: None
 */
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt);
#else
static inline
void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
			 qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
{
}
#endif
/**
 * dp_rx_msdu_stats_update() - update per msdu stats.
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 * @txrx_peer: pointer to the txrx peer object.
 * @ring_id: reo dest ring number on which pkt is reaped.
 * @tid_stats: per tid rx stats.
 * @link_id: link id on which the packet is received
 *
 * update all the per msdu stats for that nbuf.
 *
 * Return: void
 */
void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
			     uint8_t *rx_tlv_hdr,
			     struct dp_txrx_peer *txrx_peer,
			     uint8_t ring_id,
			     struct cdp_tid_rx_stats *tid_stats,
			     uint8_t link_id);

/**
 * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
 *				      no corresponding peer is found
 * @soc: core txrx main context
 * @nbuf: pkt skb pointer
 *
 * This function will try to deliver some RX special frames to the stack
 * even when no matching peer is found. For instance, in the LFR case some
 * EAPOL data will be sent to the host before peer_map is done.
 *
 * Return: None
 */
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef DP_RX_DROP_RAW_FRM
/**
 * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
 * @nbuf: pkt skb pointer
 *
 * Return: true - raw frame, dropped
 *	   false - not raw frame, do nothing
 */
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf);
#else
static inline
bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_rx_update_stats() - Update soc level rx packet count
 * @soc: DP soc handle
 * @nbuf: nbuf received
 *
 * Return: none
 */
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
#else
static inline
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
{
}
#endif
/**
 * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
 * @pdev: dp_pdev handle
 * @nbuf: pointer to the first msdu of an amsdu.
 * @rx_tlv_hdr: pointer to the start of RX TLV headers.
 *
 * The ipsumed field of the skb is set based on whether HW validated the
 * IP/TCP/UDP checksum.
 *
 * Return: void
 */
#if defined(MAX_PDEV_CNT) && (MAX_PDEV_CNT == 1)
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
	qdf_nbuf_rx_cksum_t cksum = {0};
	//TODO - Move this to ring desc api
	//HAL_RX_MSDU_DESC_IP_CHKSUM_FAIL_GET
	//HAL_RX_MSDU_DESC_TCP_UDP_CHKSUM_FAIL_GET
	uint32_t ip_csum_err, tcp_udp_csum_er;

	hal_rx_tlv_csum_err_get(pdev->soc->hal_soc, rx_tlv_hdr, &ip_csum_err,
				&tcp_udp_csum_er);

	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		if (qdf_likely(!ip_csum_err)) {
			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
			if (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
			    qdf_nbuf_is_ipv4_tcp_pkt(nbuf)) {
				if (qdf_likely(!tcp_udp_csum_er))
					cksum.csum_level = 1;
				else
					DP_STATS_INCC(pdev,
						      err.tcp_udp_csum_err, 1,
						      tcp_udp_csum_er);
			}
		} else {
			DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
		}
	} else if (qdf_nbuf_is_ipv6_udp_pkt(nbuf) ||
		   qdf_nbuf_is_ipv6_tcp_pkt(nbuf)) {
		if (qdf_likely(!tcp_udp_csum_er))
			cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
		else
			DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1,
				      tcp_udp_csum_er);
	} else {
		cksum.l4_result = QDF_NBUF_RX_CKSUM_NONE;
	}

	qdf_nbuf_set_rx_cksum(nbuf, &cksum);
}
#else
static inline
void dp_rx_cksum_offload(struct dp_pdev *pdev,
			 qdf_nbuf_t nbuf,
			 uint8_t *rx_tlv_hdr)
{
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
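/*
 * Summary of the checksum mapping implemented above (derived from the
 * code, for quick reference):
 *
 *	ipv4, ip csum ok, tcp/udp csum ok -> TCP_UDP_UNNECESSARY, level 1
 *	ipv4, ip csum ok, l4 csum bad     -> TCP_UDP_UNNECESSARY, level 0,
 *					     tcp_udp_csum_err incremented
 *	ipv4, ip csum bad                 -> ip_csum_err incremented
 *	ipv6 tcp/udp, l4 csum ok          -> TCP_UDP_UNNECESSARY
 *	anything else                     -> QDF_NBUF_RX_CKSUM_NONE
 */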
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	bool limit_hit = false;

	limit_hit = (num_reaped >= max_reap_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	return cfg->rx_reap_loop_pkt_limit;
}
#else
static inline
bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
				   int max_reap_limit)
{
	return false;
}

static inline
bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}

static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
{
	return 0;
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
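/*
 * Example (illustrative sketch, not part of the driver API): bounding how
 * long one dp_rx_process() invocation may hold the ring. `num_reaped` is
 * an assumed caller local; "ring not empty" stands in for the real reap
 * condition.
 *
 *	max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
 *	while (ring not empty) {
 *		... reap one entry ...
 *		if (dp_rx_reap_loop_pkt_limit_hit(soc, ++num_reaped,
 *						  max_reap_limit))
 *			break;
 *	}
 */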
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);

static inline uint16_t
dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
	return soc->arch_ops.dp_rx_peer_metadata_peer_id_get(soc,
							     peer_metadata);
}

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_rx_nbuf_set_link_id_from_tlv() - Set link id in nbuf cb
 * @soc: SOC handle
 * @tlv_hdr: rx tlv header
 * @nbuf: nbuf pointer
 *
 * Return: None
 */
static inline void
dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
				qdf_nbuf_t nbuf)
{
	uint32_t peer_metadata = hal_rx_tlv_peer_meta_data_get(soc->hal_soc,
							       tlv_hdr);

	if (soc->arch_ops.dp_rx_peer_set_link_id)
		soc->arch_ops.dp_rx_peer_set_link_id(nbuf, peer_metadata);
}

/**
 * dp_rx_set_nbuf_band() - Set band info in nbuf cb
 * @nbuf: nbuf pointer
 * @txrx_peer: txrx_peer pointer
 * @link_id: Peer Link ID
 *
 * Return: None
 */
static inline void
dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
		    uint8_t link_id)
{
	qdf_nbuf_rx_set_band(nbuf, txrx_peer->band[link_id]);
}
#else
static inline void
dp_rx_nbuf_set_link_id_from_tlv(struct dp_soc *soc, uint8_t *tlv_hdr,
				qdf_nbuf_t nbuf)
{
}

static inline void
dp_rx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
		    uint8_t link_id)
{
}
#endif
/**
 * dp_rx_desc_pool_init_generic() - Generic Rx descriptors initialization
 * @soc: SOC handle
 * @rx_desc_pool: pointer to RX descriptor pool
 * @pool_id: pool ID
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
					struct rx_desc_pool *rx_desc_pool,
					uint32_t pool_id);

void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t pool_id);

/**
 * dp_rx_pkt_tracepoints_enabled() - Get the state of rx pkt tracepoints
 *
 * Return: True if any rx pkt tracepoint is enabled else false
 */
static inline
bool dp_rx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_rx_tcp_pkt_enabled() ||
		qdf_trace_dp_rx_udp_pkt_enabled() ||
		qdf_trace_dp_rx_pkt_enabled());
}
#ifdef FEATURE_DIRECT_LINK
/**
 * dp_audio_smmu_map() - Map memory region into Audio SMMU CB
 * @qdf_dev: pointer to QDF device structure
 * @paddr: physical address
 * @iova: DMA address
 * @size: memory region size
 *
 * Return: 0 on success else failure code
 */
static inline
int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
		      qdf_dma_addr_t iova, qdf_size_t size)
{
	return pld_audio_smmu_map(qdf_dev->dev, paddr, iova, size);
}

/**
 * dp_audio_smmu_unmap() - Remove memory region mapping from Audio SMMU CB
 * @qdf_dev: pointer to QDF device structure
 * @iova: DMA address
 * @size: memory region size
 *
 * Return: None
 */
static inline
void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
			 qdf_size_t size)
{
	pld_audio_smmu_unmap(qdf_dev->dev, iova, size);
}
#else
static inline
int dp_audio_smmu_map(qdf_device_t qdf_dev, qdf_dma_addr_t paddr,
		      qdf_dma_addr_t iova, qdf_size_t size)
{
	return 0;
}

static inline
void dp_audio_smmu_unmap(qdf_device_t qdf_dev, qdf_dma_addr_t iova,
			 qdf_size_t size)
{
}
#endif
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
						  rxdma_srng,
						  rx_desc_pool,
						  num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
					 num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_comp2refill_replenish(struct dp_soc *soc, uint32_t mac_id,
				 struct dp_srng *rxdma_srng,
				 struct rx_desc_pool *rx_desc_pool,
				 uint32_t num_req_buffers,
				 union dp_rx_desc_list_elem_t **desc_list,
				 union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_comp2refill_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				      num_req_buffers, desc_list, tail);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
					    rx_desc_pool);
}

#ifndef QCA_DP_NBUF_FAST_RECYCLE_CHECK
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
#else
#define L3_HEADER_PAD 2
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	if (nbuf->recycled_for_ds)
		return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);

	if (unlikely(!nbuf->fast_recycled)) {
		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
					      (void *)(nbuf->data + buf_size));
	}

	DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
	nbuf->fast_recycled = 0;

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
#endif

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + buf_size));

	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}

#if !defined(SPECULATIVE_READ_DISABLED)
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;
	qdf_nbuf_t nbuf;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	nbuf = rx_desc->nbuf;

	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
				      (void *)(nbuf->data +
					       rx_desc_pool->buf_size));
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	qdf_nbuf_dma_inv_range((void *)nbuf->data,
			       (void *)(nbuf->data + rx_desc_pool->buf_size));
}
#else
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
}
#endif

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size,
				     RX_BUFFER_RESERVATION,
				     rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free_simple(nbuf);
}
#else
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
					    struct dp_srng *rxdma_srng,
					    struct rx_desc_pool *rx_desc_pool,
					    uint32_t num_req_buffers)
{
	return dp_pdev_rx_buffers_attach(soc, mac_id,
					 rxdma_srng,
					 rx_desc_pool,
					 num_req_buffers);
}

static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				    struct dp_srng *rxdma_srng,
				    struct rx_desc_pool *rx_desc_pool,
				    uint32_t num_req_buffers,
				    union dp_rx_desc_list_elem_t **desc_list,
				    union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
				       struct dp_srng *rxdma_srng,
				       struct rx_desc_pool *rx_desc_pool,
				       uint32_t num_req_buffers,
				       union dp_rx_desc_list_elem_t **desc_list,
				       union dp_rx_desc_list_elem_t **tail)
{
	dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				num_req_buffers, desc_list, tail, false);
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
				      qdf_nbuf_t nbuf,
				      uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
			       qdf_nbuf_t nbuf,
			       uint32_t buf_size)
{
	return (qdf_dma_addr_t)NULL;
}

static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
		      struct dp_rx_desc *rx_desc,
		      uint8_t reo_ring_num)
{
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
	dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
	dp_audio_smmu_unmap(soc->osdev,
			    QDF_NBUF_CB_PADDR(rx_desc->nbuf),
			    rx_desc_pool->buf_size);

	if (qdf_atomic_read(&soc->ipa_mapped))
		dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
						  rx_desc_pool->buf_size,
						  false, __func__, __LINE__);

	qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
				     QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
	dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
}

static inline
void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
			   struct rx_desc_pool *rx_desc_pool,
			   qdf_nbuf_t nbuf)
{
	dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
			    rx_desc_pool->buf_size);

	if (qdf_atomic_read(&soc->ipa_mapped))
		dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
						  rx_desc_pool->buf_size,
						  false, __func__, __LINE__);

	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
				     rx_desc_pool->buf_size);
}

static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
				 uint32_t bufs_reaped)
{
	int cpu_id = qdf_get_cpu();

	DP_STATS_INC(soc, rx.ring_packets[cpu_id][ring_id], bufs_reaped);
}

static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool)
{
	return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
			      RX_BUFFER_RESERVATION,
			      rx_desc_pool->buf_alignment, FALSE);
}

static inline
void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
{
	qdf_nbuf_free(nbuf);
}
#endif
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
 * @soc: core txrx main context
 * @nbuf_list: nbuf list for delayed free
 *
 * Return: void
 */
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);

/**
 * dp_rx_desc_delayed_free() - Delayed free of the rx descs
 *
 * @soc: core txrx main context
 *
 * Return: void
 */
void dp_rx_desc_delayed_free(struct dp_soc *soc);
#endif
/**
 * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
 * @soc: core txrx main context
 * @nbuf: pointer to the first msdu of an amsdu.
 * @peer_id: Peer id of the peer
 * @txrx_ref_handle: Buffer to save the handle for txrx peer's reference
 * @pkt_capture_offload: Flag indicating if pkt capture offload is needed
 * @vdev: Buffer to hold pointer to vdev
 * @rx_pdev: Buffer to hold pointer to rx pdev
 * @dsf: delay stats flag
 * @old_tid: Old tid
 *
 * Get txrx peer and vdev from peer id
 *
 * Return: Pointer to txrx peer
 */
static inline struct dp_txrx_peer *
dp_rx_get_txrx_peer_and_vdev(struct dp_soc *soc,
			     qdf_nbuf_t nbuf,
			     uint16_t peer_id,
			     dp_txrx_ref_handle *txrx_ref_handle,
			     bool pkt_capture_offload,
			     struct dp_vdev **vdev,
			     struct dp_pdev **rx_pdev,
			     uint32_t *dsf,
			     uint32_t *old_tid)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, txrx_ref_handle,
					       DP_MOD_ID_RX);

	if (qdf_likely(txrx_peer)) {
		*vdev = txrx_peer->vdev;
	} else {
		nbuf->next = NULL;
		dp_rx_deliver_to_pkt_capture_no_peer(soc, nbuf,
						     pkt_capture_offload);
		if (!pkt_capture_offload)
			dp_rx_deliver_to_stack_no_peer(soc, nbuf);

		goto end;
	}

	if (qdf_unlikely(!(*vdev))) {
		qdf_nbuf_free(nbuf);
		DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
		goto end;
	}

	*rx_pdev = (*vdev)->pdev;
	*dsf = (*rx_pdev)->delay_stats_flag;
	*old_tid = 0xff;

end:
	return txrx_peer;
}
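/*
 * Example (illustrative sketch, not part of the driver API): per-MSDU
 * peer lookup at the top of the rx processing loop. A NULL return means
 * the nbuf has already been consumed (delivered without a peer), so the
 * caller simply moves to the next entry.
 *
 *	txrx_peer = dp_rx_get_txrx_peer_and_vdev(soc, nbuf, peer_id,
 *						 &txrx_ref_handle,
 *						 pkt_capture_offload,
 *						 &vdev, &rx_pdev, &dsf,
 *						 &old_tid);
 *	if (qdf_unlikely(!txrx_peer) || qdf_unlikely(!vdev))
 *		continue;
 */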
static inline QDF_STATUS
dp_peer_rx_reorder_queue_setup(struct dp_soc *soc, struct dp_peer *peer,
			       int tid, uint32_t ba_window_size)
{
	return soc->arch_ops.dp_peer_rx_reorder_queue_setup(soc,
							    peer, tid,
							    ba_window_size);
}

static inline
void dp_rx_nbuf_list_deliver(struct dp_soc *soc,
			     struct dp_vdev *vdev,
			     struct dp_txrx_peer *txrx_peer,
			     uint16_t peer_id,
			     uint8_t pkt_capture_offload,
			     qdf_nbuf_t deliver_list_head,
			     qdf_nbuf_t deliver_list_tail)
{
	qdf_nbuf_t nbuf, next;

	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(txrx_peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, txrx_peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
}
#ifdef DP_TX_RX_TPUT_SIMULATE
/*
 * Change this macro value to simulate a different RX T-put.
 * If OTA is 100 Mbps and 200 Mbps is to be simulated, the multiplication
 * factor is 2; set the macro value to 1 (multiplication factor - 1).
 */
#define DP_RX_PKTS_DUPLICATE_CNT 0
static inline
void dp_rx_nbuf_list_dup_deliver(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 struct dp_txrx_peer *txrx_peer,
				 uint16_t peer_id,
				 uint8_t pkt_capture_offload,
				 qdf_nbuf_t ori_list_head,
				 qdf_nbuf_t ori_list_tail)
{
	qdf_nbuf_t new_skb = NULL;
	qdf_nbuf_t new_list_head = NULL;
	qdf_nbuf_t new_list_tail = NULL;
	qdf_nbuf_t nbuf = NULL;
	int i;

	for (i = 0; i < DP_RX_PKTS_DUPLICATE_CNT; i++) {
		nbuf = ori_list_head;
		new_list_head = NULL;
		new_list_tail = NULL;

		while (nbuf) {
			new_skb = qdf_nbuf_copy(nbuf);
			if (qdf_likely(new_skb))
				DP_RX_LIST_APPEND(new_list_head,
						  new_list_tail,
						  new_skb);
			else
				dp_err("copy skb failed");

			nbuf = qdf_nbuf_next(nbuf);
		}

		/* deliver the copied nbuf list */
		dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
					pkt_capture_offload,
					new_list_head,
					new_list_tail);
	}

	/* deliver the original skb_list */
	dp_rx_nbuf_list_deliver(soc, vdev, txrx_peer, peer_id,
				pkt_capture_offload,
				ori_list_head,
				ori_list_tail);
}

#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_dup_deliver
#else /* !DP_TX_RX_TPUT_SIMULATE */
#define DP_RX_DELIVER_TO_STACK dp_rx_nbuf_list_deliver
#endif /* DP_TX_RX_TPUT_SIMULATE */
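/*
 * Example (illustrative sketch, not part of the driver API): the tail of
 * the rx path hands off the accumulated skb list through the macro, so
 * the T-put simulation hook above is compiled in transparently.
 *
 *	DP_RX_DELIVER_TO_STACK(soc, vdev, txrx_peer, peer_id,
 *			       pkt_capture_offload,
 *			       deliver_list_head, deliver_list_tail);
 */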
  2703. /**
  2704. * dp_rx_wbm_desc_nbuf_sanity_check() - Add sanity check to for WBM rx_desc
  2705. * paddr corruption
  2706. * @soc: core txrx main context
  2707. * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring
  2708. * @ring_desc: REO ring descriptor
  2709. * @rx_desc: Rx descriptor
  2710. *
  2711. * Return: NONE
  2712. */
  2713. QDF_STATUS dp_rx_wbm_desc_nbuf_sanity_check(struct dp_soc *soc,
  2714. hal_ring_handle_t hal_ring_hdl,
  2715. hal_ring_desc_t ring_desc,
  2716. struct dp_rx_desc *rx_desc);
  2717. /**
  2718. * dp_rx_is_sg_formation_required() - Check if sg formation is required
  2719. * @info: WBM desc info
  2720. *
  2721. * Return: True if sg is required else false
  2722. */
  2723. bool dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info);
  2724. /**
  2725. * dp_rx_err_tlv_invalidate() - Invalidate network buffer
  2726. * @soc: core txrx main context
  2727. * @nbuf: Network buffer to invalidate
  2728. *
  2729. * Return: NONE
  2730. */
  2731. void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
  2732. qdf_nbuf_t nbuf);
  2733. /**
  2734. * dp_rx_wbm_sg_list_last_msdu_war() - war for HW issue
  2735. * @soc: DP SOC handle
  2736. *
  2737. * This is a war for HW issue where length is only valid in last msdu
  2738. *
  2739. * Return: NONE
  2740. */
  2741. void dp_rx_wbm_sg_list_last_msdu_war(struct dp_soc *soc);
  2742. /**
  2743. * dp_rx_check_pkt_len() - Check for pktlen validity
  2744. * @soc: DP SOC context
  2745. * @pkt_len: computed length of the pkt from caller in bytes
  2746. *
  2747. * Return: true if pktlen > RX_BUFFER_SIZE, else return false
  2748. *
  2749. */
  2750. bool dp_rx_check_pkt_len(struct dp_soc *soc, uint32_t pkt_len);

/**
 * dp_rx_null_q_handle_invalid_peer_id_exception() - handle an invalid
 *                                                   peer_id exception
 * @soc: pointer to dp_soc struct
 * @pool_id: Pool id to find dp_pdev
 * @rx_tlv_hdr: TLV header of received packet
 * @nbuf: SKB
 *
 * For certain types of packets, if the peer_id is not correct the driver
 * may not be able to find the peer. In that case, try finding the peer by
 * addr_2 of the received MPDU. If the peer is found this way, then most
 * likely the sw_peer_id and ast_idx are corrupted.
 *
 * Return: True if the peer is found by addr_2 of the received MPDU,
 *         else false
 */
bool dp_rx_null_q_handle_invalid_peer_id_exception(struct dp_soc *soc,
                                                   uint8_t pool_id,
                                                   uint8_t *rx_tlv_hdr,
                                                   qdf_nbuf_t nbuf);

/**
 * dp_rx_err_drop_3addr_mcast() - Check if the drop_3addr_mcast feature is
 *                                enabled; if so, drop the multicast frame.
 * @vdev: datapath vdev
 * @rx_tlv_hdr: TLV header
 *
 * Return: true if the packet is to be dropped, false otherwise
 */
bool dp_rx_err_drop_3addr_mcast(struct dp_vdev *vdev, uint8_t *rx_tlv_hdr);

/**
 * dp_rx_deliver_to_osif_stack() - function to deliver rx pkts to stack
 * @soc: DP soc
 * @vdev: DP vdev handle
 * @txrx_peer: pointer to the txrx_peer object
 * @nbuf: skb list head
 * @tail: skb list tail
 * @is_eapol: eapol pkt check
 *
 * Return: None
 */
void
dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
                            struct dp_vdev *vdev,
                            struct dp_txrx_peer *txrx_peer,
                            qdf_nbuf_t nbuf,
                            qdf_nbuf_t tail,
                            bool is_eapol);

/**
 * dp_rx_set_wbm_err_info_in_nbuf() - function to set wbm err info in nbuf
 * @soc: DP soc
 * @nbuf: skb list head
 * @wbm_err: wbm error info details
 *
 * Return: None
 */
void
dp_rx_set_wbm_err_info_in_nbuf(struct dp_soc *soc,
                               qdf_nbuf_t nbuf,
                               union hal_wbm_err_info_u wbm_err);

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
        return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}

static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
        return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
        struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
        uint8_t wbm2_sw_rx_rel_ring_id;

        wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(cfg_ctx);

        return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id,
                                    wbm2_sw_rx_rel_ring_id);
}

static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
        return dp_rx_get_rx_bm_id(soc);
}
#endif
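
/*
 * Usage sketch (assumed call site, for illustration only): the returned
 * buffer-manager ids are the RBM values that rx and defrag buffers get
 * tagged with when handed to HAL, e.g.:
 *
 *      uint8_t rx_bm_id = dp_rx_get_rx_bm_id(soc);
 *      uint8_t defrag_bm_id = dp_rx_get_defrag_bm_id(soc);
 *      // rx_bm_id is then supplied to HAL when queuing rx buffers
 */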

#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
/**
 * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
 * @ring_desc: opaque pointer to the RX ring descriptor
 * @rx_desc: host rx descriptor
 *
 * Return: void
 */
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
                                hal_ring_handle_t hal_ring_hdl,
                                hal_ring_desc_t ring_desc,
                                struct dp_rx_desc *rx_desc);

/**
 * dp_rx_link_desc_return() - Return an MPDU link descriptor to HW
 *                            (WBM), following error handling
 * @soc: core DP main context
 * @ring_desc: opaque pointer to the REO error ring descriptor
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
                       uint8_t bm_action);

/**
 * dp_rx_link_desc_return_by_addr() - Return an MPDU link descriptor to
 *                                    HW (WBM) by address
 * @soc: core DP main context
 * @link_desc_addr: link descriptor addr
 * @bm_action: put to idle_list or release to msdu_list
 *
 * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
 */
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
                               hal_buff_addrinfo_t link_desc_addr,
                               uint8_t bm_action);

/**
 * dp_rxdma_err_process() - RxDMA error processing functionality
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * Return: num of buffers processed
 */
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                     uint32_t mac_id, uint32_t quota);

/**
 * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err or
 *                             wifi parse error frames to the OS
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: peer reference
 * @err_code: rxdma err code
 * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and pool_id
 *          have the same mapping)
 * @link_id: link Id on which the packet is received
 *
 * Return: None
 */
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
                        uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
                        uint8_t err_code, uint8_t mac_id, uint8_t link_id);

/**
 * dp_rx_process_mic_error() - Function to pass mic error indication to umac
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @txrx_peer: txrx peer handle
 *
 * Return: void
 */
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
                             uint8_t *rx_tlv_hdr,
                             struct dp_txrx_peer *txrx_peer);

/**
 * dp_2k_jump_handle() - Function to handle 2k jump exception
 *                       on WBM ring
 * @soc: core DP main context
 * @nbuf: buffer pointer
 * @rx_tlv_hdr: start of rx tlv header
 * @peer_id: peer id of first msdu
 * @tid: Tid for which exception occurred
 *
 * This function handles 2k jump violations arising out of receiving
 * aggregates in a non-BA case. This typically happens if aggregates are
 * received on a QoS-enabled TID while the Rx window size is still
 * initialized to a value of 2, or if the negotiated window size is 1 but
 * the peer sends aggregates.
 *
 * Return: void
 */
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
                       uint16_t peer_id, uint8_t tid);

#ifndef QCA_HOST_MODE_WIFI_DISABLED

/**
 * dp_rx_err_process() - Processes error frames routed to REO error ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *                serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to REO error ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                           hal_ring_handle_t hal_ring_hdl, uint32_t quota);

/**
 * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: core txrx main context
 * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
 *                serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements error processing and top level demultiplexer
 * for all the frames routed to WBM2HOST sw release ring.
 *
 * Return: uint32_t: No. of elements processed
 */
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                      hal_ring_handle_t hal_ring_hdl, uint32_t quota);

#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/**
 * dp_rx_srng_access_start() - Wrapper function to log access start of a
 *                             hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: 0 on success; error on failure
 */
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
                        hal_ring_handle_t hal_ring_hdl)
{
        return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
}

/**
 * dp_rx_srng_access_end() - Wrapper function to log access end of a hal ring
 * @int_ctx: pointer to DP interrupt context
 * @soc: DP soc structure pointer
 * @hal_ring_hdl: HAL ring handle
 *
 * Return: None
 */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
                      hal_ring_handle_t hal_ring_hdl)
{
        hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
                        hal_ring_handle_t hal_ring_hdl)
{
        return dp_srng_access_start(int_ctx, soc, hal_ring_hdl);
}

static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
                      hal_ring_handle_t hal_ring_hdl)
{
        dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif
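
/*
 * Typical reap-loop shape built on these wrappers (illustrative sketch;
 * error handling trimmed, local names are assumptions):
 *
 *      if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc,
 *                                               hal_ring_hdl)))
 *              return 0;
 *      while ((ring_desc = hal_srng_dst_get_next(hal_soc, hal_ring_hdl)))
 *              ; // process ring_desc
 *      dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */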

#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
                             hal_ring_handle_t hal_ring_hdl,
                             hal_ring_desc_t ring_desc,
                             struct dp_rx_desc *rx_desc)
{
        return QDF_STATUS_SUCCESS;
}
#endif

#ifdef RX_DESC_DEBUG_CHECK
/**
 * dp_rx_desc_nbuf_sanity_check() - Add sanity check to catch REO rx_desc
 *                                  paddr corruption
 * @soc: DP SoC context
 * @ring_desc: REO ring descriptor
 * @rx_desc: Rx descriptor
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
                                        hal_ring_desc_t ring_desc,
                                        struct dp_rx_desc *rx_desc);
#else
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
                                        hal_ring_desc_t ring_desc,
                                        struct dp_rx_desc *rx_desc)
{
        return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */

/**
 * dp_rx_wbm_sg_list_reset() - Initialize sg list
 * @soc: DP SOC handle
 *
 * This api should be called at soc init and after every sg processing.
 */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
        if (soc) {
                soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
                soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
                soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
                soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
        }
}

/**
 * dp_rx_wbm_sg_list_deinit() - De-initialize sg list
 * @soc: DP SOC handle
 *
 * This api should be called on the down path to avoid any leak.
 */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
        if (soc) {
                if (soc->wbm_sg_param.wbm_sg_nbuf_head)
                        qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);

                dp_rx_wbm_sg_list_reset(soc);
        }
}
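
/*
 * Call-site sketch (assumed placement, for illustration): reset once at
 * soc init and after every completed sg formation; deinit on the
 * teardown path so a partially built sg list is not leaked:
 *
 *      dp_rx_wbm_sg_list_reset(soc);   // soc init / after each sg chain
 *      ...
 *      dp_rx_wbm_sg_list_deinit(soc);  // soc deinit
 */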

/**
 * dp_rx_link_desc_refill_duplicate_check() - check if the link desc to be
 *                                            refilled is a duplicate
 * @soc: DP SOC handle
 * @buf_info: the last link desc buf info
 * @ring_buf_info: current buf address pointer including link desc
 *
 * Return: none.
 */
void dp_rx_link_desc_refill_duplicate_check(
                                struct dp_soc *soc,
                                struct hal_buf_info *buf_info,
                                hal_buff_addrinfo_t ring_buf_info);

/**
 * dp_rx_srng_get_num_pending() - get number of pending entries
 * @hal_soc: hal soc opaque pointer
 * @hal_ring_hdl: opaque pointer to the HAL Rx Ring
 * @num_entries: number of entries in the hal_ring.
 * @near_full: pointer to a boolean. This is set if ring is near full.
 *
 * The function returns the number of entries in a destination ring which are
 * yet to be reaped. The function also checks if the ring is near full.
 * If more than half of the ring needs to be reaped, the ring is considered
 * approaching full.
 * The function uses hal_srng_dst_num_valid_locked to get the number of valid
 * entries. It should not be called within a SRNG lock. HW pointer value is
 * synced into cached_hp.
 *
 * Return: Number of pending entries if any
 */
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
                                    hal_ring_handle_t hal_ring_hdl,
                                    uint32_t num_entries,
                                    bool *near_full);
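
/*
 * Illustrative use in a reap loop (hypothetical locals): the near_full
 * hint lets the caller raise its work budget before the ring overflows:
 *
 *      num_pending = dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl,
 *                                               num_entries, &near_full);
 *      if (near_full)
 *              quota = num_entries;    // drain aggressively (assumed policy)
 */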

#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
 * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
 * @soc: Datapath soc structure
 * @ring_num: REO ring number
 * @ring_desc: REO ring descriptor
 *
 * Return: None
 */
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                             hal_ring_desc_t ring_desc);
#else
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
                        hal_ring_desc_t ring_desc)
{
}
#endif

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_rx_is_list_ready() - Make different lists for 4-address
 *                         and 3-address frames
 * @nbuf_head: skb list head
 * @vdev: vdev
 * @txrx_peer: txrx_peer
 * @peer_id: peer id of new received frame
 * @vdev_id: vdev_id of new received frame
 *
 * Return: true if peer_ids are different.
 */
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
                    struct dp_vdev *vdev,
                    struct dp_txrx_peer *txrx_peer,
                    uint16_t peer_id,
                    uint8_t vdev_id)
{
        if (nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id)
                return true;

        return false;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
                    struct dp_vdev *vdev,
                    struct dp_txrx_peer *txrx_peer,
                    uint16_t peer_id,
                    uint8_t vdev_id)
{
        if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
                return true;

        return false;
}
#endif
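
/*
 * Sketch of the intended pattern (assumed caller): when the predicate
 * fires, the accumulated list is flushed before frames belonging to a
 * different peer/vdev are appended to it:
 *
 *      if (dp_rx_is_list_ready(nbuf_head, vdev, txrx_peer,
 *                              peer_id, vdev_id)) {
 *              // deliver nbuf_head..nbuf_tail, then start a new list
 *      }
 */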

#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_rx_mark_first_packet_after_wow_wakeup() - get first packet after wow
 *                                              wakeup
 * @pdev: pointer to dp_pdev structure
 * @rx_tlv: pointer to rx_pkt_tlvs structure
 * @nbuf: pointer to skb buffer
 *
 * Return: None
 */
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
                                              uint8_t *rx_tlv,
                                              qdf_nbuf_t nbuf);
#else
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
                                         uint8_t *rx_tlv,
                                         qdf_nbuf_t nbuf)
{
}
#endif

#else
static inline QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
                               hal_buff_addrinfo_t link_desc_addr,
                               uint8_t bm_action)
{
        return QDF_STATUS_SUCCESS;
}

static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
}

static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
}

static inline uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
                     uint32_t mac_id, uint32_t quota)
{
        return 0;
}
#endif /* WLAN_SOFTUMAC_SUPPORT */

#ifndef CONFIG_NBUF_AP_PLATFORM
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
                                     struct dp_txrx_peer *txrx_peer)
{
        return QDF_NBUF_CB_RX_LOGICAL_LINK_ID(nbuf);
}
#else
static inline uint8_t
dp_rx_get_stats_arr_idx_from_link_id(qdf_nbuf_t nbuf,
                                     struct dp_txrx_peer *txrx_peer)
{
        uint8_t link_id = 0;

        link_id = (QDF_NBUF_CB_RX_HW_LINK_ID(nbuf) + 1);
        if (link_id > DP_MAX_MLO_LINKS) {
                link_id = 0;
                DP_PEER_PER_PKT_STATS_INC(txrx_peer,
                                          rx.inval_link_id_pkt_cnt,
                                          1, link_id);
        }

        return link_id;
}
#endif /* CONFIG_NBUF_AP_PLATFORM */
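
/*
 * Illustrative use (hypothetical caller): the returned index selects the
 * per-link stats slot for an MLO peer, e.g.:
 *
 *      uint8_t idx = dp_rx_get_stats_arr_idx_from_link_id(nbuf, txrx_peer);
 *      // idx then indexes txrx_peer per-link stats, as with
 *      // DP_PEER_PER_PKT_STATS_INC(txrx_peer, <counter>, 1, idx);
 */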

#endif /* _DP_RX_H */