dp_rx.c

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_hw_headers.h"
  19. #include "dp_types.h"
  20. #include "dp_rx.h"
  21. #include "dp_peer.h"
  22. #include "hal_rx.h"
  23. #include "hal_api.h"
  24. #include "qdf_nbuf.h"
  25. #ifdef MESH_MODE_SUPPORT
  26. #include "if_meta_hdr.h"
  27. #endif
  28. #include "dp_internal.h"
  29. #include "dp_rx_mon.h"
  30. #include "dp_ipa.h"
  31. #ifdef FEATURE_WDS
  32. #include "dp_txrx_wds.h"
  33. #endif
  34. #include "dp_hist.h"
  35. #include "dp_rx_buffer_pool.h"
  36. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  37. #ifdef ATH_RX_PRI_SAVE
  38. #define DP_RX_TID_SAVE(_nbuf, _tid) \
  39. (qdf_nbuf_set_priority(_nbuf, _tid))
  40. #else
  41. #define DP_RX_TID_SAVE(_nbuf, _tid)
  42. #endif
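/**
 * dp_rx_check_ndi_mdns_fwding() - Check if an mDNS frame may be intra-BSS
 * forwarded on an NDI vdev
 * @ta_peer: transmitting (source) peer
 * @nbuf: received frame
 *
 * Return: false if the frame is an IPv6 mDNS packet received on an NDI vdev
 * (such frames are counted and not forwarded), true otherwise
 */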
  43. #ifdef DP_RX_DISABLE_NDI_MDNS_FORWARDING
  44. static inline
  45. bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
  46. {
  47. if (ta_peer->vdev->opmode == wlan_op_mode_ndi &&
  48. qdf_nbuf_is_ipv6_mdns_pkt(nbuf)) {
  49. DP_STATS_INC(ta_peer, rx.intra_bss.mdns_no_fwd, 1);
  50. return false;
  51. }
  52. return true;
  53. }
  54. #else
  55. static inline
  56. bool dp_rx_check_ndi_mdns_fwding(struct dp_peer *ta_peer, qdf_nbuf_t nbuf)
  57. {
  58. return true;
  59. }
  60. #endif
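/**
 * dp_rx_check_ap_bridge() - Check if AP bridging is enabled on the vdev
 * @vdev: DP vdev handle
 *
 * Return: value of vdev->ap_bridge_enabled
 */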
  61. static inline bool dp_rx_check_ap_bridge(struct dp_vdev *vdev)
  62. {
  63. return vdev->ap_bridge_enabled;
  64. }
  65. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
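/**
 * dp_rx_dump_info_and_assert() - Dump Rx descriptor and ring descriptor info
 * @soc: DP SOC handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor that referenced the Rx descriptor
 * @rx_desc: host Rx descriptor
 *
 * When DUP_RX_DESC_WAR is not defined, the full ring is also dumped and the
 * condition is treated as fatal (qdf_assert_always).
 *
 * Return: None
 */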
  66. #ifdef DUP_RX_DESC_WAR
  67. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  68. hal_ring_handle_t hal_ring,
  69. hal_ring_desc_t ring_desc,
  70. struct dp_rx_desc *rx_desc)
  71. {
  72. void *hal_soc = soc->hal_soc;
  73. hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
  74. dp_rx_desc_dump(rx_desc);
  75. }
  76. #else
  77. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  78. hal_ring_handle_t hal_ring_hdl,
  79. hal_ring_desc_t ring_desc,
  80. struct dp_rx_desc *rx_desc)
  81. {
  82. hal_soc_handle_t hal_soc = soc->hal_soc;
  83. dp_rx_desc_dump(rx_desc);
  84. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
  85. hal_srng_dump_ring(hal_soc, hal_ring_hdl);
  86. qdf_assert_always(0);
  87. }
  88. #endif
  89. #ifndef QCA_HOST_MODE_WIFI_DISABLED
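/**
 * dp_rx_desc_sanity() - Sanity check an Rx descriptor and its return
 * buffer manager
 * @soc: DP SOC handle
 * @hal_soc: HAL SOC handle
 * @hal_ring_hdl: HAL ring handle
 * @ring_desc: ring descriptor the cookie was read from
 * @rx_desc: Rx descriptor looked up from the cookie, may be NULL
 *
 * Return: QDF_STATUS_SUCCESS if rx_desc is valid and the return buffer
 * manager is SW1/SW3, QDF_STATUS_E_NULL_VALUE otherwise (the offending
 * ring descriptor is dumped and the invalid_cookie stat is incremented)
 */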
  90. #ifdef RX_DESC_SANITY_WAR
  91. static inline
  92. QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
  93. hal_ring_handle_t hal_ring_hdl,
  94. hal_ring_desc_t ring_desc,
  95. struct dp_rx_desc *rx_desc)
  96. {
  97. uint8_t return_buffer_manager;
  98. if (qdf_unlikely(!rx_desc)) {
  99. /*
  100. * This is an unlikely case where the cookie obtained
  101. * from the ring_desc is invalid and hence we are not
  102. * able to find the corresponding rx_desc
  103. */
  104. goto fail;
  105. }
  106. return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
  107. if (qdf_unlikely(!(return_buffer_manager == HAL_RX_BUF_RBM_SW1_BM ||
  108. return_buffer_manager == HAL_RX_BUF_RBM_SW3_BM))) {
  109. goto fail;
  110. }
  111. return QDF_STATUS_SUCCESS;
  112. fail:
  113. DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
  114. dp_err("Ring Desc:");
  115. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
  116. ring_desc);
  117. return QDF_STATUS_E_NULL_VALUE;
  118. }
  119. #else
  120. static inline
  121. QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
  122. hal_ring_handle_t hal_ring_hdl,
  123. hal_ring_desc_t ring_desc,
  124. struct dp_rx_desc *rx_desc)
  125. {
  126. return QDF_STATUS_SUCCESS;
  127. }
  128. #endif
  129. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  130. /**
  131. * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
  132. *
  133. * @dp_soc: struct dp_soc *
  134. * @nbuf_frag_info_t: nbuf frag info
  135. * @dp_pdev: struct dp_pdev *
  136. * @rx_desc_pool: Rx desc pool
  137. *
  138. * Return: QDF_STATUS
  139. */
  140. #ifdef DP_RX_MON_MEM_FRAG
  141. static inline QDF_STATUS
  142. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  143. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  144. struct dp_pdev *dp_pdev,
  145. struct rx_desc_pool *rx_desc_pool)
  146. {
  147. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  148. (nbuf_frag_info_t->virt_addr).vaddr =
  149. qdf_frag_alloc(rx_desc_pool->buf_size);
  150. if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
  151. dp_err("Frag alloc failed");
  152. DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
  153. return QDF_STATUS_E_NOMEM;
  154. }
  155. ret = qdf_mem_map_page(dp_soc->osdev,
  156. (nbuf_frag_info_t->virt_addr).vaddr,
  157. QDF_DMA_FROM_DEVICE,
  158. rx_desc_pool->buf_size,
  159. &nbuf_frag_info_t->paddr);
  160. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  161. qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
  162. dp_err("Frag map failed");
  163. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  164. return QDF_STATUS_E_FAULT;
  165. }
  166. return QDF_STATUS_SUCCESS;
  167. }
  168. #else
  169. static inline QDF_STATUS
  170. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  171. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  172. struct dp_pdev *dp_pdev,
  173. struct rx_desc_pool *rx_desc_pool)
  174. {
  175. return QDF_STATUS_SUCCESS;
  176. }
  177. #endif /* DP_RX_MON_MEM_FRAG */
  178. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  179. /**
  180. * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
  181. * @soc: Datapath soc structure
  182. * @ring_num: Refill ring number
  * @hal_ring_hdl: HAL handle of the refill ring (used to read SW head/tail)
  183. * @num_req: number of buffers requested for refill
  184. * @num_refill: number of buffers refilled
  185. *
  186. * Return: None
  187. */
  188. static inline void
  189. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  190. hal_ring_handle_t hal_ring_hdl,
  191. uint32_t num_req, uint32_t num_refill)
  192. {
  193. struct dp_refill_info_record *record;
  194. uint32_t idx;
  195. uint32_t tp;
  196. uint32_t hp;
  197. if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
  198. !soc->rx_refill_ring_history[ring_num]))
  199. return;
  200. idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
  201. DP_RX_REFILL_HIST_MAX);
  202. /* No NULL check needed for record since it's an array */
  203. record = &soc->rx_refill_ring_history[ring_num]->entry[idx];
  204. hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
  205. record->timestamp = qdf_get_log_timestamp();
  206. record->num_req = num_req;
  207. record->num_refill = num_refill;
  208. record->hp = hp;
  209. record->tp = tp;
  210. }
  211. #else
  212. static inline void
  213. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  214. hal_ring_handle_t hal_ring_hdl,
  215. uint32_t num_req, uint32_t num_refill)
  216. {
  217. }
  218. #endif
  219. /**
  220. * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
  221. *
  222. * @dp_soc: struct dp_soc *
  223. * @mac_id: Mac id
  224. * @num_entries_avail: number of entries available in the RxDMA ring
  225. * @nbuf_frag_info_t: nbuf frag info
  226. * @dp_pdev: struct dp_pdev *
  227. * @rx_desc_pool: Rx desc pool
  228. *
  229. * Return: QDF_STATUS
  230. */
  231. static inline QDF_STATUS
  232. dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
  233. uint32_t mac_id,
  234. uint32_t num_entries_avail,
  235. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  236. struct dp_pdev *dp_pdev,
  237. struct rx_desc_pool *rx_desc_pool)
  238. {
  239. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  240. (nbuf_frag_info_t->virt_addr).nbuf =
  241. dp_rx_buffer_pool_nbuf_alloc(dp_soc,
  242. mac_id,
  243. rx_desc_pool,
  244. num_entries_avail);
  245. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  246. dp_err("nbuf alloc failed");
  247. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  248. return QDF_STATUS_E_NOMEM;
  249. }
  250. ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
  251. nbuf_frag_info_t);
  252. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  253. dp_rx_buffer_pool_nbuf_free(dp_soc,
  254. (nbuf_frag_info_t->virt_addr).nbuf, mac_id);
  255. dp_err("nbuf map failed");
  256. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  257. return QDF_STATUS_E_FAULT;
  258. }
  259. nbuf_frag_info_t->paddr =
  260. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
  261. dp_ipa_handle_rx_buf_smmu_mapping(dp_soc,
  262. (qdf_nbuf_t)((nbuf_frag_info_t->virt_addr).nbuf),
  263. rx_desc_pool->buf_size,
  264. true);
  265. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  266. &nbuf_frag_info_t->paddr,
  267. rx_desc_pool);
  268. if (ret == QDF_STATUS_E_FAILURE) {
  269. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  270. return QDF_STATUS_E_ADDRNOTAVAIL;
  271. }
  272. return QDF_STATUS_SUCCESS;
  273. }
  274. /*
  275. * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  276. * called during dp rx initialization
  277. * and at the end of dp_rx_process.
  278. *
  279. * @soc: core txrx main context
  280. * @mac_id: mac_id which is one of 3 mac_ids
  281. * @dp_rxdma_srng: dp rxdma circular ring
  282. * @rx_desc_pool: Pointer to free Rx descriptor pool
  283. * @num_req_buffers: number of buffers to be replenished
  284. * @desc_list: list of descs if called from dp_rx_process
  285. * or NULL during dp rx initialization or out of buffer
  286. * interrupt.
  287. * @tail: tail of descs list
  288. * @func_name: name of the caller function
  289. * Return: success or failure
  290. */
  291. QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
  292. struct dp_srng *dp_rxdma_srng,
  293. struct rx_desc_pool *rx_desc_pool,
  294. uint32_t num_req_buffers,
  295. union dp_rx_desc_list_elem_t **desc_list,
  296. union dp_rx_desc_list_elem_t **tail,
  297. const char *func_name)
  298. {
  299. uint32_t num_alloc_desc;
  300. uint16_t num_desc_to_free = 0;
  301. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
  302. uint32_t num_entries_avail;
  303. uint32_t count;
  304. int sync_hw_ptr = 1;
  305. struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
  306. void *rxdma_ring_entry;
  307. union dp_rx_desc_list_elem_t *next;
  308. QDF_STATUS ret;
  309. void *rxdma_srng;
  310. rxdma_srng = dp_rxdma_srng->hal_srng;
  311. if (!rxdma_srng) {
  312. dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
  313. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  314. return QDF_STATUS_E_FAILURE;
  315. }
  316. dp_rx_debug("%pK: requested %d buffers for replenish",
  317. dp_soc, num_req_buffers);
  318. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  319. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  320. rxdma_srng,
  321. sync_hw_ptr);
  322. dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
  323. dp_soc, num_entries_avail);
  324. if (!(*desc_list) && (num_entries_avail >
  325. ((dp_rxdma_srng->num_entries * 3) / 4))) {
  326. num_req_buffers = num_entries_avail;
  327. } else if (num_entries_avail < num_req_buffers) {
  328. num_desc_to_free = num_req_buffers - num_entries_avail;
  329. num_req_buffers = num_entries_avail;
  330. }
  331. if (qdf_unlikely(!num_req_buffers)) {
  332. num_desc_to_free = num_req_buffers;
  333. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  334. goto free_descs;
  335. }
  336. /*
  337. * if desc_list is NULL, allocate the descs from freelist
  338. */
  339. if (!(*desc_list)) {
  340. num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
  341. rx_desc_pool,
  342. num_req_buffers,
  343. desc_list,
  344. tail);
  345. if (!num_alloc_desc) {
  346. dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
  347. DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
  348. num_req_buffers);
  349. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  350. return QDF_STATUS_E_NOMEM;
  351. }
  352. dp_rx_debug("%pK: %d rx desc allocated", dp_soc, num_alloc_desc);
  353. num_req_buffers = num_alloc_desc;
  354. }
  355. count = 0;
  356. while (count < num_req_buffers) {
  357. /* Flag is set during pdev rx_desc_pool initialization */
  358. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  359. ret = dp_pdev_frag_alloc_and_map(dp_soc,
  360. &nbuf_frag_info,
  361. dp_pdev,
  362. rx_desc_pool);
  363. else
  364. ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
  365. mac_id,
  366. num_entries_avail, &nbuf_frag_info,
  367. dp_pdev, rx_desc_pool);
  368. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  369. if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
  370. continue;
  371. break;
  372. }
  373. count++;
  374. rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
  375. rxdma_srng);
  376. qdf_assert_always(rxdma_ring_entry);
  377. next = (*desc_list)->next;
  378. /* Flag is set during pdev rx_desc_pool initialization */
  379. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  380. dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
  381. &nbuf_frag_info);
  382. else
  383. dp_rx_desc_prep(&((*desc_list)->rx_desc),
  384. &nbuf_frag_info);
  385. /* rx_desc.in_use should be zero at this time*/
  386. qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
  387. (*desc_list)->rx_desc.in_use = 1;
  388. (*desc_list)->rx_desc.in_err_state = 0;
  389. dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
  390. func_name, RX_DESC_REPLENISHED);
  391. dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
  392. nbuf_frag_info.virt_addr.nbuf,
  393. (unsigned long long)(nbuf_frag_info.paddr),
  394. (*desc_list)->rx_desc.cookie);
  395. hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
  396. nbuf_frag_info.paddr,
  397. (*desc_list)->rx_desc.cookie,
  398. rx_desc_pool->owner);
  399. *desc_list = next;
  400. }
  401. dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
  402. num_req_buffers, count);
  403. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  404. dp_rx_schedule_refill_thread(dp_soc);
  405. dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
  406. count, num_desc_to_free);
  407. /* No need to count the number of bytes received during replenish.
  408. * Therefore set replenish.pkts.bytes as 0.
  409. */
  410. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  411. free_descs:
  412. DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
  413. /*
  414. * add any available free desc back to the free list
  415. */
  416. if (*desc_list)
  417. dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
  418. mac_id, rx_desc_pool);
  419. return QDF_STATUS_SUCCESS;
  420. }
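/*
 * Illustrative call pattern (a sketch, not copied from this file): a caller
 * with no pre-reaped descriptors passes NULL-initialized desc_list/tail so
 * the function pulls descriptors from the free list itself, e.g.
 *
 *   union dp_rx_desc_list_elem_t *desc_list = NULL;
 *   union dp_rx_desc_list_elem_t *tail = NULL;
 *
 *   __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
 *                             num_req_buffers, &desc_list, &tail, __func__);
 *
 * When invoked at the end of dp_rx_process(), desc_list/tail instead carry
 * the descriptors reaped in that pass.
 */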
  421. /*
  422. * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
  423. * pkts to RAW mode simulation to
  424. * decapsulate the pkt.
  425. *
  426. * @vdev: vdev on which RAW mode is enabled
  427. * @nbuf_list: list of RAW pkts to process
  428. * @peer: peer object from which the pkt is rx
  429. *
  430. * Return: void
  431. */
  432. void
  433. dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
  434. struct dp_peer *peer)
  435. {
  436. qdf_nbuf_t deliver_list_head = NULL;
  437. qdf_nbuf_t deliver_list_tail = NULL;
  438. qdf_nbuf_t nbuf;
  439. nbuf = nbuf_list;
  440. while (nbuf) {
  441. qdf_nbuf_t next = qdf_nbuf_next(nbuf);
  442. DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
  443. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  444. DP_STATS_INC_PKT(peer, rx.raw, 1, qdf_nbuf_len(nbuf));
  445. /*
  446. * reset the chfrag_start and chfrag_end bits in nbuf cb
  447. * as this is a non-amsdu pkt and RAW mode simulation expects
  448. * these bits to be 0 for non-amsdu pkt.
  449. */
  450. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  451. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  452. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  453. qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
  454. }
  455. nbuf = next;
  456. }
  457. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
  458. &deliver_list_tail, peer->mac_addr.raw);
  459. vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
  460. }
  461. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  462. #ifndef FEATURE_WDS
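/* Stub: destination-address (DA) learning is only required when FEATURE_WDS is enabled */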
  463. static void
  464. dp_rx_da_learn(struct dp_soc *soc,
  465. uint8_t *rx_tlv_hdr,
  466. struct dp_peer *ta_peer,
  467. qdf_nbuf_t nbuf)
  468. {
  469. }
  470. #endif
  471. /*
  472. * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
  473. *
  474. * @soc: core txrx main context
  475. * @ta_peer : source peer entry
  476. * @rx_tlv_hdr : start address of rx tlvs
  477. * @nbuf : nbuf that has to be intrabss forwarded
  * @msdu_metadata : MSDU metadata carrying the DA AST index
  478. *
  479. * Return: bool: true if it is forwarded else false
  480. */
  481. static bool
  482. dp_rx_intrabss_fwd(struct dp_soc *soc,
  483. struct dp_peer *ta_peer,
  484. uint8_t *rx_tlv_hdr,
  485. qdf_nbuf_t nbuf,
  486. struct hal_rx_msdu_metadata msdu_metadata)
  487. {
  488. uint16_t len;
  489. uint8_t is_frag;
  490. uint16_t da_peer_id = HTT_INVALID_PEER;
  491. struct dp_peer *da_peer = NULL;
  492. bool is_da_bss_peer = false;
  493. struct dp_ast_entry *ast_entry;
  494. qdf_nbuf_t nbuf_copy;
  495. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  496. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  497. struct cdp_tid_rx_stats *tid_stats = &ta_peer->vdev->pdev->stats.
  498. tid_stats.tid_rx_stats[ring_id][tid];
  499. /* check if the destination peer is available in peer table
  500. * and also check if the source peer and destination peer
  501. * belong to the same vap and destination peer is not bss peer.
  502. */
  503. if ((qdf_nbuf_is_da_valid(nbuf) && !qdf_nbuf_is_da_mcbc(nbuf))) {
  504. ast_entry = soc->ast_table[msdu_metadata.da_idx];
  505. if (!ast_entry)
  506. return false;
  507. if (ast_entry->type == CDP_TXRX_AST_TYPE_DA) {
  508. ast_entry->is_active = TRUE;
  509. return false;
  510. }
  511. da_peer_id = ast_entry->peer_id;
  512. if (da_peer_id == HTT_INVALID_PEER)
  513. return false;
  514. /* TA peer cannot be same as peer(DA) on which AST is present
  515. * this indicates a change in topology and that AST entries
  516. * are yet to be updated.
  517. */
  518. if (da_peer_id == ta_peer->peer_id)
  519. return false;
  520. if (ast_entry->vdev_id != ta_peer->vdev->vdev_id)
  521. return false;
  522. da_peer = dp_peer_get_ref_by_id(soc, da_peer_id,
  523. DP_MOD_ID_RX);
  524. if (!da_peer)
  525. return false;
  526. is_da_bss_peer = da_peer->bss_peer;
  527. dp_peer_unref_delete(da_peer, DP_MOD_ID_RX);
  528. if (!is_da_bss_peer) {
  529. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  530. is_frag = qdf_nbuf_is_frag(nbuf);
  531. memset(nbuf->cb, 0x0, sizeof(nbuf->cb));
  532. /* If the source or destination peer is in the isolation
  533. * list then don't forward; instead push to the bridge stack.
  534. */
  535. if (dp_get_peer_isolation(ta_peer) ||
  536. dp_get_peer_isolation(da_peer))
  537. return false;
  538. /* linearize the nbuf just before we send to
  539. * dp_tx_send()
  540. */
  541. if (qdf_unlikely(is_frag)) {
  542. if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
  543. return false;
  544. nbuf = qdf_nbuf_unshare(nbuf);
  545. if (!nbuf) {
  546. DP_STATS_INC_PKT(ta_peer,
  547. rx.intra_bss.fail,
  548. 1,
  549. len);
  550. /* return true even though the pkt is
  551. * not forwarded. Basically skb_unshare
  552. * failed and we want to continue with
  553. * next nbuf.
  554. */
  555. tid_stats->fail_cnt[INTRABSS_DROP]++;
  556. return true;
  557. }
  558. }
  559. if (!dp_tx_send((struct cdp_soc_t *)soc,
  560. ta_peer->vdev->vdev_id, nbuf)) {
  561. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  562. len);
  563. return true;
  564. } else {
  565. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  566. len);
  567. tid_stats->fail_cnt[INTRABSS_DROP]++;
  568. return false;
  569. }
  570. }
  571. }
  572. /* if it is a broadcast pkt (eg: ARP) and it is not its own
  573. * source, then clone the pkt and send the cloned pkt for
  574. * intra BSS forwarding and original pkt up the network stack
  575. * Note: how do we handle multicast pkts. do we forward
  576. * all multicast pkts as is or let a higher layer module
  577. * like igmpsnoop decide whether to forward or not with
  578. * Mcast enhancement.
  579. */
  580. else if (qdf_unlikely((qdf_nbuf_is_da_mcbc(nbuf) &&
  581. !ta_peer->bss_peer))) {
  582. if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
  583. goto end;
  584. /* If the source peer is in the isolation list,
  585. * then don't forward; instead push to the bridge stack
  586. */
  587. if (dp_get_peer_isolation(ta_peer))
  588. goto end;
  589. nbuf_copy = qdf_nbuf_copy(nbuf);
  590. if (!nbuf_copy)
  591. goto end;
  592. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  593. memset(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
  594. /* Set cb->ftype to intrabss FWD */
  595. qdf_nbuf_set_tx_ftype(nbuf_copy, CB_FTYPE_INTRABSS_FWD);
  596. if (dp_tx_send((struct cdp_soc_t *)soc,
  597. ta_peer->vdev->vdev_id, nbuf_copy)) {
  598. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1, len);
  599. tid_stats->fail_cnt[INTRABSS_DROP]++;
  600. qdf_nbuf_free(nbuf_copy);
  601. } else {
  602. DP_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1, len);
  603. tid_stats->intrabss_cnt++;
  604. }
  605. }
  606. end:
  607. /* return false as we have to still send the original pkt
  608. * up the stack
  609. */
  610. return false;
  611. }
  612. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  613. #ifdef MESH_MODE_SUPPORT
  614. /**
  615. * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
  616. *
  617. * @vdev: DP Virtual device handle
  618. * @nbuf: Buffer pointer
  619. * @rx_tlv_hdr: start of rx tlv header
  620. * @peer: pointer to peer
  621. *
  622. * This function allocates memory for mesh receive stats and fills in the
  623. * required stats. It stores the memory address in the skb cb.
  624. *
  625. * Return: void
  626. */
  627. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  628. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  629. {
  630. struct mesh_recv_hdr_s *rx_info = NULL;
  631. uint32_t pkt_type;
  632. uint32_t nss;
  633. uint32_t rate_mcs;
  634. uint32_t bw;
  635. uint8_t primary_chan_num;
  636. uint32_t center_chan_freq;
  637. struct dp_soc *soc;
  638. /* fill recv mesh stats */
  639. rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
  640. /* upper layers are responsible for freeing this memory */
  641. if (!rx_info) {
  642. dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
  643. vdev->pdev->soc);
  644. DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
  645. return;
  646. }
  647. rx_info->rs_flags = MESH_RXHDR_VER1;
  648. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  649. rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
  650. if (qdf_nbuf_is_rx_chfrag_end(nbuf))
  651. rx_info->rs_flags |= MESH_RX_LAST_MSDU;
  652. if (hal_rx_attn_msdu_get_is_decrypted(rx_tlv_hdr)) {
  653. rx_info->rs_flags |= MESH_RX_DECRYPTED;
  654. rx_info->rs_keyix = hal_rx_msdu_get_keyid(rx_tlv_hdr);
  655. if (vdev->osif_get_key)
  656. vdev->osif_get_key(vdev->osif_vdev,
  657. &rx_info->rs_decryptkey[0],
  658. &peer->mac_addr.raw[0],
  659. rx_info->rs_keyix);
  660. }
  661. rx_info->rs_snr = peer->stats.rx.snr;
  662. rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
  663. soc = vdev->pdev->soc;
  664. primary_chan_num = hal_rx_msdu_start_get_freq(rx_tlv_hdr);
  665. center_chan_freq = hal_rx_msdu_start_get_freq(rx_tlv_hdr) >> 16;
  666. if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
  667. rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
  668. soc->ctrl_psoc,
  669. vdev->pdev->pdev_id,
  670. center_chan_freq);
  671. }
  672. rx_info->rs_channel = primary_chan_num;
  673. pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
  674. rate_mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
  675. bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
  676. nss = hal_rx_msdu_start_nss_get(vdev->pdev->soc->hal_soc, rx_tlv_hdr);
  677. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
  678. (bw << 24);
  679. qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  680. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
  681. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
  682. rx_info->rs_flags,
  683. rx_info->rs_rssi,
  684. rx_info->rs_channel,
  685. rx_info->rs_ratephy1,
  686. rx_info->rs_keyix,
  687. rx_info->rs_snr);
  688. }
  689. /**
  690. * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
  691. *
  692. * @vdev: DP Virtual device handle
  693. * @nbuf: Buffer pointer
  694. * @rx_tlv_hdr: start of rx tlv header
  695. *
  696. * This checks whether the received packet matches any filter-out
  697. * category and drops the packet if it matches.
  698. *
  699. * Return: status (0 indicates drop, 1 indicates no drop)
  700. */
  701. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  702. uint8_t *rx_tlv_hdr)
  703. {
  704. union dp_align_mac_addr mac_addr;
  705. struct dp_soc *soc = vdev->pdev->soc;
  706. if (qdf_unlikely(vdev->mesh_rx_filter)) {
  707. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
  708. if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  709. rx_tlv_hdr))
  710. return QDF_STATUS_SUCCESS;
  711. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
  712. if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
  713. rx_tlv_hdr))
  714. return QDF_STATUS_SUCCESS;
  715. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
  716. if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  717. rx_tlv_hdr) &&
  718. !hal_rx_mpdu_get_to_ds(soc->hal_soc,
  719. rx_tlv_hdr))
  720. return QDF_STATUS_SUCCESS;
  721. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
  722. if (hal_rx_mpdu_get_addr1(soc->hal_soc,
  723. rx_tlv_hdr,
  724. &mac_addr.raw[0]))
  725. return QDF_STATUS_E_FAILURE;
  726. if (!qdf_mem_cmp(&mac_addr.raw[0],
  727. &vdev->mac_addr.raw[0],
  728. QDF_MAC_ADDR_SIZE))
  729. return QDF_STATUS_SUCCESS;
  730. }
  731. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
  732. if (hal_rx_mpdu_get_addr2(soc->hal_soc,
  733. rx_tlv_hdr,
  734. &mac_addr.raw[0]))
  735. return QDF_STATUS_E_FAILURE;
  736. if (!qdf_mem_cmp(&mac_addr.raw[0],
  737. &vdev->mac_addr.raw[0],
  738. QDF_MAC_ADDR_SIZE))
  739. return QDF_STATUS_SUCCESS;
  740. }
  741. }
  742. return QDF_STATUS_E_FAILURE;
  743. }
  744. #else
  745. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  746. uint8_t *rx_tlv_hdr, struct dp_peer *peer)
  747. {
  748. }
  749. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  750. uint8_t *rx_tlv_hdr)
  751. {
  752. return QDF_STATUS_E_FAILURE;
  753. }
  754. #endif
  755. #ifdef FEATURE_NAC_RSSI
  756. /**
  757. * dp_rx_nac_filter(): Function to perform filtering of non-associated
  758. * clients
  759. * @pdev: DP pdev handle
  760. * @rx_pkt_hdr: Rx packet Header
  761. *
  762. * return: dp_vdev*
  763. */
  764. static
  765. struct dp_vdev *dp_rx_nac_filter(struct dp_pdev *pdev,
  766. uint8_t *rx_pkt_hdr)
  767. {
  768. struct ieee80211_frame *wh;
  769. struct dp_neighbour_peer *peer = NULL;
  770. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  771. if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) != IEEE80211_FC1_DIR_TODS)
  772. return NULL;
  773. qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
  774. TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
  775. neighbour_peer_list_elem) {
  776. if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
  777. wh->i_addr2, QDF_MAC_ADDR_SIZE) == 0) {
  778. dp_rx_debug("%pK: NAC configuration matched for mac-%2x:%2x:%2x:%2x:%2x:%2x",
  779. pdev->soc,
  780. peer->neighbour_peers_macaddr.raw[0],
  781. peer->neighbour_peers_macaddr.raw[1],
  782. peer->neighbour_peers_macaddr.raw[2],
  783. peer->neighbour_peers_macaddr.raw[3],
  784. peer->neighbour_peers_macaddr.raw[4],
  785. peer->neighbour_peers_macaddr.raw[5]);
  786. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  787. return pdev->monitor_vdev;
  788. }
  789. }
  790. qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
  791. return NULL;
  792. }
  793. /**
  794. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  795. * @soc: DP SOC handle
  796. * @mpdu: mpdu for which peer is invalid
  797. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
  798. * pool_id have the same mapping)
  799. *
  800. * return: integer type
  801. */
  802. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  803. uint8_t mac_id)
  804. {
  805. struct dp_invalid_peer_msg msg;
  806. struct dp_vdev *vdev = NULL;
  807. struct dp_pdev *pdev = NULL;
  808. struct ieee80211_frame *wh;
  809. qdf_nbuf_t curr_nbuf, next_nbuf;
  810. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  811. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
  813. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  814. dp_rx_debug("%pK: Drop decapped frames", soc);
  815. goto free;
  816. }
  817. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  818. if (!DP_FRAME_IS_DATA(wh)) {
  819. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  820. goto free;
  821. }
  822. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  823. dp_rx_err("%pK: Invalid nbuf length", soc);
  824. goto free;
  825. }
  826. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  827. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  828. dp_rx_err("%pK: PDEV %s", soc, !pdev ? "not found" : "down");
  829. goto free;
  830. }
  831. if (pdev->filter_neighbour_peers) {
  832. /* Next hop scenario not yet handled */
  833. vdev = dp_rx_nac_filter(pdev, rx_pkt_hdr);
  834. if (vdev) {
  835. dp_rx_mon_deliver(soc, pdev->pdev_id,
  836. pdev->invalid_peer_head_msdu,
  837. pdev->invalid_peer_tail_msdu);
  838. pdev->invalid_peer_head_msdu = NULL;
  839. pdev->invalid_peer_tail_msdu = NULL;
  840. return 0;
  841. }
  842. }
  843. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  844. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  845. QDF_MAC_ADDR_SIZE) == 0) {
  846. goto out;
  847. }
  848. }
  849. if (!vdev) {
  850. dp_rx_err("%pK: VDEV not found", soc);
  851. goto free;
  852. }
  853. out:
  854. msg.wh = wh;
  855. qdf_nbuf_pull_head(mpdu, RX_PKT_TLVS_LEN);
  856. msg.nbuf = mpdu;
  857. msg.vdev_id = vdev->vdev_id;
  858. /*
  859. * NOTE: Only valid for HKv1.
  860. * If smart monitor mode is enabled on RE, we are getting invalid
  861. * peer frames with RA as STA mac of RE and the TA not matching
  862. * with any NAC list or the BSSID. Such frames need to be dropped
  863. * in order to avoid HM_WDS false addition.
  864. */
  865. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
  866. if (!soc->hw_nac_monitor_support &&
  867. pdev->filter_neighbour_peers &&
  868. vdev->opmode == wlan_op_mode_sta) {
  869. dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
  870. soc, wh->i_addr1);
  871. goto free;
  872. }
  873. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
  874. (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
  875. pdev->pdev_id, &msg);
  876. }
  877. free:
  878. /* Drop and free packet */
  879. curr_nbuf = mpdu;
  880. while (curr_nbuf) {
  881. next_nbuf = qdf_nbuf_next(curr_nbuf);
  882. qdf_nbuf_free(curr_nbuf);
  883. curr_nbuf = next_nbuf;
  884. }
  885. return 0;
  886. }
  887. /**
  888. * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
  889. * @soc: DP SOC handle
  890. * @mpdu: mpdu for which peer is invalid
  891. * @mpdu_done: if an mpdu is completed
  892. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
  893. * pool_id have the same mapping)
  894. *
  895. * Return: None
  896. */
  897. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  898. qdf_nbuf_t mpdu, bool mpdu_done,
  899. uint8_t mac_id)
  900. {
  901. /* Only trigger the process when mpdu is completed */
  902. if (mpdu_done)
  903. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  904. }
  905. #else
  906. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  907. uint8_t mac_id)
  908. {
  909. qdf_nbuf_t curr_nbuf, next_nbuf;
  910. struct dp_pdev *pdev;
  911. struct dp_vdev *vdev = NULL;
  912. struct ieee80211_frame *wh;
  913. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  914. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(rx_tlv_hdr);
  915. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  916. if (!DP_FRAME_IS_DATA(wh)) {
  917. QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
  918. "only for data frames");
  919. goto free;
  920. }
  921. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  922. dp_rx_info_rl("%pK: Invalid nbuf length", soc);
  923. goto free;
  924. }
  925. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  926. if (!pdev) {
  927. dp_rx_info_rl("%pK: PDEV not found", soc);
  928. goto free;
  929. }
  930. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  931. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  932. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  933. QDF_MAC_ADDR_SIZE) == 0) {
  934. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  935. goto out;
  936. }
  937. }
  938. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  939. if (!vdev) {
  940. dp_rx_info_rl("%pK: VDEV not found", soc);
  941. goto free;
  942. }
  943. out:
  944. if (soc->cdp_soc.ol_ops->rx_invalid_peer)
  945. soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
  946. free:
  947. /* reset the head and tail pointers */
  948. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  949. if (pdev) {
  950. pdev->invalid_peer_head_msdu = NULL;
  951. pdev->invalid_peer_tail_msdu = NULL;
  952. }
  953. /* Drop and free packet */
  954. curr_nbuf = mpdu;
  955. while (curr_nbuf) {
  956. next_nbuf = qdf_nbuf_next(curr_nbuf);
  957. qdf_nbuf_free(curr_nbuf);
  958. curr_nbuf = next_nbuf;
  959. }
  966. return 0;
  967. }
  968. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  969. qdf_nbuf_t mpdu, bool mpdu_done,
  970. uint8_t mac_id)
  971. {
  972. /* Process the nbuf */
  973. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  974. }
  975. #endif
  976. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  977. #ifdef RECEIVE_OFFLOAD
  978. /**
  979. * dp_rx_print_offload_info() - Print offload info from RX TLV
  980. * @soc: dp soc handle
  981. * @rx_tlv: RX TLV for which offload information is to be printed
  982. *
  983. * Return: None
  984. */
  985. static void dp_rx_print_offload_info(struct dp_soc *soc, uint8_t *rx_tlv)
  986. {
  987. dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
  988. dp_verbose_debug("lro_eligible 0x%x", HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
  989. dp_verbose_debug("pure_ack 0x%x", HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv));
  990. dp_verbose_debug("chksum 0x%x", hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  991. rx_tlv));
  992. dp_verbose_debug("TCP seq num 0x%x", HAL_RX_TLV_GET_TCP_SEQ(rx_tlv));
  993. dp_verbose_debug("TCP ack num 0x%x", HAL_RX_TLV_GET_TCP_ACK(rx_tlv));
  994. dp_verbose_debug("TCP window 0x%x", HAL_RX_TLV_GET_TCP_WIN(rx_tlv));
  995. dp_verbose_debug("TCP protocol 0x%x", HAL_RX_TLV_GET_TCP_PROTO(rx_tlv));
  996. dp_verbose_debug("TCP offset 0x%x", HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv));
  997. dp_verbose_debug("toeplitz 0x%x", HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
  998. dp_verbose_debug("---------------------------------------------------------");
  999. }
  1000. /**
  1001. * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
  1002. * @soc: DP SOC handle
  1003. * @rx_tlv: RX TLV received for the msdu
  1004. * @msdu: msdu for which GRO info needs to be filled
  1005. * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
  1006. *
  1007. * Return: None
  1008. */
  1009. static
  1010. void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  1011. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  1012. {
  1013. if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
  1014. return;
  1015. /* Filling up RX offload info only for TCP packets */
  1016. if (!HAL_RX_TLV_GET_TCP_PROTO(rx_tlv))
  1017. return;
  1018. *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
  1019. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) =
  1020. HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv);
  1021. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) =
  1022. HAL_RX_TLV_GET_TCP_PURE_ACK(rx_tlv);
  1023. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  1024. hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  1025. rx_tlv);
  1026. QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) =
  1027. HAL_RX_TLV_GET_TCP_SEQ(rx_tlv);
  1028. QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) =
  1029. HAL_RX_TLV_GET_TCP_ACK(rx_tlv);
  1030. QDF_NBUF_CB_RX_TCP_WIN(msdu) =
  1031. HAL_RX_TLV_GET_TCP_WIN(rx_tlv);
  1032. QDF_NBUF_CB_RX_TCP_PROTO(msdu) =
  1033. HAL_RX_TLV_GET_TCP_PROTO(rx_tlv);
  1034. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) =
  1035. HAL_RX_TLV_GET_IPV6(rx_tlv);
  1036. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) =
  1037. HAL_RX_TLV_GET_TCP_OFFSET(rx_tlv);
  1038. QDF_NBUF_CB_RX_FLOW_ID(msdu) =
  1039. HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv);
  1040. dp_rx_print_offload_info(soc, rx_tlv);
  1041. }
  1042. #else
  1043. static void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  1044. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  1045. {
  1046. }
  1047. #endif /* RECEIVE_OFFLOAD */
  1048. /**
  1049. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  1050. *
  1051. * @nbuf: pointer to msdu.
  1052. * @mpdu_len: mpdu length
  1053. *
  1054. * Return: true if nbuf is the last msdu of the mpdu, else false.
  1055. */
  1056. static inline bool dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
  1057. {
  1058. bool last_nbuf;
  1059. if (*mpdu_len > (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
  1060. qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
  1061. last_nbuf = false;
  1062. } else {
  1063. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
  1064. last_nbuf = true;
  1065. }
  1066. *mpdu_len -= (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN);
  1067. return last_nbuf;
  1068. }
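/*
 * Worked example (hypothetical numbers; RX_DATA_BUFFER_SIZE and
 * RX_PKT_TLVS_LEN are target specific): with a 2048-byte buffer and a
 * 384-byte TLV area, each nbuf carries up to 1664 payload bytes. For an
 * mpdu_len of 4000, the first two calls set the pktlen to 2048 and return
 * false, leaving 2336 and then 672 bytes; the third call sets the pktlen
 * to 672 + RX_PKT_TLVS_LEN and returns true (last nbuf).
 */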
  1069. /**
  1070. * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
  1071. * multiple nbufs.
  1072. * @soc: DP SOC handle
  1073. * @nbuf: pointer to the first msdu of an amsdu.
  1074. *
  1075. * This function implements the creation of RX frag_list for cases
  1076. * where an MSDU is spread across multiple nbufs.
  1077. *
  1078. * Return: returns the head nbuf which contains complete frag_list.
  1079. */
  1080. qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1081. {
  1082. qdf_nbuf_t parent, frag_list, next = NULL;
  1083. uint16_t frag_list_len = 0;
  1084. uint16_t mpdu_len;
  1085. bool last_nbuf;
  1086. /*
  1087. * Use the msdu len from the REO entry descriptor instead, since
  1088. * there are cases where the RX PKT TLV is corrupted while the msdu_len
  1089. * from the REO descriptor is correct for non-raw RX scatter msdus.
  1090. */
  1091. mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1092. /*
  1093. * this is a case where the complete msdu fits in one single nbuf.
  1094. * in this case HW sets both start and end bit and we only need to
  1095. * reset these bits for RAW mode simulator to decap the pkt
  1096. */
  1097. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  1098. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1099. qdf_nbuf_set_pktlen(nbuf, mpdu_len + RX_PKT_TLVS_LEN);
  1100. qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
  1101. return nbuf;
  1102. }
  1103. /*
  1104. * This is a case where we have multiple msdus (A-MSDU) spread across
  1105. * multiple nbufs. here we create a fraglist out of these nbufs.
  1106. *
  1107. * the moment we encounter a nbuf with continuation bit set we
  1108. * know for sure we have an MSDU which is spread across multiple
  1109. * nbufs. We loop through and reap nbufs till we reach last nbuf.
  1110. */
  1111. parent = nbuf;
  1112. frag_list = nbuf->next;
  1113. nbuf = nbuf->next;
  1114. /*
  1115. * set the start bit in the first nbuf we encounter with continuation
  1116. * bit set. This has the proper mpdu length set as it is the first
  1117. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  1118. * nbufs will form the frag_list of the parent nbuf.
  1119. */
  1120. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  1121. last_nbuf = dp_rx_adjust_nbuf_len(parent, &mpdu_len);
  1122. /*
  1123. * HW issue: MSDU cont bit is set but reported MPDU length can fit
  1124. * in to single buffer
  1125. *
  1126. * Increment error stats and avoid SG list creation
  1127. */
  1128. if (last_nbuf) {
  1129. DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
  1130. qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
  1131. return parent;
  1132. }
  1133. /*
  1134. * this is where we set the length of the fragments which are
  1135. * associated to the parent nbuf. We iterate through the frag_list
  1136. * till we hit the last_nbuf of the list.
  1137. */
  1138. do {
  1139. last_nbuf = dp_rx_adjust_nbuf_len(nbuf, &mpdu_len);
  1140. qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
  1141. frag_list_len += qdf_nbuf_len(nbuf);
  1142. if (last_nbuf) {
  1143. next = nbuf->next;
  1144. nbuf->next = NULL;
  1145. break;
  1146. }
  1147. nbuf = nbuf->next;
  1148. } while (!last_nbuf);
  1149. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  1150. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  1151. parent->next = next;
  1152. qdf_nbuf_pull_head(parent, RX_PKT_TLVS_LEN);
  1153. return parent;
  1154. }
  1155. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1156. #ifdef QCA_PEER_EXT_STATS
  1157. /*
  1158. * dp_rx_compute_tid_delay - Compute per TID delay stats
  1159. * @stats: per TID delay stats to update
  1160. * @nbuf: NBuffer
  1161. *
  1162. * Return: Void
  1163. */
  1164. void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  1165. qdf_nbuf_t nbuf)
  1166. {
  1167. struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
  1168. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1169. dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
  1170. }
  1171. #endif /* QCA_PEER_EXT_STATS */
  1172. /**
  1173. * dp_rx_compute_delay() - Compute and fill in all timestamps
  1174. * to pass in correct fields
  1175. *
  1176. * @vdev: DP vdev handle
  1177. * @nbuf: received nbuf carrying the reap and delivery timestamps
  1179. * Return: none
  1180. */
  1181. void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1182. {
  1183. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  1184. int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
  1185. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1186. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1187. uint32_t interframe_delay =
  1188. (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
  1189. dp_update_delay_stats(vdev->pdev, to_stack, tid,
  1190. CDP_DELAY_STATS_REAP_STACK, ring_id);
  1191. /*
  1192. * Update interframe delay stats calculated at deliver_data_ol point.
1193. * Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so the
1194. * interframe delay will not be calculated correctly for the 1st frame.
1195. * On the other hand, this helps avoid an extra per-packet check of
1196. * vdev->prev_rx_deliver_tstamp.
  1197. */
  1198. dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
  1199. CDP_DELAY_STATS_RX_INTERFRAME, ring_id);
  1200. vdev->prev_rx_deliver_tstamp = current_ts;
  1201. }
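/*
* Example (illustrative values): if the nbuf was timestamped at reap time T
* and handed to the stack 3 ms later, to_stack is 3 ms and is accounted
* under CDP_DELAY_STATS_REAP_STACK. If the previous frame on this vdev was
* delivered 10 ms before the current one, interframe_delay is 10 ms and is
* accounted under CDP_DELAY_STATS_RX_INTERFRAME.
*/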
  1202. /**
  1203. * dp_rx_drop_nbuf_list() - drop an nbuf list
  1204. * @pdev: dp pdev reference
1205. * @buf_list: buffer list to be dropped
  1206. *
  1207. * Return: int (number of bufs dropped)
  1208. */
  1209. static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  1210. qdf_nbuf_t buf_list)
  1211. {
  1212. struct cdp_tid_rx_stats *stats = NULL;
  1213. uint8_t tid = 0, ring_id = 0;
  1214. int num_dropped = 0;
  1215. qdf_nbuf_t buf, next_buf;
  1216. buf = buf_list;
  1217. while (buf) {
  1218. ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
  1219. next_buf = qdf_nbuf_queue_next(buf);
  1220. tid = qdf_nbuf_get_tid_val(buf);
  1221. if (qdf_likely(pdev)) {
  1222. stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1223. stats->fail_cnt[INVALID_PEER_VDEV]++;
  1224. stats->delivered_to_stack--;
  1225. }
  1226. qdf_nbuf_free(buf);
  1227. buf = next_buf;
  1228. num_dropped++;
  1229. }
  1230. return num_dropped;
  1231. }
  1232. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1233. /**
1234. * dp_rx_is_list_ready() - Make different lists for 4-address and 3-address frames
  1235. * @nbuf_head: skb list head
  1236. * @vdev: vdev
  1237. * @peer: peer
  1238. * @peer_id: peer id of new received frame
  1239. * @vdev_id: vdev_id of new received frame
  1240. *
  1241. * Return: true if peer_ids are different.
  1242. */
  1243. static inline bool
  1244. dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
  1245. struct dp_vdev *vdev,
  1246. struct dp_peer *peer,
  1247. uint16_t peer_id,
  1248. uint8_t vdev_id)
  1249. {
  1250. if (nbuf_head && peer && (peer->peer_id != peer_id))
  1251. return true;
  1252. return false;
  1253. }
  1254. /**
  1255. * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  1256. * @soc: core txrx main context
  1257. * @vdev: vdev
  1258. * @peer: peer
  1259. * @nbuf_head: skb list head
  1260. *
  1261. * Return: true if packet is delivered to netdev per STA.
  1262. */
  1263. static inline bool
  1264. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1265. struct dp_peer *peer, qdf_nbuf_t nbuf_head)
  1266. {
  1267. /*
  1268. * When extended WDS is disabled, frames are sent to AP netdevice.
  1269. */
  1270. if (qdf_likely(!vdev->wds_ext_enabled))
  1271. return false;
  1272. /*
  1273. * There can be 2 cases:
1274. * 1. Send frame to parent netdev if it's not for netdev per STA
  1275. * 2. If frame is meant for netdev per STA:
  1276. * a. Send frame to appropriate netdev using registered fp.
  1277. * b. If fp is NULL, drop the frames.
  1278. */
  1279. if (!peer->wds_ext.init)
  1280. return false;
  1281. if (peer->osif_rx)
  1282. peer->osif_rx(peer->wds_ext.osif_peer, nbuf_head);
  1283. else
  1284. dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1285. return true;
  1286. }
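/*
* Note: with extended WDS enabled each associated STA can have its own
* netdevice; frames are handed to it through the osif_rx callback registered
* for the peer (peer->wds_ext.osif_peer). If that callback is not registered
* the list is dropped, and peers that have not completed wds_ext init still
* go through the regular vdev delivery path.
*/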
  1287. #else
  1288. static inline bool
  1289. dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
  1290. struct dp_vdev *vdev,
  1291. struct dp_peer *peer,
  1292. uint16_t peer_id,
  1293. uint8_t vdev_id)
  1294. {
  1295. if (nbuf_head && vdev && (vdev->vdev_id != vdev_id))
  1296. return true;
  1297. return false;
  1298. }
  1299. static inline bool
  1300. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1301. struct dp_peer *peer, qdf_nbuf_t nbuf_head)
  1302. {
  1303. return false;
  1304. }
  1305. #endif
  1306. #ifdef PEER_CACHE_RX_PKTS
  1307. /**
  1308. * dp_rx_flush_rx_cached() - flush cached rx frames
  1309. * @peer: peer
  1310. * @drop: flag to drop frames or forward to net stack
  1311. *
  1312. * Return: None
  1313. */
  1314. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  1315. {
  1316. struct dp_peer_cached_bufq *bufqi;
  1317. struct dp_rx_cached_buf *cache_buf = NULL;
  1318. ol_txrx_rx_fp data_rx = NULL;
  1319. int num_buff_elem;
  1320. QDF_STATUS status;
  1321. if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
  1322. qdf_atomic_dec(&peer->flush_in_progress);
  1323. return;
  1324. }
  1325. qdf_spin_lock_bh(&peer->peer_info_lock);
  1326. if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
  1327. data_rx = peer->vdev->osif_rx;
  1328. else
  1329. drop = true;
  1330. qdf_spin_unlock_bh(&peer->peer_info_lock);
  1331. bufqi = &peer->bufq_info;
  1332. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1333. qdf_list_remove_front(&bufqi->cached_bufq,
  1334. (qdf_list_node_t **)&cache_buf);
  1335. while (cache_buf) {
  1336. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
  1337. cache_buf->buf);
  1338. bufqi->entries -= num_buff_elem;
  1339. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1340. if (drop) {
  1341. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1342. cache_buf->buf);
  1343. } else {
  1344. /* Flush the cached frames to OSIF DEV */
  1345. status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
  1346. if (status != QDF_STATUS_SUCCESS)
  1347. bufqi->dropped = dp_rx_drop_nbuf_list(
  1348. peer->vdev->pdev,
  1349. cache_buf->buf);
  1350. }
  1351. qdf_mem_free(cache_buf);
  1352. cache_buf = NULL;
  1353. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1354. qdf_list_remove_front(&bufqi->cached_bufq,
  1355. (qdf_list_node_t **)&cache_buf);
  1356. }
  1357. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1358. qdf_atomic_dec(&peer->flush_in_progress);
  1359. }
  1360. /**
  1361. * dp_rx_enqueue_rx() - cache rx frames
  1362. * @peer: peer
  1363. * @rx_buf_list: cache buffer list
  1364. *
  1365. * Return: None
  1366. */
  1367. static QDF_STATUS
  1368. dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
  1369. {
  1370. struct dp_rx_cached_buf *cache_buf;
  1371. struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
  1372. int num_buff_elem;
  1373. dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
  1374. bufqi->dropped);
  1375. if (!peer->valid) {
  1376. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1377. rx_buf_list);
  1378. return QDF_STATUS_E_INVAL;
  1379. }
  1380. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1381. if (bufqi->entries >= bufqi->thresh) {
  1382. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1383. rx_buf_list);
  1384. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1385. return QDF_STATUS_E_RESOURCES;
  1386. }
  1387. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1388. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
  1389. cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
  1390. if (!cache_buf) {
  1391. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1392. "Failed to allocate buf to cache rx frames");
  1393. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1394. rx_buf_list);
  1395. return QDF_STATUS_E_NOMEM;
  1396. }
  1397. cache_buf->buf = rx_buf_list;
  1398. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1399. qdf_list_insert_back(&bufqi->cached_bufq,
  1400. &cache_buf->node);
  1401. bufqi->entries += num_buff_elem;
  1402. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1403. return QDF_STATUS_SUCCESS;
  1404. }
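/*
* Usage note: dp_rx_enqueue_rx() is used while the peer has no rx callback
* registered yet (e.g. during association); the cached list is later either
* forwarded or dropped by dp_rx_flush_rx_cached() once the peer state and
* vdev->osif_rx are known. The number of cached entries is bounded by
* bufqi->thresh.
*/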
  1405. static inline
  1406. bool dp_rx_is_peer_cache_bufq_supported(void)
  1407. {
  1408. return true;
  1409. }
  1410. #else
  1411. static inline
  1412. bool dp_rx_is_peer_cache_bufq_supported(void)
  1413. {
  1414. return false;
  1415. }
  1416. static inline QDF_STATUS
  1417. dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
  1418. {
  1419. return QDF_STATUS_SUCCESS;
  1420. }
  1421. #endif
  1422. #ifndef DELIVERY_TO_STACK_STATUS_CHECK
  1423. /**
  1424. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1425. * using the appropriate callback functions.
1426. * @soc: soc
1427. * @vdev: vdev
1428. * @peer: peer
1429. * @nbuf_head: skb list head
  1431. *
  1432. * Return: None
  1433. */
  1434. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1435. struct dp_vdev *vdev,
  1436. struct dp_peer *peer,
  1437. qdf_nbuf_t nbuf_head)
  1438. {
  1439. if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
  1440. peer, nbuf_head)))
  1441. return;
  1442. /* Function pointer initialized only when FISA is enabled */
  1443. if (vdev->osif_fisa_rx)
  1444. /* on failure send it via regular path */
  1445. vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1446. else
  1447. vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1448. }
  1449. #else
  1450. /**
  1451. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
1452. * using the appropriate callback functions.
1453. * @soc: soc
1454. * @vdev: vdev
1455. * @peer: peer
1456. * @nbuf_head: skb list head
  1458. *
  1459. * Check the return status of the call back function and drop
  1460. * the packets if the return status indicates a failure.
  1461. *
  1462. * Return: None
  1463. */
  1464. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1465. struct dp_vdev *vdev,
  1466. struct dp_peer *peer,
  1467. qdf_nbuf_t nbuf_head)
  1468. {
  1469. int num_nbuf = 0;
  1470. QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
  1471. /* Function pointer initialized only when FISA is enabled */
  1472. if (vdev->osif_fisa_rx)
  1473. /* on failure send it via regular path */
  1474. ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1475. else if (vdev->osif_rx)
  1476. ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1477. if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
  1478. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1479. DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
  1480. if (peer)
  1481. DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
  1482. }
  1483. }
  1484. #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
  1485. void dp_rx_deliver_to_stack(struct dp_soc *soc,
  1486. struct dp_vdev *vdev,
  1487. struct dp_peer *peer,
  1488. qdf_nbuf_t nbuf_head,
  1489. qdf_nbuf_t nbuf_tail)
  1490. {
  1491. int num_nbuf = 0;
  1492. if (qdf_unlikely(!vdev || vdev->delete.pending)) {
  1493. num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
  1494. /*
  1495. * This is a special case where vdev is invalid,
  1496. * so we cannot know the pdev to which this packet
  1497. * belonged. Hence we update the soc rx error stats.
  1498. */
  1499. DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
  1500. return;
  1501. }
  1502. /*
  1503. * highly unlikely to have a vdev without a registered rx
  1504. * callback function. if so let us free the nbuf_list.
  1505. */
  1506. if (qdf_unlikely(!vdev->osif_rx)) {
  1507. if (peer && dp_rx_is_peer_cache_bufq_supported()) {
  1508. dp_rx_enqueue_rx(peer, nbuf_head);
  1509. } else {
  1510. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
  1511. nbuf_head);
  1512. DP_STATS_DEC(peer, rx.to_stack.num, num_nbuf);
  1513. }
  1514. return;
  1515. }
  1516. if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
  1517. (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
  1518. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
  1519. &nbuf_tail, peer->mac_addr.raw);
  1520. }
  1521. dp_rx_check_delivery_to_stack(soc, vdev, peer, nbuf_head);
  1522. }
  1523. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1524. /**
  1525. * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware.
  1526. * @nbuf: pointer to the first msdu of an amsdu.
  1527. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  1528. *
1529. * The ip_summed field of the skb is set based on whether HW validated the
  1530. * IP/TCP/UDP checksum.
  1531. *
  1532. * Return: void
  1533. */
  1534. static inline void dp_rx_cksum_offload(struct dp_pdev *pdev,
  1535. qdf_nbuf_t nbuf,
  1536. uint8_t *rx_tlv_hdr)
  1537. {
  1538. qdf_nbuf_rx_cksum_t cksum = {0};
  1539. bool ip_csum_err = hal_rx_attn_ip_cksum_fail_get(rx_tlv_hdr);
  1540. bool tcp_udp_csum_er = hal_rx_attn_tcp_udp_cksum_fail_get(rx_tlv_hdr);
  1541. if (qdf_likely(!ip_csum_err && !tcp_udp_csum_er)) {
  1542. cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
  1543. qdf_nbuf_set_rx_cksum(nbuf, &cksum);
  1544. } else {
  1545. DP_STATS_INCC(pdev, err.ip_csum_err, 1, ip_csum_err);
  1546. DP_STATS_INCC(pdev, err.tcp_udp_csum_err, 1, tcp_udp_csum_er);
  1547. }
  1548. }
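/*
* Example (illustrative): for a TCP-over-IPv4 frame where neither
* ip_csum_err nor tcp_udp_csum_er is reported by HW, l4_result is set to
* QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY, so the network stack can skip its
* own software checksum validation. On any HW-reported checksum error the
* skb is left untouched and only the pdev error counters are incremented.
*/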
  1549. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1550. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer) \
  1551. { \
  1552. qdf_nbuf_t nbuf_local; \
  1553. struct dp_peer *peer_local; \
  1554. struct dp_vdev *vdev_local = vdev_hdl; \
  1555. do { \
  1556. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  1557. break; \
  1558. nbuf_local = nbuf; \
  1559. peer_local = peer; \
  1560. if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
  1561. break; \
  1562. else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
  1563. break; \
  1564. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  1565. (nbuf_local), \
  1566. (peer_local), 0, 1); \
  1567. } while (0); \
  1568. }
  1569. #else
  1570. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, peer)
  1571. #endif
  1572. /**
  1573. * dp_rx_msdu_stats_update() - update per msdu stats.
  1574. * @soc: core txrx main context
  1575. * @nbuf: pointer to the first msdu of an amsdu.
  1576. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  1577. * @peer: pointer to the peer object.
  1578. * @ring_id: reo dest ring number on which pkt is reaped.
  1579. * @tid_stats: per tid rx stats.
  1580. *
  1581. * update all the per msdu stats for that nbuf.
  1582. * Return: void
  1583. */
  1584. static void dp_rx_msdu_stats_update(struct dp_soc *soc,
  1585. qdf_nbuf_t nbuf,
  1586. uint8_t *rx_tlv_hdr,
  1587. struct dp_peer *peer,
  1588. uint8_t ring_id,
  1589. struct cdp_tid_rx_stats *tid_stats)
  1590. {
  1591. bool is_ampdu, is_not_amsdu;
  1592. uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
  1593. struct dp_vdev *vdev = peer->vdev;
  1594. qdf_ether_header_t *eh;
  1595. uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1596. dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, peer);
  1597. is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
  1598. qdf_nbuf_is_rx_chfrag_end(nbuf);
  1599. DP_STATS_INC_PKT(peer, rx.rcvd_reo[ring_id], 1, msdu_len);
  1600. DP_STATS_INCC(peer, rx.non_amsdu_cnt, 1, is_not_amsdu);
  1601. DP_STATS_INCC(peer, rx.amsdu_cnt, 1, !is_not_amsdu);
  1602. DP_STATS_INCC(peer, rx.rx_retries, 1, qdf_nbuf_is_rx_retry_flag(nbuf));
  1603. tid_stats->msdu_cnt++;
  1604. if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
  1605. (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
  1606. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1607. DP_STATS_INC_PKT(peer, rx.multicast, 1, msdu_len);
  1608. tid_stats->mcast_msdu_cnt++;
  1609. if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  1610. DP_STATS_INC_PKT(peer, rx.bcast, 1, msdu_len);
  1611. tid_stats->bcast_msdu_cnt++;
  1612. }
  1613. }
  1614. /*
  1615. * currently we can return from here as we have similar stats
  1616. * updated at per ppdu level instead of msdu level
  1617. */
  1618. if (!soc->process_rx_status)
  1619. return;
  1620. is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(rx_tlv_hdr);
  1621. DP_STATS_INCC(peer, rx.ampdu_cnt, 1, is_ampdu);
  1622. DP_STATS_INCC(peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
  1623. sgi = hal_rx_msdu_start_sgi_get(rx_tlv_hdr);
  1624. mcs = hal_rx_msdu_start_rate_mcs_get(rx_tlv_hdr);
  1625. tid = qdf_nbuf_get_tid_val(nbuf);
  1626. bw = hal_rx_msdu_start_bw_get(rx_tlv_hdr);
  1627. reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
  1628. rx_tlv_hdr);
  1629. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  1630. pkt_type = hal_rx_msdu_start_get_pkt_type(rx_tlv_hdr);
  1631. DP_STATS_INCC(peer, rx.rx_mpdu_cnt[mcs], 1,
  1632. ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  1633. DP_STATS_INCC(peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
  1634. ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  1635. DP_STATS_INC(peer, rx.bw[bw], 1);
  1636. /*
  1637. * only if nss > 0 and pkt_type is 11N/AC/AX,
  1638. * then increase index [nss - 1] in array counter.
  1639. */
  1640. if (nss > 0 && (pkt_type == DOT11_N ||
  1641. pkt_type == DOT11_AC ||
  1642. pkt_type == DOT11_AX))
  1643. DP_STATS_INC(peer, rx.nss[nss - 1], 1);
  1644. DP_STATS_INC(peer, rx.sgi_count[sgi], 1);
  1645. DP_STATS_INCC(peer, rx.err.mic_err, 1,
  1646. hal_rx_mpdu_end_mic_err_get(rx_tlv_hdr));
  1647. DP_STATS_INCC(peer, rx.err.decrypt_err, 1,
  1648. hal_rx_mpdu_end_decrypt_err_get(rx_tlv_hdr));
  1649. DP_STATS_INC(peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
  1650. DP_STATS_INC(peer, rx.reception_type[reception_type], 1);
  1651. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  1652. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  1653. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1654. ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  1655. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  1656. ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  1657. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1658. ((mcs <= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  1659. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  1660. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  1661. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1662. ((mcs <= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  1663. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  1664. ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  1665. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1666. ((mcs <= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  1667. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  1668. ((mcs >= MAX_MCS) && (pkt_type == DOT11_AX)));
  1669. DP_STATS_INCC(peer, rx.pkt_type[pkt_type].mcs_count[mcs], 1,
  1670. ((mcs < MAX_MCS) && (pkt_type == DOT11_AX)));
  1671. if ((soc->process_rx_status) &&
  1672. hal_rx_attn_first_mpdu_get(rx_tlv_hdr)) {
  1673. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  1674. if (!vdev->pdev)
  1675. return;
  1676. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
  1677. &peer->stats, peer->peer_id,
  1678. UPDATE_PEER_STATS,
  1679. vdev->pdev->pdev_id);
  1680. #endif
  1681. }
  1682. }
  1683. static inline bool is_sa_da_idx_valid(struct dp_soc *soc,
  1684. uint8_t *rx_tlv_hdr,
  1685. qdf_nbuf_t nbuf,
  1686. struct hal_rx_msdu_metadata msdu_info)
  1687. {
  1688. if ((qdf_nbuf_is_sa_valid(nbuf) &&
  1689. (msdu_info.sa_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))) ||
  1690. (!qdf_nbuf_is_da_mcbc(nbuf) &&
  1691. qdf_nbuf_is_da_valid(nbuf) &&
  1692. (msdu_info.da_idx > wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx))))
  1693. return false;
  1694. return true;
  1695. }
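/*
* is_sa_da_idx_valid() returns false when the sa_idx or da_idx reported by
* HW (with the corresponding valid bit set) exceeds the configured maximum
* AST index; dp_rx_process() then drops the frame and counts it under
* rx.err.invalid_sa_da_idx (see the HW-issue note in that function).
*/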
  1696. #ifndef WDS_VENDOR_EXTENSION
  1697. int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
  1698. struct dp_vdev *vdev,
  1699. struct dp_peer *peer)
  1700. {
  1701. return 1;
  1702. }
  1703. #endif
  1704. #ifdef RX_DESC_DEBUG_CHECK
  1705. /**
  1706. * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
  1707. * corruption
  1708. *
  1709. * @ring_desc: REO ring descriptor
  1710. * @rx_desc: Rx descriptor
  1711. *
1712. * Return: QDF_STATUS_SUCCESS if the buffer paddr is sane, QDF_STATUS_E_FAILURE otherwise
  1713. */
  1714. static inline
  1715. QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
  1716. struct dp_rx_desc *rx_desc)
  1717. {
  1718. struct hal_buf_info hbi;
  1719. hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
  1720. /* Sanity check for possible buffer paddr corruption */
  1721. if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
  1722. return QDF_STATUS_SUCCESS;
  1723. return QDF_STATUS_E_FAILURE;
  1724. }
  1725. /**
  1726. * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
  1727. * out of bound access from H.W
  1728. *
  1729. * @soc: DP soc
  1730. * @pkt_len: Packet length received from H.W
  1731. *
  1732. * Return: NONE
  1733. */
  1734. static inline void
  1735. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
  1736. uint32_t pkt_len)
  1737. {
  1738. struct rx_desc_pool *rx_desc_pool;
  1739. rx_desc_pool = &soc->rx_desc_buf[0];
  1740. qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
  1741. }
  1742. #else
  1743. static inline
  1744. QDF_STATUS dp_rx_desc_nbuf_sanity_check(hal_ring_desc_t ring_desc,
  1745. struct dp_rx_desc *rx_desc)
  1746. {
  1747. return QDF_STATUS_SUCCESS;
  1748. }
  1749. static inline void
  1750. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
  1751. #endif
  1752. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  1753. static inline
  1754. bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
  1755. {
  1756. bool limit_hit = false;
  1757. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  1758. limit_hit =
  1759. (num_reaped >= cfg->rx_reap_loop_pkt_limit) ? true : false;
  1760. if (limit_hit)
  1761. DP_STATS_INC(soc, rx.reap_loop_pkt_limit_hit, 1)
  1762. return limit_hit;
  1763. }
  1764. static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
  1765. {
  1766. return soc->wlan_cfg_ctx->rx_enable_eol_data_check;
  1767. }
  1768. #else
  1769. static inline
  1770. bool dp_rx_reap_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
  1771. {
  1772. return false;
  1773. }
  1774. static inline bool dp_rx_enable_eol_data_check(struct dp_soc *soc)
  1775. {
  1776. return false;
  1777. }
  1778. #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
  1779. #ifdef DP_RX_PKT_NO_PEER_DELIVER
  1780. /**
1781. * dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
1782. * no corresponding peer is found
  1783. * @soc: core txrx main context
  1784. * @nbuf: pkt skb pointer
  1785. *
1786. * This function will try to deliver some RX special frames to the stack
1787. * even when no matching peer is found. For instance, in the LFR case, some
1788. * EAPOL data will be sent to the host before peer_map is done.
  1789. *
  1790. * Return: None
  1791. */
  1792. static
  1793. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1794. {
  1795. uint16_t peer_id;
  1796. uint8_t vdev_id;
  1797. struct dp_vdev *vdev = NULL;
  1798. uint32_t l2_hdr_offset = 0;
  1799. uint16_t msdu_len = 0;
  1800. uint32_t pkt_len = 0;
  1801. uint8_t *rx_tlv_hdr;
  1802. uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
  1803. FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
  1804. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  1805. if (peer_id > soc->max_peers)
  1806. goto deliver_fail;
  1807. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  1808. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
  1809. if (!vdev || vdev->delete.pending || !vdev->osif_rx)
  1810. goto deliver_fail;
  1811. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
  1812. goto deliver_fail;
  1813. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1814. l2_hdr_offset =
  1815. hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
  1816. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1817. pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
  1818. QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
  1819. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  1820. qdf_nbuf_pull_head(nbuf,
  1821. RX_PKT_TLVS_LEN +
  1822. l2_hdr_offset);
  1823. if (dp_rx_is_special_frame(nbuf, frame_mask)) {
  1824. qdf_nbuf_set_exc_frame(nbuf, 1);
  1825. if (QDF_STATUS_SUCCESS !=
  1826. vdev->osif_rx(vdev->osif_vdev, nbuf))
  1827. goto deliver_fail;
  1828. DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
  1829. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  1830. return;
  1831. }
  1832. deliver_fail:
  1833. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  1834. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  1835. qdf_nbuf_free(nbuf);
  1836. if (vdev)
  1837. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  1838. }
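/*
* Only the frame types selected by frame_mask above (IPv4 ARP, DHCP and
* EAPOL plus IPv6 DHCP) are delivered without a peer; they are marked as
* exception frames before being handed to vdev->osif_rx. Everything else is
* counted under rx_invalid_peer and freed.
*/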
  1839. #else
  1840. static inline
  1841. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1842. {
  1843. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  1844. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  1845. qdf_nbuf_free(nbuf);
  1846. }
  1847. #endif
  1848. /**
  1849. * dp_rx_srng_get_num_pending() - get number of pending entries
  1850. * @hal_soc: hal soc opaque pointer
  1851. * @hal_ring: opaque pointer to the HAL Rx Ring
  1852. * @num_entries: number of entries in the hal_ring.
  1853. * @near_full: pointer to a boolean. This is set if ring is near full.
  1854. *
  1855. * The function returns the number of entries in a destination ring which are
  1856. * yet to be reaped. The function also checks if the ring is near full.
  1857. * If more than half of the ring needs to be reaped, the ring is considered
  1858. * approaching full.
1859. * The function uses hal_srng_dst_num_valid_locked to get the number of valid
  1860. * entries. It should not be called within a SRNG lock. HW pointer value is
  1861. * synced into cached_hp.
  1862. *
  1863. * Return: Number of pending entries if any
  1864. */
  1865. static
  1866. uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
  1867. hal_ring_handle_t hal_ring_hdl,
  1868. uint32_t num_entries,
  1869. bool *near_full)
  1870. {
  1871. uint32_t num_pending = 0;
  1872. num_pending = hal_srng_dst_num_valid_locked(hal_soc,
  1873. hal_ring_hdl,
  1874. true);
  1875. if (num_entries && (num_pending >= num_entries >> 1))
  1876. *near_full = true;
  1877. else
  1878. *near_full = false;
  1879. return num_pending;
  1880. }
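/*
* Example (illustrative): with num_entries = 1024 and 600 descriptors still
* valid in the ring, num_pending is 600 and *near_full is set because more
* than half of the ring (512) is yet to be reaped; the caller in
* dp_rx_process() uses this to decide whether to loop for more data.
*/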
  1881. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1882. #ifdef WLAN_SUPPORT_RX_FISA
  1883. void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
  1884. {
  1885. QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
  1886. qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
  1887. }
  1888. /**
  1889. * dp_rx_set_hdr_pad() - set l3 padding in nbuf cb
  1890. * @nbuf: pkt skb pointer
  1891. * @l3_padding: l3 padding
  1892. *
  1893. * Return: None
  1894. */
  1895. static inline
  1896. void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
  1897. {
  1898. QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
  1899. }
  1900. #else
  1901. void dp_rx_skip_tlvs(qdf_nbuf_t nbuf, uint32_t l3_padding)
  1902. {
  1903. qdf_nbuf_pull_head(nbuf, l3_padding + RX_PKT_TLVS_LEN);
  1904. }
  1905. static inline
  1906. void dp_rx_set_hdr_pad(qdf_nbuf_t nbuf, uint32_t l3_padding)
  1907. {
  1908. }
  1909. #endif
  1910. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1911. #ifdef DP_RX_DROP_RAW_FRM
  1912. /**
  1913. * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
  1914. * @nbuf: pkt skb pointer
  1915. *
  1916. * Return: true - raw frame, dropped
  1917. * false - not raw frame, do nothing
  1918. */
  1919. static inline
  1920. bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
  1921. {
  1922. if (qdf_nbuf_is_raw_frame(nbuf)) {
  1923. qdf_nbuf_free(nbuf);
  1924. return true;
  1925. }
  1926. return false;
  1927. }
  1928. #else
  1929. static inline
  1930. bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
  1931. {
  1932. return false;
  1933. }
  1934. #endif
  1935. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  1936. /**
  1937. * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
  1938. * @soc: Datapath soc structure
  1939. * @ring_num: REO ring number
  1940. * @ring_desc: REO ring descriptor
  1941. *
  1942. * Returns: None
  1943. */
  1944. static inline void
  1945. dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  1946. hal_ring_desc_t ring_desc)
  1947. {
  1948. struct dp_buf_info_record *record;
  1949. uint8_t rbm;
  1950. struct hal_buf_info hbi;
  1951. uint32_t idx;
  1952. if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
  1953. return;
  1954. hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
  1955. rbm = hal_rx_ret_buf_manager_get(ring_desc);
  1956. idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
  1957. DP_RX_HIST_MAX);
1958. /* No NULL check needed for record since it's an array */
  1959. record = &soc->rx_ring_history[ring_num]->entry[idx];
  1960. record->timestamp = qdf_get_log_timestamp();
  1961. record->hbi.paddr = hbi.paddr;
  1962. record->hbi.sw_cookie = hbi.sw_cookie;
  1963. record->hbi.rbm = rbm;
  1964. }
  1965. #else
  1966. static inline void
  1967. dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  1968. hal_ring_desc_t ring_desc)
  1969. {
  1970. }
  1971. #endif
  1972. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1973. /**
  1974. * dp_rx_update_stats() - Update soc level rx packet count
  1975. * @soc: DP soc handle
  1976. * @nbuf: nbuf received
  1977. *
  1978. * Returns: none
  1979. */
  1980. static inline void dp_rx_update_stats(struct dp_soc *soc,
  1981. qdf_nbuf_t nbuf)
  1982. {
  1983. DP_STATS_INC_PKT(soc, rx.ingress, 1,
  1984. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  1985. }
  1986. #else
  1987. static inline void dp_rx_update_stats(struct dp_soc *soc,
  1988. qdf_nbuf_t nbuf)
  1989. {
  1990. }
  1991. #endif
  1992. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  1993. /**
  1994. * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
  1995. * @soc : dp_soc handle
  1996. * @pdev: dp_pdev handle
1997. * @peer_id: peer id of the peer to which the frame belongs
1998. * @is_offload: rx offload flag
  1999. * @netbuf: Buffer pointer
  2000. *
  2001. * This function is used to deliver rx packet to packet capture
  2002. */
  2003. void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
  2004. uint16_t peer_id, uint32_t is_offload,
  2005. qdf_nbuf_t netbuf)
  2006. {
  2007. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
  2008. peer_id, is_offload, pdev->pdev_id);
  2009. }
  2010. void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2011. uint32_t is_offload)
  2012. {
  2013. uint16_t msdu_len = 0;
  2014. uint16_t peer_id, vdev_id;
  2015. uint32_t pkt_len = 0;
  2016. uint8_t *rx_tlv_hdr;
  2017. uint32_t l2_hdr_offset = 0;
  2018. struct hal_rx_msdu_metadata msdu_metadata;
  2019. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  2020. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  2021. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  2022. hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
  2023. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2024. pkt_len = msdu_len + msdu_metadata.l3_hdr_pad +
  2025. RX_PKT_TLVS_LEN;
  2026. l2_hdr_offset =
  2027. hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
  2028. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  2029. dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
  2030. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, nbuf,
  2031. HTT_INVALID_VDEV, is_offload, 0);
  2032. }
  2033. #endif
  2034. #if defined(FEATURE_MCL_REPEATER) && defined(FEATURE_MEC)
  2035. /**
  2036. * dp_rx_mec_check_wrapper() - wrapper to dp_rx_mcast_echo_check
  2037. * @soc: core DP main context
  2038. * @peer: dp peer handler
  2039. * @rx_tlv_hdr: start of the rx TLV header
  2040. * @nbuf: pkt buffer
  2041. *
  2042. * Return: bool (true if it is a looped back pkt else false)
  2043. */
  2044. static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
  2045. struct dp_peer *peer,
  2046. uint8_t *rx_tlv_hdr,
  2047. qdf_nbuf_t nbuf)
  2048. {
  2049. return dp_rx_mcast_echo_check(soc, peer, rx_tlv_hdr, nbuf);
  2050. }
  2051. #else
  2052. static inline bool dp_rx_mec_check_wrapper(struct dp_soc *soc,
  2053. struct dp_peer *peer,
  2054. uint8_t *rx_tlv_hdr,
  2055. qdf_nbuf_t nbuf)
  2056. {
  2057. return false;
  2058. }
  2059. #endif
  2060. #ifdef DISABLE_EAPOL_INTRABSS_FWD
  2061. /*
  2062. * dp_rx_intrabss_fwd_wrapper() - Wrapper API for intrabss fwd. For EAPOL
  2063. * pkt with DA not equal to vdev mac addr, fwd is not allowed.
  2064. * @soc: core txrx main context
  2065. * @ta_peer: source peer entry
  2066. * @rx_tlv_hdr: start address of rx tlvs
  2067. * @nbuf: nbuf that has to be intrabss forwarded
  2068. * @msdu_metadata: msdu metadata
  2069. *
  2070. * Return: true if it is forwarded else false
  2071. */
  2072. static inline
  2073. bool dp_rx_intrabss_fwd_wrapper(struct dp_soc *soc, struct dp_peer *ta_peer,
  2074. uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
  2075. struct hal_rx_msdu_metadata msdu_metadata)
  2076. {
  2077. if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf) &&
  2078. qdf_mem_cmp(qdf_nbuf_data(nbuf) +
  2079. QDF_NBUF_DEST_MAC_OFFSET,
  2080. ta_peer->vdev->mac_addr.raw,
  2081. QDF_MAC_ADDR_SIZE))) {
  2082. qdf_nbuf_free(nbuf);
  2083. DP_STATS_INC(soc, rx.err.intrabss_eapol_drop, 1);
  2084. return true;
  2085. }
  2086. return dp_rx_intrabss_fwd(soc, ta_peer, rx_tlv_hdr, nbuf,
  2087. msdu_metadata);
  2088. }
  2089. #define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \
  2090. dp_rx_intrabss_fwd_wrapper(soc, peer, rx_tlv_hdr, nbuf, \
  2091. msdu_metadata)
  2092. #else
  2093. #define DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata) \
  2094. dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr, nbuf, msdu_metadata)
  2095. #endif
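/*
* DP_RX_INTRABSS_FWD() is the single entry point used by dp_rx_process()
* below for intra-BSS forwarding; when DISABLE_EAPOL_INTRABSS_FWD is
* defined it additionally drops EAPOL frames whose destination MAC does not
* match the vdev MAC before attempting the forward.
*/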
  2096. /**
  2097. * dp_rx_process() - Brain of the Rx processing functionality
  2098. * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
  2099. * @int_ctx: per interrupt context
  2100. * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
  2101. * @reo_ring_num: ring number (0, 1, 2 or 3) of the reo ring.
  2102. * @quota: No. of units (packets) that can be serviced in one shot.
  2103. *
  2104. * This function implements the core of Rx functionality. This is
  2105. * expected to handle only non-error frames.
  2106. *
  2107. * Return: uint32_t: No. of elements processed
  2108. */
  2109. uint32_t dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
  2110. uint8_t reo_ring_num, uint32_t quota)
  2111. {
  2112. hal_ring_desc_t ring_desc;
  2113. hal_soc_handle_t hal_soc;
  2114. struct dp_rx_desc *rx_desc = NULL;
  2115. qdf_nbuf_t nbuf, next;
  2116. bool near_full;
  2117. union dp_rx_desc_list_elem_t *head[MAX_PDEV_CNT];
  2118. union dp_rx_desc_list_elem_t *tail[MAX_PDEV_CNT];
  2119. uint32_t num_pending;
  2120. uint32_t rx_bufs_used = 0, rx_buf_cookie;
  2121. uint16_t msdu_len = 0;
  2122. uint16_t peer_id;
  2123. uint8_t vdev_id;
  2124. struct dp_peer *peer;
  2125. struct dp_vdev *vdev;
  2126. uint32_t pkt_len = 0;
  2127. struct hal_rx_mpdu_desc_info mpdu_desc_info;
  2128. struct hal_rx_msdu_desc_info msdu_desc_info;
  2129. enum hal_reo_error_status error;
  2130. uint32_t peer_mdata;
  2131. uint8_t *rx_tlv_hdr;
  2132. uint32_t rx_bufs_reaped[MAX_PDEV_CNT];
  2133. uint8_t mac_id = 0;
  2134. struct dp_pdev *rx_pdev;
  2135. struct dp_srng *dp_rxdma_srng;
  2136. struct rx_desc_pool *rx_desc_pool;
  2137. struct dp_soc *soc = int_ctx->soc;
  2138. uint8_t ring_id = 0;
  2139. uint8_t core_id = 0;
  2140. struct cdp_tid_rx_stats *tid_stats;
  2141. qdf_nbuf_t nbuf_head;
  2142. qdf_nbuf_t nbuf_tail;
  2143. qdf_nbuf_t deliver_list_head;
  2144. qdf_nbuf_t deliver_list_tail;
  2145. uint32_t num_rx_bufs_reaped = 0;
  2146. uint32_t intr_id;
  2147. struct hif_opaque_softc *scn;
  2148. int32_t tid = 0;
  2149. bool is_prev_msdu_last = true;
  2150. uint32_t num_entries_avail = 0;
  2151. uint32_t rx_ol_pkt_cnt = 0;
  2152. uint32_t num_entries = 0;
  2153. struct hal_rx_msdu_metadata msdu_metadata;
  2154. QDF_STATUS status;
  2155. qdf_nbuf_t ebuf_head;
  2156. qdf_nbuf_t ebuf_tail;
  2157. uint8_t pkt_capture_offload = 0;
  2158. DP_HIST_INIT();
  2159. qdf_assert_always(soc && hal_ring_hdl);
  2160. hal_soc = soc->hal_soc;
  2161. qdf_assert_always(hal_soc);
  2162. scn = soc->hif_handle;
  2163. hif_pm_runtime_mark_dp_rx_busy(scn);
  2164. intr_id = int_ctx->dp_intr_id;
  2165. num_entries = hal_srng_get_num_entries(hal_soc, hal_ring_hdl);
  2166. more_data:
  2167. /* reset local variables here to be re-used in the function */
  2168. nbuf_head = NULL;
  2169. nbuf_tail = NULL;
  2170. deliver_list_head = NULL;
  2171. deliver_list_tail = NULL;
  2172. peer = NULL;
  2173. vdev = NULL;
  2174. num_rx_bufs_reaped = 0;
  2175. ebuf_head = NULL;
  2176. ebuf_tail = NULL;
  2177. qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
  2178. qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
  2179. qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
  2180. qdf_mem_zero(head, sizeof(head));
  2181. qdf_mem_zero(tail, sizeof(tail));
  2182. if (qdf_unlikely(dp_rx_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  2183. /*
  2184. * Need API to convert from hal_ring pointer to
  2185. * Ring Type / Ring Id combo
  2186. */
  2187. DP_STATS_INC(soc, rx.err.hal_ring_access_fail, 1);
  2188. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  2189. FL("HAL RING Access Failed -- %pK"), hal_ring_hdl);
  2190. goto done;
  2191. }
  2192. /*
  2193. * start reaping the buffers from reo ring and queue
  2194. * them in per vdev queue.
  2195. * Process the received pkts in a different per vdev loop.
  2196. */
  2197. while (qdf_likely(quota &&
  2198. (ring_desc = hal_srng_dst_peek(hal_soc,
  2199. hal_ring_hdl)))) {
  2200. error = HAL_RX_ERROR_STATUS_GET(ring_desc);
  2201. ring_id = hal_srng_ring_id_get(hal_ring_hdl);
  2202. if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
  2203. dp_rx_err("%pK: HAL RING 0x%pK:error %d",
  2204. soc, hal_ring_hdl, error);
  2205. DP_STATS_INC(soc, rx.err.hal_reo_error[ring_id], 1);
  2206. /* Don't know how to deal with this -- assert */
  2207. qdf_assert(0);
  2208. }
  2209. dp_rx_ring_record_entry(soc, reo_ring_num, ring_desc);
  2210. rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
  2211. status = dp_rx_cookie_check_and_invalidate(ring_desc);
  2212. if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
  2213. DP_STATS_INC(soc, rx.err.stale_cookie, 1);
  2214. qdf_assert_always(0);
  2215. }
  2216. rx_desc = dp_rx_cookie_2_va_rxdma_buf(soc, rx_buf_cookie);
  2217. status = dp_rx_desc_sanity(soc, hal_soc, hal_ring_hdl,
  2218. ring_desc, rx_desc);
  2219. if (QDF_IS_STATUS_ERROR(status)) {
  2220. if (qdf_unlikely(rx_desc && rx_desc->nbuf)) {
  2221. qdf_assert_always(rx_desc->unmapped);
  2222. dp_ipa_reo_ctx_buf_mapping_lock(
  2223. soc,
  2224. reo_ring_num);
  2225. dp_ipa_handle_rx_buf_smmu_mapping(
  2226. soc,
  2227. rx_desc->nbuf,
  2228. RX_DATA_BUFFER_SIZE,
  2229. false);
  2230. qdf_nbuf_unmap_nbytes_single(
  2231. soc->osdev,
  2232. rx_desc->nbuf,
  2233. QDF_DMA_FROM_DEVICE,
  2234. RX_DATA_BUFFER_SIZE);
  2235. rx_desc->unmapped = 1;
  2236. dp_ipa_reo_ctx_buf_mapping_unlock(
  2237. soc,
  2238. reo_ring_num);
  2239. dp_rx_buffer_pool_nbuf_free(soc, rx_desc->nbuf,
  2240. rx_desc->pool_id);
  2241. dp_rx_add_to_free_desc_list(
  2242. &head[rx_desc->pool_id],
  2243. &tail[rx_desc->pool_id],
  2244. rx_desc);
  2245. }
  2246. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  2247. continue;
  2248. }
  2249. /*
2250. * this is an unlikely scenario where the host is reaping
  2251. * a descriptor which it already reaped just a while ago
  2252. * but is yet to replenish it back to HW.
  2253. * In this case host will dump the last 128 descriptors
  2254. * including the software descriptor rx_desc and assert.
  2255. */
  2256. if (qdf_unlikely(!rx_desc->in_use)) {
  2257. DP_STATS_INC(soc, rx.err.hal_reo_dest_dup, 1);
  2258. dp_info_rl("Reaping rx_desc not in use!");
  2259. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  2260. ring_desc, rx_desc);
  2261. /* ignore duplicate RX desc and continue to process */
  2262. /* Pop out the descriptor */
  2263. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  2264. continue;
  2265. }
  2266. status = dp_rx_desc_nbuf_sanity_check(ring_desc, rx_desc);
  2267. if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
  2268. DP_STATS_INC(soc, rx.err.nbuf_sanity_fail, 1);
  2269. dp_info_rl("Nbuf sanity check failure!");
  2270. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  2271. ring_desc, rx_desc);
  2272. rx_desc->in_err_state = 1;
  2273. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  2274. continue;
  2275. }
  2276. if (qdf_unlikely(!dp_rx_desc_check_magic(rx_desc))) {
  2277. dp_err("Invalid rx_desc cookie=%d", rx_buf_cookie);
  2278. DP_STATS_INC(soc, rx.err.rx_desc_invalid_magic, 1);
  2279. dp_rx_dump_info_and_assert(soc, hal_ring_hdl,
  2280. ring_desc, rx_desc);
  2281. }
  2282. /* Get MPDU DESC info */
  2283. hal_rx_mpdu_desc_info_get(ring_desc, &mpdu_desc_info);
  2284. /* Get MSDU DESC info */
  2285. hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
  2286. if (qdf_unlikely(msdu_desc_info.msdu_flags &
  2287. HAL_MSDU_F_MSDU_CONTINUATION)) {
  2288. /* previous msdu has end bit set, so current one is
  2289. * the new MPDU
  2290. */
  2291. if (is_prev_msdu_last) {
  2292. /* Get number of entries available in HW ring */
  2293. num_entries_avail =
  2294. hal_srng_dst_num_valid(hal_soc,
  2295. hal_ring_hdl, 1);
  2296. /* For new MPDU check if we can read complete
  2297. * MPDU by comparing the number of buffers
  2298. * available and number of buffers needed to
  2299. * reap this MPDU
  2300. */
  2301. if (((msdu_desc_info.msdu_len /
  2302. (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) +
  2303. 1)) > num_entries_avail) {
  2304. DP_STATS_INC(
  2305. soc,
  2306. rx.msdu_scatter_wait_break,
  2307. 1);
  2308. dp_rx_cookie_reset_invalid_bit(
  2309. ring_desc);
  2310. break;
  2311. }
  2312. is_prev_msdu_last = false;
  2313. }
  2314. }
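/*
* Illustrative example of the check above: a scattered MSDU needs
* msdu_len / (RX_DATA_BUFFER_SIZE - RX_PKT_TLVS_LEN) + 1 ring entries;
* e.g. assuming a 2048-byte rx buffer and a few hundred bytes of TLV
* headroom, a 6000-byte MSDU needs 4 entries. If fewer entries than that
* are currently valid, reaping stops here and resumes once the remaining
* buffers of the scattered MSDU are available.
*/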
  2315. core_id = smp_processor_id();
  2316. DP_STATS_INC(soc, rx.ring_packets[core_id][ring_id], 1);
  2317. if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
  2318. qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
  2319. if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
  2320. HAL_MPDU_F_RAW_AMPDU))
  2321. qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
  2322. if (!is_prev_msdu_last &&
  2323. msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
  2324. is_prev_msdu_last = true;
  2325. /* Pop out the descriptor*/
  2326. hal_srng_dst_get_next(hal_soc, hal_ring_hdl);
  2327. rx_bufs_reaped[rx_desc->pool_id]++;
  2328. peer_mdata = mpdu_desc_info.peer_meta_data;
  2329. QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
  2330. DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
  2331. QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
  2332. DP_PEER_METADATA_VDEV_ID_GET(peer_mdata);
  2333. /* to indicate whether this msdu is rx offload */
  2334. pkt_capture_offload =
  2335. DP_PEER_METADATA_OFFLOAD_GET(peer_mdata);
  2336. /*
  2337. * save msdu flags first, last and continuation msdu in
  2338. * nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
  2339. * length to nbuf->cb. This ensures the info required for
  2340. * per pkt processing is always in the same cache line.
  2341. * This helps in improving throughput for smaller pkt
  2342. * sizes.
  2343. */
  2344. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
  2345. qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
  2346. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
  2347. qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
  2348. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
  2349. qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
  2350. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
  2351. qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
  2352. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
  2353. qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
  2354. if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
  2355. qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
  2356. qdf_nbuf_set_tid_val(rx_desc->nbuf,
  2357. HAL_RX_REO_QUEUE_NUMBER_GET(ring_desc));
  2358. qdf_nbuf_set_rx_reo_dest_ind(
  2359. rx_desc->nbuf,
  2360. HAL_RX_REO_MSDU_REO_DST_IND_GET(ring_desc));
  2361. QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
  2362. QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
  2363. /*
2364. * move unmap after the scattered msdu wait-break logic above
2365. * so that a double skb unmap cannot happen.
  2366. */
  2367. rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
  2368. dp_ipa_reo_ctx_buf_mapping_lock(soc,
  2369. reo_ring_num);
  2370. dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
  2371. rx_desc_pool->buf_size,
  2372. false);
  2373. qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
  2374. QDF_DMA_FROM_DEVICE,
  2375. rx_desc_pool->buf_size);
  2376. rx_desc->unmapped = 1;
  2377. dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
  2378. DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
  2379. ebuf_tail, rx_desc);
  2380. /*
  2381. * if continuation bit is set then we have MSDU spread
  2382. * across multiple buffers, let us not decrement quota
  2383. * till we reap all buffers of that MSDU.
  2384. */
  2385. if (qdf_likely(!qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
  2386. quota -= 1;
  2387. dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
  2388. &tail[rx_desc->pool_id],
  2389. rx_desc);
  2390. num_rx_bufs_reaped++;
  2391. /*
  2392. * only if complete msdu is received for scatter case,
  2393. * then allow break.
  2394. */
  2395. if (is_prev_msdu_last &&
  2396. dp_rx_reap_loop_pkt_limit_hit(soc, num_rx_bufs_reaped))
  2397. break;
  2398. }
  2399. done:
  2400. dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
  2401. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  2402. /*
  2403. * continue with next mac_id if no pkts were reaped
  2404. * from that pool
  2405. */
  2406. if (!rx_bufs_reaped[mac_id])
  2407. continue;
  2408. dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
  2409. rx_desc_pool = &soc->rx_desc_buf[mac_id];
  2410. dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
  2411. rx_desc_pool, rx_bufs_reaped[mac_id],
  2412. &head[mac_id], &tail[mac_id]);
  2413. }
  2414. dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
2415. /* Peer can be NULL in case of LFR */
  2416. if (qdf_likely(peer))
  2417. vdev = NULL;
  2418. /*
  2419. * BIG loop where each nbuf is dequeued from global queue,
  2420. * processed and queued back on a per vdev basis. These nbufs
  2421. * are sent to stack as and when we run out of nbufs
  2422. * or a new nbuf dequeued from global queue has a different
  2423. * vdev when compared to previous nbuf.
  2424. */
  2425. nbuf = nbuf_head;
  2426. while (nbuf) {
  2427. next = nbuf->next;
  2428. if (qdf_unlikely(dp_rx_is_raw_frame_dropped(nbuf))) {
  2429. nbuf = next;
  2430. DP_STATS_INC(soc, rx.err.raw_frm_drop, 1);
  2431. continue;
  2432. }
  2433. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  2434. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  2435. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  2436. if (dp_rx_is_list_ready(deliver_list_head, vdev, peer,
  2437. peer_id, vdev_id)) {
  2438. dp_rx_deliver_to_stack(soc, vdev, peer,
  2439. deliver_list_head,
  2440. deliver_list_tail);
  2441. deliver_list_head = NULL;
  2442. deliver_list_tail = NULL;
  2443. }
  2444. /* Get TID from struct cb->tid_val, save to tid */
  2445. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  2446. tid = qdf_nbuf_get_tid_val(nbuf);
  2447. if (qdf_unlikely(!peer)) {
  2448. peer = dp_peer_get_ref_by_id(soc, peer_id,
  2449. DP_MOD_ID_RX);
  2450. } else if (peer && peer->peer_id != peer_id) {
  2451. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  2452. peer = dp_peer_get_ref_by_id(soc, peer_id,
  2453. DP_MOD_ID_RX);
  2454. }
  2455. if (peer) {
  2456. QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) = false;
  2457. qdf_dp_trace_set_track(nbuf, QDF_RX);
  2458. QDF_NBUF_CB_RX_DP_TRACE(nbuf) = 1;
  2459. QDF_NBUF_CB_RX_PACKET_TRACK(nbuf) =
  2460. QDF_NBUF_RX_PKT_DATA_TRACK;
  2461. }
  2462. rx_bufs_used++;
  2463. if (qdf_likely(peer)) {
  2464. vdev = peer->vdev;
  2465. } else {
  2466. nbuf->next = NULL;
  2467. dp_rx_deliver_to_pkt_capture_no_peer(
  2468. soc, nbuf, pkt_capture_offload);
  2469. if (!pkt_capture_offload)
  2470. dp_rx_deliver_to_stack_no_peer(soc, nbuf);
  2471. nbuf = next;
  2472. continue;
  2473. }
  2474. if (qdf_unlikely(!vdev)) {
  2475. qdf_nbuf_free(nbuf);
  2476. nbuf = next;
  2477. DP_STATS_INC(soc, rx.err.invalid_vdev, 1);
  2478. continue;
  2479. }
  2480. /* when hlos tid override is enabled, save tid in
  2481. * skb->priority
  2482. */
  2483. if (qdf_unlikely(vdev->skip_sw_tid_classification &
  2484. DP_TXRX_HLOS_TID_OVERRIDE_ENABLED))
  2485. qdf_nbuf_set_priority(nbuf, tid);
  2486. rx_pdev = vdev->pdev;
  2487. DP_RX_TID_SAVE(nbuf, tid);
  2488. if (qdf_unlikely(rx_pdev->delay_stats_flag) ||
  2489. qdf_unlikely(wlan_cfg_is_peer_ext_stats_enabled(
  2490. soc->wlan_cfg_ctx)))
  2491. qdf_nbuf_set_timestamp(nbuf);
  2492. ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  2493. tid_stats =
  2494. &rx_pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  2495. /*
  2496. * Check if DMA completed -- msdu_done is the last bit
  2497. * to be written
  2498. */
  2499. if (qdf_unlikely(!qdf_nbuf_is_rx_chfrag_cont(nbuf) &&
  2500. !hal_rx_attn_msdu_done_get(rx_tlv_hdr))) {
  2501. dp_err("MSDU DONE failure");
  2502. DP_STATS_INC(soc, rx.err.msdu_done_fail, 1);
  2503. hal_rx_dump_pkt_tlvs(hal_soc, rx_tlv_hdr,
  2504. QDF_TRACE_LEVEL_INFO);
  2505. tid_stats->fail_cnt[MSDU_DONE_FAILURE]++;
  2506. qdf_nbuf_free(nbuf);
  2507. qdf_assert(0);
  2508. nbuf = next;
  2509. continue;
  2510. }
  2511. DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
  2512. /*
  2513. * First IF condition:
  2514. * 802.11 Fragmented pkts are reinjected to REO
  2515. * HW block as SG pkts and for these pkts we only
  2516. * need to pull the RX TLVS header length.
  2517. * Second IF condition:
  2518. * The below condition happens when an MSDU is spread
  2519. * across multiple buffers. This can happen in two cases
2520. * 1. The nbuf size is smaller than the received msdu.
  2521. * ex: we have set the nbuf size to 2048 during
  2522. * nbuf_alloc. but we received an msdu which is
  2523. * 2304 bytes in size then this msdu is spread
  2524. * across 2 nbufs.
  2525. *
  2526. * 2. AMSDUs when RAW mode is enabled.
  2527. * ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
  2528. * across 1st nbuf and 2nd nbuf and last MSDU is
  2529. * spread across 2nd nbuf and 3rd nbuf.
  2530. *
  2531. * for these scenarios let us create a skb frag_list and
  2532. * append these buffers till the last MSDU of the AMSDU
  2533. * Third condition:
  2534. * This is the most likely case, we receive 802.3 pkts
  2535. * decapsulated by HW, here we need to set the pkt length.
  2536. */
  2537. hal_rx_msdu_metadata_get(hal_soc, rx_tlv_hdr, &msdu_metadata);
  2538. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
  2539. bool is_mcbc, is_sa_vld, is_da_vld;
  2540. is_mcbc = hal_rx_msdu_end_da_is_mcbc_get(soc->hal_soc,
  2541. rx_tlv_hdr);
  2542. is_sa_vld =
  2543. hal_rx_msdu_end_sa_is_valid_get(soc->hal_soc,
  2544. rx_tlv_hdr);
  2545. is_da_vld =
  2546. hal_rx_msdu_end_da_is_valid_get(soc->hal_soc,
  2547. rx_tlv_hdr);
  2548. qdf_nbuf_set_da_mcbc(nbuf, is_mcbc);
  2549. qdf_nbuf_set_da_valid(nbuf, is_da_vld);
  2550. qdf_nbuf_set_sa_valid(nbuf, is_sa_vld);
  2551. qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
  2552. } else if (qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
  2553. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2554. nbuf = dp_rx_sg_create(soc, nbuf);
  2555. next = nbuf->next;
  2556. if (qdf_nbuf_is_raw_frame(nbuf)) {
  2557. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  2558. DP_STATS_INC_PKT(peer, rx.raw, 1, msdu_len);
  2559. } else {
  2560. qdf_nbuf_free(nbuf);
  2561. DP_STATS_INC(soc, rx.err.scatter_msdu, 1);
  2562. dp_info_rl("scatter msdu len %d, dropped",
  2563. msdu_len);
  2564. nbuf = next;
  2565. continue;
  2566. }
  2567. } else {
  2568. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2569. pkt_len = msdu_len +
  2570. msdu_metadata.l3_hdr_pad +
  2571. RX_PKT_TLVS_LEN;
  2572. dp_rx_desc_nbuf_len_sanity_check(soc, pkt_len);
  2573. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  2574. dp_rx_skip_tlvs(nbuf, msdu_metadata.l3_hdr_pad);
  2575. }
  2576. /*
2577. * process frame for multipass processing
  2578. */
  2579. if (qdf_unlikely(vdev->multipass_en)) {
  2580. if (dp_rx_multipass_process(peer, nbuf, tid) == false) {
  2581. DP_STATS_INC(peer, rx.multipass_rx_pkt_drop, 1);
  2582. qdf_nbuf_free(nbuf);
  2583. nbuf = next;
  2584. continue;
  2585. }
  2586. }
  2587. if (!dp_wds_rx_policy_check(rx_tlv_hdr, vdev, peer)) {
  2588. dp_rx_err("%pK: Policy Check Drop pkt", soc);
  2589. tid_stats->fail_cnt[POLICY_CHECK_DROP]++;
  2590. /* Drop & free packet */
  2591. qdf_nbuf_free(nbuf);
  2592. /* Statistics */
  2593. nbuf = next;
  2594. continue;
  2595. }
  2596. if (qdf_unlikely(peer && (peer->nawds_enabled) &&
  2597. (qdf_nbuf_is_da_mcbc(nbuf)) &&
  2598. (hal_rx_get_mpdu_mac_ad4_valid(soc->hal_soc,
  2599. rx_tlv_hdr) ==
  2600. false))) {
  2601. tid_stats->fail_cnt[NAWDS_MCAST_DROP]++;
  2602. DP_STATS_INC(peer, rx.nawds_mcast_drop, 1);
  2603. qdf_nbuf_free(nbuf);
  2604. nbuf = next;
  2605. continue;
  2606. }
  2607. /*
  2608. * Drop non-EAPOL frames from unauthorized peer.
  2609. */
  2610. if (qdf_likely(peer) && qdf_unlikely(!peer->authorize)) {
  2611. bool is_eapol = qdf_nbuf_is_ipv4_eapol_pkt(nbuf) ||
  2612. qdf_nbuf_is_ipv4_wapi_pkt(nbuf);
  2613. if (!is_eapol) {
  2614. DP_STATS_INC(soc,
  2615. rx.err.peer_unauth_rx_pkt_drop,
  2616. 1);
  2617. qdf_nbuf_free(nbuf);
  2618. nbuf = next;
  2619. continue;
  2620. }
  2621. }
  2622. if (soc->process_rx_status)
  2623. dp_rx_cksum_offload(vdev->pdev, nbuf, rx_tlv_hdr);
  2624. /* Update the protocol tag in SKB based on CCE metadata */
  2625. dp_rx_update_protocol_tag(soc, vdev, nbuf, rx_tlv_hdr,
  2626. reo_ring_num, false, true);
  2627. /* Update the flow tag in SKB based on FSE metadata */
  2628. dp_rx_update_flow_tag(soc, vdev, nbuf, rx_tlv_hdr, true);
  2629. dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer,
  2630. ring_id, tid_stats);
  2631. if (qdf_unlikely(vdev->mesh_vdev)) {
  2632. if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr)
  2633. == QDF_STATUS_SUCCESS) {
  2634. dp_rx_info("%pK: mesh pkt filtered", soc);
  2635. tid_stats->fail_cnt[MESH_FILTER_DROP]++;
  2636. DP_STATS_INC(vdev->pdev, dropped.mesh_filter,
  2637. 1);
  2638. qdf_nbuf_free(nbuf);
  2639. nbuf = next;
  2640. continue;
  2641. }
  2642. dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr, peer);
  2643. }
  2644. if (qdf_likely(vdev->rx_decap_type ==
  2645. htt_cmn_pkt_type_ethernet) &&
  2646. qdf_likely(!vdev->mesh_vdev)) {
  2647. /* WDS Destination Address Learning */
  2648. dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
  2649. /* Due to HW issue, sometimes we see that the sa_idx
  2650. * and da_idx are invalid with sa_valid and da_valid
  2651. * bits set
  2652. *
  2653. * in this case we also see that value of
  2654. * sa_sw_peer_id is set as 0
  2655. *
  2656. * Drop the packet if sa_idx and da_idx OOB or
  2657. * sa_sw_peerid is 0
  2658. */
  2659. if (!is_sa_da_idx_valid(soc, rx_tlv_hdr, nbuf,
  2660. msdu_metadata)) {
  2661. qdf_nbuf_free(nbuf);
  2662. nbuf = next;
  2663. DP_STATS_INC(soc, rx.err.invalid_sa_da_idx, 1);
  2664. continue;
  2665. }
  2666. /* WDS Source Port Learning */
  2667. if (qdf_likely(vdev->wds_enabled))
  2668. dp_rx_wds_srcport_learn(soc,
  2669. rx_tlv_hdr,
  2670. peer,
  2671. nbuf,
  2672. msdu_metadata);
  2673. /* Intrabss-fwd */
  2674. if (dp_rx_check_ap_bridge(vdev))
  2675. if (DP_RX_INTRABSS_FWD(soc, peer, rx_tlv_hdr,
  2676. nbuf, msdu_metadata)) {
  2677. nbuf = next;
  2678. tid_stats->intrabss_cnt++;
  2679. continue; /* Get next desc */
  2680. }
  2681. if (qdf_unlikely(dp_rx_mec_check_wrapper(soc,
  2682. peer,
  2683. rx_tlv_hdr,
  2684. nbuf))) {
2685. /* this is a looped back MCBC pkt, drop it */
  2686. DP_STATS_INC_PKT(peer, rx.mec_drop, 1,
  2687. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2688. qdf_nbuf_free(nbuf);
  2689. nbuf = next;
  2690. continue;
  2691. }
  2692. }
		dp_rx_fill_gro_info(soc, rx_tlv_hdr, nbuf, &rx_ol_pkt_cnt);

		dp_rx_update_stats(soc, nbuf);
		DP_RX_LIST_APPEND(deliver_list_head,
				  deliver_list_tail,
				  nbuf);
		DP_STATS_INC_PKT(peer, rx.to_stack, 1,
				 QDF_NBUF_CB_RX_PKT_LEN(nbuf));
		if (qdf_unlikely(peer->in_twt))
			DP_STATS_INC_PKT(peer, rx.to_stack_twt, 1,
					 QDF_NBUF_CB_RX_PKT_LEN(nbuf));

		tid_stats->delivered_to_stack++;
		nbuf = next;
	}
	if (qdf_likely(deliver_list_head)) {
		if (qdf_likely(peer)) {
			dp_rx_deliver_to_pkt_capture(soc, vdev->pdev, peer_id,
						     pkt_capture_offload,
						     deliver_list_head);
			if (!pkt_capture_offload)
				dp_rx_deliver_to_stack(soc, vdev, peer,
						       deliver_list_head,
						       deliver_list_tail);
		} else {
			nbuf = deliver_list_head;
			while (nbuf) {
				next = nbuf->next;
				nbuf->next = NULL;
				dp_rx_deliver_to_stack_no_peer(soc, nbuf);
				nbuf = next;
			}
		}
	}
	if (qdf_likely(peer))
		dp_peer_unref_delete(peer, DP_MOD_ID_RX);

	if (dp_rx_enable_eol_data_check(soc) && rx_bufs_used) {
		if (quota) {
			num_pending =
				dp_rx_srng_get_num_pending(hal_soc,
							   hal_ring_hdl,
							   num_entries,
							   &near_full);
			if (num_pending) {
				DP_STATS_INC(soc, rx.hp_oos2, 1);

				if (!hif_exec_should_yield(scn, intr_id))
					goto more_data;

				if (qdf_unlikely(near_full)) {
					DP_STATS_INC(soc, rx.near_full, 1);
					goto more_data;
				}
			}
		}

		if (vdev && vdev->osif_fisa_flush)
			vdev->osif_fisa_flush(soc, reo_ring_num);

		if (vdev && vdev->osif_gro_flush && rx_ol_pkt_cnt) {
			vdev->osif_gro_flush(vdev->osif_vdev,
					     reo_ring_num);
		}
	}

	/* Update histogram statistics by looping through pdev's */
	DP_RX_HIST_STATS_PER_PDEV();

	return rx_bufs_used; /* Assume no scale factor for now */
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
{
	QDF_STATUS ret;

	if (vdev->osif_rx_flush) {
		ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
		if (!QDF_IS_STATUS_SUCCESS(ret)) {
			dp_err("Failed to flush rx pkts for vdev %d\n",
			       vdev->vdev_id);
			return ret;
		}
	}

	return QDF_STATUS_SUCCESS;
}
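
/**
 * dp_pdev_nbuf_alloc_and_map() - allocate an rx nbuf and DMA map it
 * @dp_soc: soc handle
 * @nbuf_frag_info_t: frag info to be filled with the allocated nbuf and paddr
 * @dp_pdev: pdev handle, used for replenish failure statistics
 * @rx_desc_pool: rx descriptor pool supplying buffer size and alignment
 *
 * Allocates one rx buffer, maps it for DMA (QDF_DMA_FROM_DEVICE), stores
 * its physical address in @nbuf_frag_info_t and validates that address
 * via dp_check_paddr().
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on alloc/map failure
 */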
static QDF_STATUS
dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
			   struct dp_pdev *dp_pdev,
			   struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret = QDF_STATUS_E_FAILURE;

	(nbuf_frag_info_t->virt_addr).nbuf =
		qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
			       RX_BUFFER_RESERVATION,
			       rx_desc_pool->buf_alignment, FALSE);
	if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
		dp_err("nbuf alloc failed");
		DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
		return ret;
	}

	ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
					 (nbuf_frag_info_t->virt_addr).nbuf,
					 QDF_DMA_FROM_DEVICE,
					 rx_desc_pool->buf_size);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
		dp_err("nbuf map failed");
		DP_STATS_INC(dp_pdev, replenish.map_err, 1);
		return ret;
	}

	nbuf_frag_info_t->paddr =
		qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);

	ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
			     &nbuf_frag_info_t->paddr,
			     rx_desc_pool);
	if (ret == QDF_STATUS_E_FAILURE) {
		dp_err("nbuf check x86 failed");
		DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
		return ret;
	}

	return QDF_STATUS_SUCCESS;
}
QDF_STATUS
dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
			  struct dp_srng *dp_rxdma_srng,
			  struct rx_desc_pool *rx_desc_pool,
			  uint32_t num_req_buffers)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
	hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_ring_entry;
	qdf_dma_addr_t paddr;
	struct dp_rx_nbuf_frag_info *nf_info;
	uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
	uint32_t buffer_index, nbuf_ptrs_per_page;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret;
	int page_idx, total_pages;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	int sync_hw_ptr = 1;
	uint32_t num_entries_avail;

	if (qdf_unlikely(!rxdma_srng)) {
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		return QDF_STATUS_E_FAILURE;
	}

	dp_debug("requested %u RX buffers for driver attach", num_req_buffers);

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng,
						   sync_hw_ptr);
	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	if (!num_entries_avail) {
		dp_err("Num of available entries is zero, nothing to do");
		return QDF_STATUS_E_NOMEM;
	}

	if (num_entries_avail < num_req_buffers)
		num_req_buffers = num_entries_avail;

	nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
					    num_req_buffers, &desc_list, &tail);
	if (!nr_descs) {
		dp_err("no free rx_descs in freelist");
		DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
		return QDF_STATUS_E_NOMEM;
	}

	dp_debug("got %u RX descs for driver attach", nr_descs);
	/*
	 * Try to allocate pointers to the nbuf one page at a time.
	 * Take pointers that can fit in one page of memory and
	 * iterate through the total descriptors that need to be
	 * allocated in order of pages. Reuse the pointers that
	 * have been allocated to fit in one page across each
	 * iteration to index into the nbuf.
	 */
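	/*
	 * Illustrative example (numbers are assumptions, not taken from any
	 * particular build): with PAGE_SIZE = 4096 and sizeof(*nf_info) = 16,
	 * one page holds 256 pointers, so 1000 descriptors are handled as
	 * three full passes of 256 plus a final partial pass of 232
	 * (total_pages = 4, including the extra page for the remainder).
	 */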
	total_pages = (nr_descs * sizeof(*nf_info)) / PAGE_SIZE;

	/*
	 * Add an extra page to store the remainder if any
	 */
	if ((nr_descs * sizeof(*nf_info)) % PAGE_SIZE)
		total_pages++;

	nf_info = qdf_mem_malloc(PAGE_SIZE);
	if (!nf_info) {
		dp_err("failed to allocate nbuf array");
		DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	nbuf_ptrs_per_page = PAGE_SIZE / sizeof(*nf_info);
	for (page_idx = 0; page_idx < total_pages; page_idx++) {
		qdf_mem_zero(nf_info, PAGE_SIZE);

		for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
			/*
			 * The last page of buffer pointers may not be required
			 * completely based on the number of descriptors. Below
			 * check will ensure we are allocating only the
			 * required number of descriptors.
			 */
			if (nr_nbuf_total >= nr_descs)
				break;

			/* Flag is set during pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				ret = dp_pdev_frag_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			else
				ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
						&nf_info[nr_nbuf], dp_pdev,
						rx_desc_pool);
			if (QDF_IS_STATUS_ERROR(ret))
				break;

			nr_nbuf_total++;
		}
		hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);

		for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
			rxdma_ring_entry =
				hal_srng_src_get_next(dp_soc->hal_soc,
						      rxdma_srng);
			qdf_assert_always(rxdma_ring_entry);

			next = desc_list->next;
			paddr = nf_info[buffer_index].paddr;
			nbuf = nf_info[buffer_index].virt_addr.nbuf;

			/* Flag is set during pdev rx_desc_pool initialization */
			if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
				dp_rx_desc_frag_prep(&desc_list->rx_desc,
						     &nf_info[buffer_index]);
			else
				dp_rx_desc_prep(&desc_list->rx_desc,
						&nf_info[buffer_index]);
			desc_list->rx_desc.in_use = 1;
			dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
			dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
						   __func__,
						   RX_DESC_REPLENISHED);

			hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
						     desc_list->rx_desc.cookie,
						     rx_desc_pool->owner);

			dp_ipa_handle_rx_buf_smmu_mapping(
						dp_soc, nbuf,
						rx_desc_pool->buf_size,
						true);

			desc_list = next;
		}

		dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
					       rxdma_srng, nr_nbuf, nr_nbuf);
		hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
	}
  2933. dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
  2934. qdf_mem_free(nf_info);
  2935. if (!nr_nbuf_total) {
  2936. dp_err("No nbuf's allocated");
  2937. QDF_BUG(0);
  2938. return QDF_STATUS_E_RESOURCES;
  2939. }
  2940. /* No need to count the number of bytes received during replenish.
  2941. * Therefore set replenish.pkts.bytes as 0.
  2942. */
  2943. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
  2944. return QDF_STATUS_SUCCESS;
  2945. }
/**
 * dp_rx_enable_mon_dest_frag() - Enable frag processing for
 *  monitor destination ring
 *
 * Enable this flag only for monitor destination buffer processing
 * when the DP_RX_MON_MEM_FRAG feature is enabled.
 * If the flag is set, the frag-based functions are used for the alloc,
 * map, desc-prep and free operations on the descriptor buffer;
 * otherwise the normal nbuf-based functions are used.
 *
 * @rx_desc_pool: Rx desc pool
 * @is_mon_dest_desc: Is it for monitor dest buffer
 *
 * Return: None
 */
#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
}
#else
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
				bool is_mon_dest_desc)
{
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	if (is_mon_dest_desc)
		dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
}
#endif
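
/*
 * Illustrative usage sketch (not part of the upstream data path, kept out of
 * the build): how a monitor-destination descriptor pool could opt into
 * frag-based buffer handling before its pool init runs. The helper name and
 * the decision to always enable the flag are assumptions for the example.
 */
#if 0
static void dp_rx_mon_dest_pool_frag_example(struct rx_desc_pool *mon_dest_pool)
{
	/* Request frag-based alloc/map/desc-prep/free ops for this pool */
	dp_rx_enable_mon_dest_frag(mon_dest_pool, true);
}
#endif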
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 *
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable desc_pool frag flag.
		 */
		rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = DP_WBM2SW_RBM;
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;

	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 *
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
					 rx_desc_pool, rxdma_entries - 1);
}
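
/*
 * Illustrative attach-time ordering (a sketch, under the assumption that the
 * pdev attach path wires these helpers together in this order; the real call
 * sites live outside this file and include more error handling):
 */
#if 0
static QDF_STATUS dp_rx_pdev_attach_example(struct dp_pdev *pdev)
{
	QDF_STATUS status;

	/* 1. Allocate the software rx descriptor pool */
	status = dp_rx_pdev_desc_pool_alloc(pdev);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* 2. Initialize the pool (owner, buf size/alignment, freelist) */
	status = dp_rx_pdev_desc_pool_init(pdev);
	if (QDF_IS_STATUS_ERROR(status)) {
		dp_rx_pdev_desc_pool_free(pdev);
		return status;
	}

	/* 3. Allocate nbufs and post them to the RxDMA refill ring */
	return dp_rx_pdev_buffers_alloc(pdev);
}
#endif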
/*
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 *
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + RX_PKT_TLVS_LEN;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, peer->vdev, peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif
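
/*
 * Illustrative caller sketch for dp_rx_deliver_special_frame() (kept out of
 * the build): deliver only EAPOL/ARP/DHCP frames for a peer and drop
 * everything else. The FRAME_MASK_* values are assumed to be the masks
 * defined in dp_rx.h; the surrounding error-path context is omitted.
 */
#if 0
static void dp_rx_special_frame_example(struct dp_soc *soc,
					struct dp_peer *peer,
					qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr)
{
	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
			      FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;

	/* On success the frame is handed to the stack; otherwise drop it */
	if (!dp_rx_deliver_special_frame(soc, peer, nbuf, frame_mask,
					 rx_tlv_hdr))
		qdf_nbuf_free(nbuf);
}
#endif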