dp_rx.c

  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "hal_hw_headers.h"
  20. #include "dp_types.h"
  21. #include "dp_rx.h"
  22. #include "dp_tx.h"
  23. #include "dp_peer.h"
  24. #include "hal_rx.h"
  25. #include "hal_api.h"
  26. #include "qdf_nbuf.h"
  27. #ifdef MESH_MODE_SUPPORT
  28. #include "if_meta_hdr.h"
  29. #endif
  30. #include "dp_internal.h"
  31. #include "dp_ipa.h"
  32. #include "dp_hist.h"
  33. #include "dp_rx_buffer_pool.h"
  34. #ifdef WIFI_MONITOR_SUPPORT
  35. #include "dp_htt.h"
  36. #include <dp_mon.h>
  37. #endif
  38. #ifdef FEATURE_WDS
  39. #include "dp_txrx_wds.h"
  40. #endif
  41. #ifdef DP_RATETABLE_SUPPORT
  42. #include "dp_ratetable.h"
  43. #endif
  44. #ifdef DUP_RX_DESC_WAR
  45. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  46. hal_ring_handle_t hal_ring,
  47. hal_ring_desc_t ring_desc,
  48. struct dp_rx_desc *rx_desc)
  49. {
  50. void *hal_soc = soc->hal_soc;
  51. hal_srng_dump_ring_desc(hal_soc, hal_ring, ring_desc);
  52. dp_rx_desc_dump(rx_desc);
  53. }
  54. #else
  55. void dp_rx_dump_info_and_assert(struct dp_soc *soc,
  56. hal_ring_handle_t hal_ring_hdl,
  57. hal_ring_desc_t ring_desc,
  58. struct dp_rx_desc *rx_desc)
  59. {
  60. hal_soc_handle_t hal_soc = soc->hal_soc;
  61. dp_rx_desc_dump(rx_desc);
  62. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl, ring_desc);
  63. hal_srng_dump_ring(hal_soc, hal_ring_hdl);
  64. qdf_assert_always(0);
  65. }
  66. #endif
  67. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  68. #ifdef RX_DESC_SANITY_WAR
  69. QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
  70. hal_ring_handle_t hal_ring_hdl,
  71. hal_ring_desc_t ring_desc,
  72. struct dp_rx_desc *rx_desc)
  73. {
  74. uint8_t return_buffer_manager;
  75. if (qdf_unlikely(!rx_desc)) {
  76. /*
  77. * This is an unlikely case where the cookie obtained
  78. * from the ring_desc is invalid and hence we are not
  79. * able to find the corresponding rx_desc
  80. */
  81. goto fail;
  82. }
  83. return_buffer_manager = hal_rx_ret_buf_manager_get(hal_soc, ring_desc);
  84. if (qdf_unlikely(!(return_buffer_manager ==
  85. HAL_RX_BUF_RBM_SW1_BM(soc->wbm_sw0_bm_id) ||
  86. return_buffer_manager ==
  87. HAL_RX_BUF_RBM_SW3_BM(soc->wbm_sw0_bm_id)))) {
  88. goto fail;
  89. }
  90. return QDF_STATUS_SUCCESS;
  91. fail:
  92. DP_STATS_INC(soc, rx.err.invalid_cookie, 1);
  93. dp_err("Ring Desc:");
  94. hal_srng_dump_ring_desc(hal_soc, hal_ring_hdl,
  95. ring_desc);
  96. return QDF_STATUS_E_NULL_VALUE;
  97. }
  98. #endif
  99. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
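/*
 * Note on dp_rx_desc_sanity() above: a ring entry is accepted only when the
 * cookie resolves to a non-NULL rx_desc and the return-buffer-manager field
 * read from the ring descriptor is one of the host-owned SW RBMs
 * (HAL_RX_BUF_RBM_SW1_BM/SW3_BM relative to wbm_sw0_bm_id). Anything else is
 * treated as a corrupted or duplicate entry: the ring descriptor is dumped
 * and the rx.err.invalid_cookie counter is incremented.
 */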
  100. /**
  101. * dp_pdev_frag_alloc_and_map() - Allocate frag for desc buffer and map
  102. *
  103. * @dp_soc: struct dp_soc *
  104. * @nbuf_frag_info_t: nbuf frag info
  105. * @dp_pdev: struct dp_pdev *
  106. * @rx_desc_pool: Rx desc pool
  107. *
  108. * Return: QDF_STATUS
  109. */
  110. #ifdef DP_RX_MON_MEM_FRAG
  111. static inline QDF_STATUS
  112. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  113. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  114. struct dp_pdev *dp_pdev,
  115. struct rx_desc_pool *rx_desc_pool)
  116. {
  117. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  118. (nbuf_frag_info_t->virt_addr).vaddr =
  119. qdf_frag_alloc(NULL, rx_desc_pool->buf_size);
  120. if (!((nbuf_frag_info_t->virt_addr).vaddr)) {
  121. dp_err("Frag alloc failed");
  122. DP_STATS_INC(dp_pdev, replenish.frag_alloc_fail, 1);
  123. return QDF_STATUS_E_NOMEM;
  124. }
  125. ret = qdf_mem_map_page(dp_soc->osdev,
  126. (nbuf_frag_info_t->virt_addr).vaddr,
  127. QDF_DMA_FROM_DEVICE,
  128. rx_desc_pool->buf_size,
  129. &nbuf_frag_info_t->paddr);
  130. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  131. qdf_frag_free((nbuf_frag_info_t->virt_addr).vaddr);
  132. dp_err("Frag map failed");
  133. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  134. return QDF_STATUS_E_FAULT;
  135. }
  136. return QDF_STATUS_SUCCESS;
  137. }
  138. #else
  139. static inline QDF_STATUS
  140. dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc,
  141. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  142. struct dp_pdev *dp_pdev,
  143. struct rx_desc_pool *rx_desc_pool)
  144. {
  145. return QDF_STATUS_SUCCESS;
  146. }
  147. #endif /* DP_RX_MON_MEM_FRAG */
  148. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  149. /**
  150. * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history
  151. * @soc: Datapath soc structure
  152. * @ring_num: Refill ring number
  * @hal_ring_hdl: HAL handle of the refill ring
  153. * @num_req: number of buffers requested for refill
  154. * @num_refill: number of buffers refilled
  155. *
  156. * Return: None
  157. */
  158. static inline void
  159. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  160. hal_ring_handle_t hal_ring_hdl,
  161. uint32_t num_req, uint32_t num_refill)
  162. {
  163. struct dp_refill_info_record *record;
  164. uint32_t idx;
  165. uint32_t tp;
  166. uint32_t hp;
  167. if (qdf_unlikely(ring_num >= MAX_PDEV_CNT ||
  168. !soc->rx_refill_ring_history[ring_num]))
  169. return;
  170. idx = dp_history_get_next_index(&soc->rx_refill_ring_history[ring_num]->index,
  171. DP_RX_REFILL_HIST_MAX);
  172. /* No NULL check needed for record since it's an array */
  173. record = &soc->rx_refill_ring_history[ring_num]->entry[idx];
  174. hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &tp, &hp);
  175. record->timestamp = qdf_get_log_timestamp();
  176. record->num_req = num_req;
  177. record->num_refill = num_refill;
  178. record->hp = hp;
  179. record->tp = tp;
  180. }
  181. #else
  182. static inline void
  183. dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  184. hal_ring_handle_t hal_ring_hdl,
  185. uint32_t num_req, uint32_t num_refill)
  186. {
  187. }
  188. #endif
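/*
 * The refill history above is a fixed-size circular log (indexed modulo
 * DP_RX_REFILL_HIST_MAX) of how many buffers were requested vs. actually
 * posted, together with the SW head/tail pointers and a timestamp. A debug
 * dump of one entry might look like the sketch below (illustrative only; the
 * dp_info() call and the "rec" variable are not part of this file):
 *
 *   struct dp_refill_info_record *rec =
 *           &soc->rx_refill_ring_history[ring_num]->entry[idx];
 *   dp_info("req %u refilled %u hp %u tp %u ts %llu",
 *           rec->num_req, rec->num_refill, rec->hp, rec->tp,
 *           rec->timestamp);
 */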
  189. /**
  190. * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and map
  191. *
  192. * @dp_soc: struct dp_soc *
  193. * @mac_id: Mac id
  194. * @num_entries_avail: number of entries available in the refill ring
  195. * @nbuf_frag_info_t: nbuf frag info
  196. * @dp_pdev: struct dp_pdev *
  197. * @rx_desc_pool: Rx desc pool
  198. *
  199. * Return: QDF_STATUS
  200. */
  201. static inline QDF_STATUS
  202. dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
  203. uint32_t mac_id,
  204. uint32_t num_entries_avail,
  205. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  206. struct dp_pdev *dp_pdev,
  207. struct rx_desc_pool *rx_desc_pool)
  208. {
  209. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  210. (nbuf_frag_info_t->virt_addr).nbuf =
  211. dp_rx_buffer_pool_nbuf_alloc(dp_soc,
  212. mac_id,
  213. rx_desc_pool,
  214. num_entries_avail);
  215. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  216. dp_err("nbuf alloc failed");
  217. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  218. return QDF_STATUS_E_NOMEM;
  219. }
  220. ret = dp_rx_buffer_pool_nbuf_map(dp_soc, rx_desc_pool,
  221. nbuf_frag_info_t);
  222. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  223. dp_rx_buffer_pool_nbuf_free(dp_soc,
  224. (nbuf_frag_info_t->virt_addr).nbuf, mac_id);
  225. dp_err("nbuf map failed");
  226. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  227. return QDF_STATUS_E_FAULT;
  228. }
  229. nbuf_frag_info_t->paddr =
  230. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
  231. dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
  232. (nbuf_frag_info_t->virt_addr).nbuf),
  233. rx_desc_pool->buf_size,
  234. true, __func__, __LINE__);
  235. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  236. &nbuf_frag_info_t->paddr,
  237. rx_desc_pool);
  238. if (ret == QDF_STATUS_E_FAILURE) {
  239. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  240. return QDF_STATUS_E_ADDRNOTAVAIL;
  241. }
  242. return QDF_STATUS_SUCCESS;
  243. }
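/*
 * dp_pdev_nbuf_alloc_and_map_replenish() above performs the full per-buffer
 * setup used by the replenish path: allocate an nbuf from the buffer pool,
 * DMA-map it, record the frag physical address, create the IPA/SMMU mapping
 * and finally run dp_check_paddr() so that buffers whose physical address
 * fails the check are rejected (counted under replenish.x86_fail).
 */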
  244. #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
  245. QDF_STATUS
  246. __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
  247. struct dp_srng *dp_rxdma_srng,
  248. struct rx_desc_pool *rx_desc_pool)
  249. {
  250. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  251. uint32_t count;
  252. void *rxdma_ring_entry;
  253. union dp_rx_desc_list_elem_t *next = NULL;
  254. void *rxdma_srng;
  255. qdf_nbuf_t nbuf;
  256. qdf_dma_addr_t paddr;
  257. uint16_t num_entries_avail = 0;
  258. uint16_t num_alloc_desc = 0;
  259. union dp_rx_desc_list_elem_t *desc_list = NULL;
  260. union dp_rx_desc_list_elem_t *tail = NULL;
  261. int sync_hw_ptr = 0;
  262. rxdma_srng = dp_rxdma_srng->hal_srng;
  263. if (qdf_unlikely(!dp_pdev)) {
  264. dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
  265. return QDF_STATUS_E_FAILURE;
  266. }
  267. if (qdf_unlikely(!rxdma_srng)) {
  268. dp_rx_debug("%pK: rxdma srng not initialized", soc);
  269. return QDF_STATUS_E_FAILURE;
  270. }
  271. hal_srng_access_start(soc->hal_soc, rxdma_srng);
  272. num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
  273. rxdma_srng,
  274. sync_hw_ptr);
  275. dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
  276. soc, num_entries_avail);
  277. if (qdf_unlikely(num_entries_avail <
  278. ((dp_rxdma_srng->num_entries * 3) / 4))) {
  279. hal_srng_access_end(soc->hal_soc, rxdma_srng);
  280. return QDF_STATUS_E_FAILURE;
  281. }
  282. DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
  283. num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
  284. rx_desc_pool,
  285. num_entries_avail,
  286. &desc_list,
  287. &tail);
  288. if (!num_alloc_desc) {
  289. dp_rx_err("%pK: no free rx_descs in freelist", soc);
  290. DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
  291. num_entries_avail);
  292. hal_srng_access_end(soc->hal_soc, rxdma_srng);
  293. return QDF_STATUS_E_NOMEM;
  294. }
  295. for (count = 0; count < num_alloc_desc; count++) {
  296. next = desc_list->next;
  297. qdf_prefetch(next);
  298. nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
  299. if (qdf_unlikely(!nbuf)) {
  300. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  301. break;
  302. }
  303. paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
  304. rx_desc_pool->buf_size);
  305. rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
  306. rxdma_srng);
  307. qdf_assert_always(rxdma_ring_entry);
  308. desc_list->rx_desc.nbuf = nbuf;
  309. desc_list->rx_desc.rx_buf_start = nbuf->data;
  310. desc_list->rx_desc.unmapped = 0;
  311. /* rx_desc.in_use should be zero at this time*/
  312. qdf_assert_always(desc_list->rx_desc.in_use == 0);
  313. desc_list->rx_desc.in_use = 1;
  314. desc_list->rx_desc.in_err_state = 0;
  315. hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
  316. paddr,
  317. desc_list->rx_desc.cookie,
  318. rx_desc_pool->owner);
  319. desc_list = next;
  320. }
  321. qdf_dsb();
  322. hal_srng_access_end(soc->hal_soc, rxdma_srng);
  323. /* No need to count the number of bytes received during replenish.
  324. * Therefore set replenish.pkts.bytes as 0.
  325. */
  326. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  327. DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
  328. /*
  329. * add any available free desc back to the free list
  330. */
  331. if (desc_list)
  332. dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
  333. mac_id, rx_desc_pool);
  334. return QDF_STATUS_SUCCESS;
  335. }
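/*
 * __dp_rx_buffers_no_map_lt_replenish() above is the low-threshold variant
 * used when nbufs are posted without per-buffer map/unmap: it bails out
 * unless at least 3/4 of the refill ring entries are free, then fills as many
 * entries as it can obtain free descriptors for, and returns any unused
 * descriptors to the free list. Each invocation that crosses the threshold is
 * counted under replenish.low_thresh_intrs.
 */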
  336. QDF_STATUS
  337. __dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
  338. struct dp_srng *dp_rxdma_srng,
  339. struct rx_desc_pool *rx_desc_pool,
  340. uint32_t num_req_buffers,
  341. union dp_rx_desc_list_elem_t **desc_list,
  342. union dp_rx_desc_list_elem_t **tail)
  343. {
  344. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  345. uint32_t count;
  346. void *rxdma_ring_entry;
  347. union dp_rx_desc_list_elem_t *next;
  348. void *rxdma_srng;
  349. qdf_nbuf_t nbuf;
  350. qdf_dma_addr_t paddr;
  351. rxdma_srng = dp_rxdma_srng->hal_srng;
  352. if (qdf_unlikely(!dp_pdev)) {
  353. dp_rx_err("%pK: pdev is null for mac_id = %d",
  354. soc, mac_id);
  355. return QDF_STATUS_E_FAILURE;
  356. }
  357. if (qdf_unlikely(!rxdma_srng)) {
  358. dp_rx_debug("%pK: rxdma srng not initialized", soc);
  359. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  360. return QDF_STATUS_E_FAILURE;
  361. }
  362. dp_rx_debug("%pK: requested %d buffers for replenish",
  363. soc, num_req_buffers);
  364. hal_srng_access_start(soc->hal_soc, rxdma_srng);
  365. for (count = 0; count < num_req_buffers; count++) {
  366. next = (*desc_list)->next;
  367. qdf_prefetch(next);
  368. nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
  369. if (qdf_unlikely(!nbuf)) {
  370. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  371. break;
  372. }
  373. paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
  374. rx_desc_pool->buf_size);
  375. rxdma_ring_entry = (struct dp_buffer_addr_info *)
  376. hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
  377. if (!rxdma_ring_entry)
  378. break;
  379. qdf_assert_always(rxdma_ring_entry);
  380. (*desc_list)->rx_desc.nbuf = nbuf;
  381. (*desc_list)->rx_desc.rx_buf_start = nbuf->data;
  382. (*desc_list)->rx_desc.unmapped = 0;
  383. /* rx_desc.in_use should be zero at this time*/
  384. qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
  385. (*desc_list)->rx_desc.in_use = 1;
  386. (*desc_list)->rx_desc.in_err_state = 0;
  387. hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
  388. paddr,
  389. (*desc_list)->rx_desc.cookie,
  390. rx_desc_pool->owner);
  391. *desc_list = next;
  392. }
  393. qdf_dsb();
  394. hal_srng_access_end(soc->hal_soc, rxdma_srng);
  395. /* No need to count the number of bytes received during replenish.
  396. * Therefore set replenish.pkts.bytes as 0.
  397. */
  398. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  399. DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
  400. /*
  401. * add any available free desc back to the free list
  402. */
  403. if (*desc_list)
  404. dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
  405. mac_id, rx_desc_pool);
  406. return QDF_STATUS_SUCCESS;
  407. }
  408. QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
  409. uint32_t mac_id,
  410. struct dp_srng *dp_rxdma_srng,
  411. struct rx_desc_pool *rx_desc_pool,
  412. uint32_t num_req_buffers)
  413. {
  414. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  415. uint32_t count;
  416. uint32_t nr_descs = 0;
  417. void *rxdma_ring_entry;
  418. union dp_rx_desc_list_elem_t *next;
  419. void *rxdma_srng;
  420. qdf_nbuf_t nbuf;
  421. qdf_dma_addr_t paddr;
  422. union dp_rx_desc_list_elem_t *desc_list = NULL;
  423. union dp_rx_desc_list_elem_t *tail = NULL;
  424. rxdma_srng = dp_rxdma_srng->hal_srng;
  425. if (qdf_unlikely(!dp_pdev)) {
  426. dp_rx_err("%pK: pdev is null for mac_id = %d",
  427. soc, mac_id);
  428. return QDF_STATUS_E_FAILURE;
  429. }
  430. if (qdf_unlikely(!rxdma_srng)) {
  431. dp_rx_debug("%pK: rxdma srng not initialized", soc);
  432. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  433. return QDF_STATUS_E_FAILURE;
  434. }
  435. dp_rx_debug("%pK: requested %d buffers for replenish",
  436. soc, num_req_buffers);
  437. nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
  438. num_req_buffers, &desc_list, &tail);
  439. if (!nr_descs) {
  440. dp_err("no free rx_descs in freelist");
  441. DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
  442. return QDF_STATUS_E_NOMEM;
  443. }
  444. dp_debug("got %u RX descs for driver attach", nr_descs);
  445. hal_srng_access_start(soc->hal_soc, rxdma_srng);
  446. for (count = 0; count < nr_descs; count++) {
  447. next = desc_list->next;
  448. qdf_prefetch(next);
  449. nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
  450. if (qdf_unlikely(!nbuf)) {
  451. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  452. break;
  453. }
  454. paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
  455. rx_desc_pool->buf_size);
  456. rxdma_ring_entry = (struct dp_buffer_addr_info *)
  457. hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
  458. if (!rxdma_ring_entry)
  459. break;
  460. qdf_assert_always(rxdma_ring_entry);
  461. desc_list->rx_desc.nbuf = nbuf;
  462. desc_list->rx_desc.rx_buf_start = nbuf->data;
  463. desc_list->rx_desc.unmapped = 0;
  464. /* rx_desc.in_use should be zero at this time*/
  465. qdf_assert_always(desc_list->rx_desc.in_use == 0);
  466. desc_list->rx_desc.in_use = 1;
  467. desc_list->rx_desc.in_err_state = 0;
  468. hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
  469. paddr,
  470. desc_list->rx_desc.cookie,
  471. rx_desc_pool->owner);
  472. desc_list = next;
  473. }
  474. qdf_dsb();
  475. hal_srng_access_end(soc->hal_soc, rxdma_srng);
  476. /* No need to count the number of bytes received during replenish.
  477. * Therefore set replenish.pkts.bytes as 0.
  478. */
  479. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  480. return QDF_STATUS_SUCCESS;
  481. }
  482. #endif
  483. #ifdef DP_UMAC_HW_RESET_SUPPORT
  484. #if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
  485. static inline
  486. qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
  487. uint32_t buf_size)
  488. {
  489. return dp_rx_nbuf_sync_no_dsb(dp_soc, nbuf, buf_size);
  490. }
  491. #else
  492. static inline
  493. qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
  494. uint32_t buf_size)
  495. {
  496. return qdf_nbuf_get_frag_paddr(nbuf, 0);
  497. }
  498. #endif
  499. /*
  500. * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
  501. *
  502. * @soc: core txrx main context
  503. * @dp_rxdma_srng: rxdma ring
  504. * @rx_desc_pool: rx descriptor pool
  505. * @rx_desc: rx descriptor
  506. *
  507. * Return: void
  508. */
  509. static inline
  510. void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
  511. struct rx_desc_pool *rx_desc_pool,
  512. struct dp_rx_desc *rx_desc)
  513. {
  514. void *rxdma_srng;
  515. void *rxdma_ring_entry;
  516. qdf_dma_addr_t paddr;
  517. rxdma_srng = dp_rxdma_srng->hal_srng;
  518. /* No one else should be accessing the srng at this point */
  519. hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);
  520. rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
  521. qdf_assert_always(rxdma_ring_entry);
  522. rx_desc->in_err_state = 0;
  523. paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
  524. rx_desc_pool->buf_size);
  525. hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
  526. rx_desc->cookie, rx_desc_pool->owner);
  527. hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
  528. }
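/*
 * dp_rx_desc_replenish() above re-posts a single, still-mapped rx descriptor
 * to the refill ring. It is used from the UMAC HW reset / descriptor reuse
 * path, where the ring is expected to be quiescent, which is why the unlocked
 * SRNG access variants are sufficient here.
 */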
  529. /*
  530. * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
  531. *
  532. * @soc: core txrx main context
  533. * @nbuf_list: nbuf list for delayed free
  534. *
  535. * Return: void
  536. */
  537. void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
  538. {
  539. int mac_id, i, j;
  540. union dp_rx_desc_list_elem_t *head = NULL;
  541. union dp_rx_desc_list_elem_t *tail = NULL;
  542. for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
  543. struct dp_srng *dp_rxdma_srng =
  544. &soc->rx_refill_buf_ring[mac_id];
  545. struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
  546. uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
  547. /* Only fill up 1/3 of the ring size */
  548. uint32_t num_req_decs;
  549. if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
  550. !rx_desc_pool->array)
  551. continue;
  552. num_req_decs = dp_rxdma_srng->num_entries / 3;
  553. for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
  554. struct dp_rx_desc *rx_desc =
  555. (struct dp_rx_desc *)&rx_desc_pool->array[i];
  556. if (rx_desc->in_use) {
  557. if (j < dp_rxdma_srng->num_entries) {
  558. dp_rx_desc_replenish(soc, dp_rxdma_srng,
  559. rx_desc_pool,
  560. rx_desc);
  561. } else {
  562. dp_rx_nbuf_unmap(soc, rx_desc, 0);
  563. rx_desc->unmapped = 0;
  564. rx_desc->nbuf->next = *nbuf_list;
  565. *nbuf_list = rx_desc->nbuf;
  566. dp_rx_add_to_free_desc_list(&head,
  567. &tail,
  568. rx_desc);
  569. }
  570. j++;
  571. }
  572. }
  573. if (head)
  574. dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
  575. mac_id, rx_desc_pool);
  576. /* If the number of descs in use was less than required, replenish
  577. * the ring with some buffers
  578. */
  579. head = NULL;
  580. tail = NULL;
  581. if (j < (num_req_decs - 1))
  582. dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
  583. rx_desc_pool,
  584. ((num_req_decs - 1) - j),
  585. &head, &tail, true);
  586. }
  587. }
  588. #endif
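/*
 * Summary of dp_rx_desc_reuse() above: for every rx descriptor pool, the
 * descriptors still marked in_use are re-posted to the refill ring until the
 * ring is full; any remaining in-use nbufs are chained onto *nbuf_list for
 * delayed free and their descriptors are returned to the free list. If fewer
 * than roughly one third of the ring's worth of descriptors were in use, a
 * normal dp_rx_buffers_replenish() is issued to top the ring up.
 */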
  589. /*
  590. * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  591. * called during dp rx initialization
  592. * and at the end of dp_rx_process.
  593. *
  594. * @soc: core txrx main context
  595. * @mac_id: mac_id which is one of 3 mac_ids
  596. * @dp_rxdma_srng: dp rxdma circular ring
  597. * @rx_desc_pool: Pointer to free Rx descriptor pool
  598. * @num_req_buffers: number of buffer to be replenished
  599. * @desc_list: list of descs if called from dp_rx_process
  600. * or NULL during dp rx initialization or out of buffer
  601. * interrupt.
  602. * @tail: tail of descs list
  603. * @req_only: If true don't replenish more than req buffers
  604. * @func_name: name of the caller function
  605. * Return: success or failure
  606. */
  607. QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
  608. struct dp_srng *dp_rxdma_srng,
  609. struct rx_desc_pool *rx_desc_pool,
  610. uint32_t num_req_buffers,
  611. union dp_rx_desc_list_elem_t **desc_list,
  612. union dp_rx_desc_list_elem_t **tail,
  613. bool req_only, const char *func_name)
  614. {
  615. uint32_t num_alloc_desc;
  616. uint16_t num_desc_to_free = 0;
  617. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
  618. uint32_t num_entries_avail;
  619. uint32_t count;
  620. int sync_hw_ptr = 1;
  621. struct dp_rx_nbuf_frag_info nbuf_frag_info = {0};
  622. void *rxdma_ring_entry;
  623. union dp_rx_desc_list_elem_t *next;
  624. QDF_STATUS ret;
  625. void *rxdma_srng;
  626. union dp_rx_desc_list_elem_t *desc_list_append = NULL;
  627. union dp_rx_desc_list_elem_t *tail_append = NULL;
  628. union dp_rx_desc_list_elem_t *temp_list = NULL;
  629. rxdma_srng = dp_rxdma_srng->hal_srng;
  630. if (qdf_unlikely(!dp_pdev)) {
  631. dp_rx_err("%pK: pdev is null for mac_id = %d",
  632. dp_soc, mac_id);
  633. return QDF_STATUS_E_FAILURE;
  634. }
  635. if (qdf_unlikely(!rxdma_srng)) {
  636. dp_rx_debug("%pK: rxdma srng not initialized", dp_soc);
  637. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  638. return QDF_STATUS_E_FAILURE;
  639. }
  640. dp_verbose_debug("%pK: requested %d buffers for replenish",
  641. dp_soc, num_req_buffers);
  642. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  643. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  644. rxdma_srng,
  645. sync_hw_ptr);
  646. dp_verbose_debug("%pK: no of available entries in rxdma ring: %d",
  647. dp_soc, num_entries_avail);
  648. if (!req_only && !(*desc_list) && (num_entries_avail >
  649. ((dp_rxdma_srng->num_entries * 3) / 4))) {
  650. num_req_buffers = num_entries_avail;
  651. DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
  652. } else if (num_entries_avail < num_req_buffers) {
  653. num_desc_to_free = num_req_buffers - num_entries_avail;
  654. num_req_buffers = num_entries_avail;
  655. } else if ((*desc_list) &&
  656. dp_rxdma_srng->num_entries - num_entries_avail <
  657. CRITICAL_BUFFER_THRESHOLD) {
  658. /* Append some free descriptors to tail */
  659. num_alloc_desc =
  660. dp_rx_get_free_desc_list(dp_soc, mac_id,
  661. rx_desc_pool,
  662. CRITICAL_BUFFER_THRESHOLD,
  663. &desc_list_append,
  664. &tail_append);
  665. if (num_alloc_desc) {
  666. temp_list = *desc_list;
  667. *desc_list = desc_list_append;
  668. tail_append->next = temp_list;
  669. num_req_buffers += num_alloc_desc;
  670. DP_STATS_DEC(dp_pdev,
  671. replenish.free_list,
  672. num_alloc_desc);
  673. } else
  674. dp_err_rl("%pK: no free rx_descs in freelist", dp_soc);
  675. }
  676. if (qdf_unlikely(!num_req_buffers)) {
  677. num_desc_to_free = num_req_buffers;
  678. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  679. goto free_descs;
  680. }
  681. /*
  682. * if desc_list is NULL, allocate the descs from freelist
  683. */
  684. if (!(*desc_list)) {
  685. num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
  686. rx_desc_pool,
  687. num_req_buffers,
  688. desc_list,
  689. tail);
  690. if (!num_alloc_desc) {
  691. dp_rx_err("%pK: no free rx_descs in freelist", dp_soc);
  692. DP_STATS_INC(dp_pdev, err.desc_alloc_fail,
  693. num_req_buffers);
  694. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  695. return QDF_STATUS_E_NOMEM;
  696. }
  697. dp_verbose_debug("%pK: %d rx desc allocated", dp_soc,
  698. num_alloc_desc);
  699. num_req_buffers = num_alloc_desc;
  700. }
  701. count = 0;
  702. while (count < num_req_buffers) {
  703. /* Flag is set during pdev rx_desc_pool initialization */
  704. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  705. ret = dp_pdev_frag_alloc_and_map(dp_soc,
  706. &nbuf_frag_info,
  707. dp_pdev,
  708. rx_desc_pool);
  709. else
  710. ret = dp_pdev_nbuf_alloc_and_map_replenish(dp_soc,
  711. mac_id,
  712. num_entries_avail, &nbuf_frag_info,
  713. dp_pdev, rx_desc_pool);
  714. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  715. if (qdf_unlikely(ret == QDF_STATUS_E_FAULT))
  716. continue;
  717. break;
  718. }
  719. count++;
  720. rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
  721. rxdma_srng);
  722. qdf_assert_always(rxdma_ring_entry);
  723. next = (*desc_list)->next;
  724. /* Flag is set during pdev rx_desc_pool initialization */
  725. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  726. dp_rx_desc_frag_prep(&((*desc_list)->rx_desc),
  727. &nbuf_frag_info);
  728. else
  729. dp_rx_desc_prep(&((*desc_list)->rx_desc),
  730. &nbuf_frag_info);
  731. /* rx_desc.in_use should be zero at this time*/
  732. qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
  733. (*desc_list)->rx_desc.in_use = 1;
  734. (*desc_list)->rx_desc.in_err_state = 0;
  735. dp_rx_desc_update_dbg_info(&(*desc_list)->rx_desc,
  736. func_name, RX_DESC_REPLENISHED);
  737. dp_verbose_debug("rx_netbuf=%pK, paddr=0x%llx, cookie=%d",
  738. nbuf_frag_info.virt_addr.nbuf,
  739. (unsigned long long)(nbuf_frag_info.paddr),
  740. (*desc_list)->rx_desc.cookie);
  741. hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry,
  742. nbuf_frag_info.paddr,
  743. (*desc_list)->rx_desc.cookie,
  744. rx_desc_pool->owner);
  745. *desc_list = next;
  746. }
  747. dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id, rxdma_srng,
  748. num_req_buffers, count);
  749. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  750. dp_rx_schedule_refill_thread(dp_soc);
  751. dp_verbose_debug("replenished buffers %d, rx desc added back to free list %u",
  752. count, num_desc_to_free);
  753. /* No need to count the number of bytes received during replenish.
  754. * Therefore set replenish.pkts.bytes as 0.
  755. */
  756. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
  757. DP_STATS_INC(dp_pdev, replenish.free_list, num_req_buffers - count);
  758. free_descs:
  759. DP_STATS_INC(dp_pdev, buf_freelist, num_desc_to_free);
  760. /*
  761. * add any available free desc back to the free list
  762. */
  763. if (*desc_list)
  764. dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
  765. mac_id, rx_desc_pool);
  766. return QDF_STATUS_SUCCESS;
  767. }
  768. qdf_export_symbol(__dp_rx_buffers_replenish);
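/*
 * Illustrative call site for the replenish API (sketch only; the quota value
 * num_rx_bufs_reaped and the assumption that the dp_rx_buffers_replenish()
 * wrapper used in dp_rx_desc_reuse() above supplies func_name are not taken
 * from this file):
 *
 *   union dp_rx_desc_list_elem_t *head = NULL, *tail = NULL;
 *
 *   dp_rx_buffers_replenish(soc, mac_id,
 *                           &soc->rx_refill_buf_ring[mac_id],
 *                           &soc->rx_desc_buf[mac_id],
 *                           num_rx_bufs_reaped, &head, &tail, false);
 */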
  769. /*
  770. * dp_rx_deliver_raw() - process RAW mode pkts and hand over the
  771. * pkts to RAW mode simulation to
  772. * decapsulate the pkt.
  773. *
  774. * @vdev: vdev on which RAW mode is enabled
  775. * @nbuf_list: list of RAW pkts to process
  776. * @txrx_peer: peer object from which the pkt is rx
  777. *
  778. * Return: void
  779. */
  780. void
  781. dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list,
  782. struct dp_txrx_peer *txrx_peer)
  783. {
  784. qdf_nbuf_t deliver_list_head = NULL;
  785. qdf_nbuf_t deliver_list_tail = NULL;
  786. qdf_nbuf_t nbuf;
  787. nbuf = nbuf_list;
  788. while (nbuf) {
  789. qdf_nbuf_t next = qdf_nbuf_next(nbuf);
  790. DP_RX_LIST_APPEND(deliver_list_head, deliver_list_tail, nbuf);
  791. DP_STATS_INC(vdev->pdev, rx_raw_pkts, 1);
  792. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.raw, 1,
  793. qdf_nbuf_len(nbuf));
  794. /*
  795. * reset the chfrag_start and chfrag_end bits in nbuf cb
  796. * as this is a non-amsdu pkt and RAW mode simulation expects
  797. * these bits to be 0 for a non-amsdu pkt.
  798. */
  799. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  800. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  801. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  802. qdf_nbuf_set_rx_chfrag_end(nbuf, 0);
  803. }
  804. nbuf = next;
  805. }
  806. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &deliver_list_head,
  807. &deliver_list_tail);
  808. vdev->osif_rx(vdev->osif_vdev, deliver_list_head);
  809. }
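/*
 * dp_rx_deliver_raw() above batches the RAW-mode frames into a local list,
 * clears the chfrag markers on single-MSDU frames, hands the list to the OS
 * shim's RAW-mode simulation hook (osif_rsim_rx_decap) for decapsulation and
 * then delivers the resulting list through osif_rx.
 */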
  810. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  811. #ifndef FEATURE_WDS
  812. void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr,
  813. struct dp_txrx_peer *ta_peer, qdf_nbuf_t nbuf)
  814. {
  815. }
  816. #endif
  817. #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES
  818. /*
  819. * dp_classify_critical_pkts() - API for marking critical packets
  820. * @soc: dp_soc context
  821. * @vdev: vdev on which packet is to be sent
  822. * @nbuf: nbuf that has to be classified
  823. *
  824. * The function parses the packet, identifies whether it's a critical frame and
  825. * marks QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL bit in qdf_nbuf_cb for the nbuf.
  826. * Code for marking which frames are CRITICAL is accessed via callback.
  827. * EAPOL, ARP, DHCP, DHCPv6, ICMPv6 NS/NA are the typical critical frames.
  828. *
  829. * Return: None
  830. */
  831. static
  832. void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
  833. qdf_nbuf_t nbuf)
  834. {
  835. if (vdev->tx_classify_critical_pkt_cb)
  836. vdev->tx_classify_critical_pkt_cb(vdev->osif_vdev, nbuf);
  837. }
  838. #else
  839. static inline
  840. void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
  841. qdf_nbuf_t nbuf)
  842. {
  843. }
  844. #endif
  845. #ifdef QCA_OL_TX_MULTIQ_SUPPORT
  846. static inline
  847. void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
  848. {
  849. qdf_nbuf_set_queue_mapping(nbuf, ring_id);
  850. }
  851. #else
  852. static inline
  853. void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
  854. {
  855. }
  856. #endif
  857. /*
  858. * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
  859. *
  860. * @soc: core txrx main context
  861. * @ta_peer : source peer entry
  862. * @rx_tlv_hdr : start address of rx tlvs
  863. * @nbuf : nbuf that has to be intrabss forwarded
  864. * @tid_stats : tid stats pointer
  865. *
  866. * Return: bool: true if it is forwarded else false
  867. */
  868. bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
  869. uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
  870. struct cdp_tid_rx_stats *tid_stats)
  871. {
  872. uint16_t len;
  873. qdf_nbuf_t nbuf_copy;
  874. if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
  875. nbuf))
  876. return true;
  877. if (!dp_rx_check_ndi_mdns_fwding(ta_peer, nbuf))
  878. return false;
  879. /* If the source peer is in the isolation list,
  880. * then don't forward; instead push to the bridge stack
  881. */
  882. if (dp_get_peer_isolation(ta_peer))
  883. return false;
  884. nbuf_copy = qdf_nbuf_copy(nbuf);
  885. if (!nbuf_copy)
  886. return false;
  887. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  888. qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
  889. dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
  890. if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
  891. tid_stats))
  892. return false;
  893. if (dp_tx_send((struct cdp_soc_t *)soc,
  894. ta_peer->vdev->vdev_id, nbuf_copy)) {
  895. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  896. len);
  897. tid_stats->fail_cnt[INTRABSS_DROP]++;
  898. dp_rx_nbuf_free(nbuf_copy);
  899. } else {
  900. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  901. len);
  902. tid_stats->intrabss_cnt++;
  903. }
  904. return false;
  905. }
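/*
 * Note on dp_rx_intrabss_mcbc_fwd() above: for multicast/broadcast frames the
 * original nbuf must still reach the network stack, so the function transmits
 * a copy (nbuf_copy) back into the BSS and returns false so that the caller
 * continues to deliver the original frame; the early EAPOL drop check is the
 * only case that returns true.
 */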
  906. /*
  907. * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets
  908. *
  909. * @soc: core txrx main context
  910. * @ta_peer: source peer entry
  911. * @tx_vdev_id: VDEV ID for Intra-BSS TX
  912. * @rx_tlv_hdr: start address of rx tlvs
  913. * @nbuf: nbuf that has to be intrabss forwarded
  914. * @tid_stats: tid stats pointer
  915. *
  916. * Return: bool: true if it is forwarded else false
  917. */
  918. bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
  919. uint8_t tx_vdev_id,
  920. uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
  921. struct cdp_tid_rx_stats *tid_stats)
  922. {
  923. uint16_t len;
  924. len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  925. /* linearize the nbuf just before we send to
  926. * dp_tx_send()
  927. */
  928. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
  929. if (qdf_nbuf_linearize(nbuf) == -ENOMEM)
  930. return false;
  931. nbuf = qdf_nbuf_unshare(nbuf);
  932. if (!nbuf) {
  933. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer,
  934. rx.intra_bss.fail,
  935. 1, len);
  936. /* qdf_nbuf_unshare() failed, so the pkt is not
  937. * forwarded; count the drop and let the caller
  938. * continue with the next nbuf.
  940. */
  941. tid_stats->fail_cnt[INTRABSS_DROP]++;
  942. return false;
  943. }
  944. }
  945. qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
  946. dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);
  947. if (!dp_tx_send((struct cdp_soc_t *)soc,
  948. tx_vdev_id, nbuf)) {
  949. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
  950. len);
  951. } else {
  952. DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
  953. len);
  954. tid_stats->fail_cnt[INTRABSS_DROP]++;
  955. return false;
  956. }
  957. return true;
  958. }
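/*
 * Note on dp_rx_intrabss_ucast_fwd() above: the unicast path hands the nbuf
 * itself to dp_tx_send(), so it returns true only when the frame was accepted
 * by the TX path; fragmented nbufs are linearized and unshared first, and any
 * failure is counted under rx.intra_bss.fail / INTRABSS_DROP.
 */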
  959. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  960. #ifdef MESH_MODE_SUPPORT
  961. /**
  962. * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
  963. *
  964. * @vdev: DP Virtual device handle
  965. * @nbuf: Buffer pointer
  966. * @rx_tlv_hdr: start of rx tlv header
  967. * @txrx_peer: pointer to peer
  968. *
  969. * This function allocates memory for mesh receive stats and fills the
  970. * required stats. Stores the memory address in skb cb.
  971. *
  972. * Return: void
  973. */
  974. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  975. uint8_t *rx_tlv_hdr,
  976. struct dp_txrx_peer *txrx_peer)
  977. {
  978. struct mesh_recv_hdr_s *rx_info = NULL;
  979. uint32_t pkt_type;
  980. uint32_t nss;
  981. uint32_t rate_mcs;
  982. uint32_t bw;
  983. uint8_t primary_chan_num;
  984. uint32_t center_chan_freq;
  985. struct dp_soc *soc = vdev->pdev->soc;
  986. struct dp_peer *peer;
  987. struct dp_peer *primary_link_peer;
  988. struct dp_soc *link_peer_soc;
  989. cdp_peer_stats_param_t buf = {0};
  990. /* fill recv mesh stats */
  991. rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
  992. /* upper layers are responsible for freeing this memory */
  993. if (!rx_info) {
  994. dp_rx_err("%pK: Memory allocation failed for mesh rx stats",
  995. vdev->pdev->soc);
  996. DP_STATS_INC(vdev->pdev, mesh_mem_alloc, 1);
  997. return;
  998. }
  999. rx_info->rs_flags = MESH_RXHDR_VER1;
  1000. if (qdf_nbuf_is_rx_chfrag_start(nbuf))
  1001. rx_info->rs_flags |= MESH_RX_FIRST_MSDU;
  1002. if (qdf_nbuf_is_rx_chfrag_end(nbuf))
  1003. rx_info->rs_flags |= MESH_RX_LAST_MSDU;
  1004. peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id, DP_MOD_ID_MESH);
  1005. if (peer) {
  1006. if (hal_rx_tlv_get_is_decrypted(soc->hal_soc, rx_tlv_hdr)) {
  1007. rx_info->rs_flags |= MESH_RX_DECRYPTED;
  1008. rx_info->rs_keyix = hal_rx_msdu_get_keyid(soc->hal_soc,
  1009. rx_tlv_hdr);
  1010. if (vdev->osif_get_key)
  1011. vdev->osif_get_key(vdev->osif_vdev,
  1012. &rx_info->rs_decryptkey[0],
  1013. &peer->mac_addr.raw[0],
  1014. rx_info->rs_keyix);
  1015. }
  1016. dp_peer_unref_delete(peer, DP_MOD_ID_MESH);
  1017. }
  1018. primary_link_peer = dp_get_primary_link_peer_by_id(soc,
  1019. txrx_peer->peer_id,
  1020. DP_MOD_ID_MESH);
  1021. if (qdf_likely(primary_link_peer)) {
  1022. link_peer_soc = primary_link_peer->vdev->pdev->soc;
  1023. dp_monitor_peer_get_stats_param(link_peer_soc,
  1024. primary_link_peer,
  1025. cdp_peer_rx_snr, &buf);
  1026. rx_info->rs_snr = buf.rx_snr;
  1027. dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_MESH);
  1028. }
  1029. rx_info->rs_rssi = rx_info->rs_snr + DP_DEFAULT_NOISEFLOOR;
  1030. soc = vdev->pdev->soc;
  1031. primary_chan_num = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr);
  1032. center_chan_freq = hal_rx_tlv_get_freq(soc->hal_soc, rx_tlv_hdr) >> 16;
  1033. if (soc->cdp_soc.ol_ops && soc->cdp_soc.ol_ops->freq_to_band) {
  1034. rx_info->rs_band = soc->cdp_soc.ol_ops->freq_to_band(
  1035. soc->ctrl_psoc,
  1036. vdev->pdev->pdev_id,
  1037. center_chan_freq);
  1038. }
  1039. rx_info->rs_channel = primary_chan_num;
  1040. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  1041. rate_mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  1042. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  1043. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  1044. rx_info->rs_ratephy1 = rate_mcs | (nss << 0x8) | (pkt_type << 16) |
  1045. (bw << 24);
  1046. qdf_nbuf_set_rx_fctx_type(nbuf, (void *)rx_info, CB_FTYPE_MESH_RX_INFO);
  1047. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_MED,
  1048. FL("Mesh rx stats: flags %x, rssi %x, chn %x, rate %x, kix %x, snr %x"),
  1049. rx_info->rs_flags,
  1050. rx_info->rs_rssi,
  1051. rx_info->rs_channel,
  1052. rx_info->rs_ratephy1,
  1053. rx_info->rs_keyix,
  1054. rx_info->rs_snr);
  1055. }
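/*
 * The rs_ratephy1 word filled above packs the PHY parameters as
 * rate_mcs | (nss << 8) | (pkt_type << 16) | (bw << 24); for example
 * rate_mcs 7, nss 2, pkt_type 2 and bw 1 would be encoded as 0x01020207.
 */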
  1056. /**
  1057. * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets
  1058. *
  1059. * @vdev: DP Virtual device handle
  1060. * @nbuf: Buffer pointer
  1061. * @rx_tlv_hdr: start of rx tlv header
  1062. *
  1063. * This checks if the received packet matches any filter-out
  1064. * category and drops the packet if it matches.
  1065. *
  1066. * Return: status (0 indicates drop, 1 indicates no drop)
  1067. */
  1068. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1069. uint8_t *rx_tlv_hdr)
  1070. {
  1071. union dp_align_mac_addr mac_addr;
  1072. struct dp_soc *soc = vdev->pdev->soc;
  1073. if (qdf_unlikely(vdev->mesh_rx_filter)) {
  1074. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_FROMDS)
  1075. if (hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  1076. rx_tlv_hdr))
  1077. return QDF_STATUS_SUCCESS;
  1078. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TODS)
  1079. if (hal_rx_mpdu_get_to_ds(soc->hal_soc,
  1080. rx_tlv_hdr))
  1081. return QDF_STATUS_SUCCESS;
  1082. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_NODS)
  1083. if (!hal_rx_mpdu_get_fr_ds(soc->hal_soc,
  1084. rx_tlv_hdr) &&
  1085. !hal_rx_mpdu_get_to_ds(soc->hal_soc,
  1086. rx_tlv_hdr))
  1087. return QDF_STATUS_SUCCESS;
  1088. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_RA) {
  1089. if (hal_rx_mpdu_get_addr1(soc->hal_soc,
  1090. rx_tlv_hdr,
  1091. &mac_addr.raw[0]))
  1092. return QDF_STATUS_E_FAILURE;
  1093. if (!qdf_mem_cmp(&mac_addr.raw[0],
  1094. &vdev->mac_addr.raw[0],
  1095. QDF_MAC_ADDR_SIZE))
  1096. return QDF_STATUS_SUCCESS;
  1097. }
  1098. if (vdev->mesh_rx_filter & MESH_FILTER_OUT_TA) {
  1099. if (hal_rx_mpdu_get_addr2(soc->hal_soc,
  1100. rx_tlv_hdr,
  1101. &mac_addr.raw[0]))
  1102. return QDF_STATUS_E_FAILURE;
  1103. if (!qdf_mem_cmp(&mac_addr.raw[0],
  1104. &vdev->mac_addr.raw[0],
  1105. QDF_MAC_ADDR_SIZE))
  1106. return QDF_STATUS_SUCCESS;
  1107. }
  1108. }
  1109. return QDF_STATUS_E_FAILURE;
  1110. }
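/*
 * Return convention for dp_rx_filter_mesh_packets() above: QDF_STATUS_SUCCESS
 * means the frame matched one of the configured filter-out categories
 * (FROMDS/TODS/NODS/RA/TA) and should be dropped by the caller, while
 * QDF_STATUS_E_FAILURE means no filter matched and the frame is kept.
 */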
  1111. #else
  1112. void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1113. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer)
  1114. {
  1115. }
  1116. QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1117. uint8_t *rx_tlv_hdr)
  1118. {
  1119. return QDF_STATUS_E_FAILURE;
  1120. }
  1121. #endif
  1122. #ifdef FEATURE_NAC_RSSI
  1123. /**
  1124. * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac
  1125. * @soc: DP SOC handle
  1126. * @mpdu: mpdu for which peer is invalid
  1127. * @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
  1128. * pool_id have the same mapping)
  1129. *
  1130. * return: integer type
  1131. */
  1132. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1133. uint8_t mac_id)
  1134. {
  1135. struct dp_invalid_peer_msg msg;
  1136. struct dp_vdev *vdev = NULL;
  1137. struct dp_pdev *pdev = NULL;
  1138. struct ieee80211_frame *wh;
  1139. qdf_nbuf_t curr_nbuf, next_nbuf;
  1140. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1141. uint8_t *rx_pkt_hdr = NULL;
  1142. int i = 0;
  1143. if (!HAL_IS_DECAP_FORMAT_RAW(soc->hal_soc, rx_tlv_hdr)) {
  1144. dp_rx_debug("%pK: Drop decapped frames", soc);
  1145. goto free;
  1146. }
  1147. /* In RAW packet, packet header will be part of data */
  1148. rx_pkt_hdr = rx_tlv_hdr + soc->rx_pkt_tlv_size;
  1149. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1150. if (!DP_FRAME_IS_DATA(wh)) {
  1151. dp_rx_debug("%pK: NAWDS valid only for data frames", soc);
  1152. goto free;
  1153. }
  1154. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  1155. dp_rx_err("%pK: Invalid nbuf length", soc);
  1156. goto free;
  1157. }
  1158. /* In DMAC case the rx_desc_pools are common across PDEVs
  1159. * so PDEV cannot be derived from the pool_id.
  1160. *
  1161. * link_id needs to be derived from the TLV tag word, which is
  1162. * disabled by default. For now, add a WAR to find the vdev by
  1163. * brute force; this needs to be fixed once word-based subscription
  1164. * support is added by enabling the TLV tag word.
  1165. */
  1166. if (soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
  1167. for (i = 0; i < MAX_PDEV_CNT; i++) {
  1168. pdev = soc->pdev_list[i];
  1169. if (!pdev || qdf_unlikely(pdev->is_pdev_down))
  1170. continue;
  1171. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1172. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1173. QDF_MAC_ADDR_SIZE) == 0) {
  1174. goto out;
  1175. }
  1176. }
  1177. }
  1178. } else {
  1179. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1180. if (!pdev || qdf_unlikely(pdev->is_pdev_down)) {
  1181. dp_rx_err("%pK: PDEV %s",
  1182. soc, !pdev ? "not found" : "down");
  1183. goto free;
  1184. }
  1185. if (dp_monitor_filter_neighbour_peer(pdev, rx_pkt_hdr) ==
  1186. QDF_STATUS_SUCCESS)
  1187. return 0;
  1188. TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
  1189. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1190. QDF_MAC_ADDR_SIZE) == 0) {
  1191. goto out;
  1192. }
  1193. }
  1194. }
  1195. if (!vdev) {
  1196. dp_rx_err("%pK: VDEV not found", soc);
  1197. goto free;
  1198. }
  1199. out:
  1200. msg.wh = wh;
  1201. qdf_nbuf_pull_head(mpdu, soc->rx_pkt_tlv_size);
  1202. msg.nbuf = mpdu;
  1203. msg.vdev_id = vdev->vdev_id;
/*
* NOTE: Only valid for HKv1.
* If smart monitor mode is enabled on the RE, we are getting invalid
* peer frames with the RA as the STA mac of the RE and the TA not
* matching any NAC list entry or the BSSID. Such frames need to be
* dropped in order to avoid HM_WDS false addition.
*/
  1211. if (pdev->soc->cdp_soc.ol_ops->rx_invalid_peer) {
  1212. if (dp_monitor_drop_inv_peer_pkts(vdev) == QDF_STATUS_SUCCESS) {
  1213. dp_rx_warn("%pK: Drop inv peer pkts with STA RA:%pm",
  1214. soc, wh->i_addr1);
  1215. goto free;
  1216. }
  1217. pdev->soc->cdp_soc.ol_ops->rx_invalid_peer(
  1218. (struct cdp_ctrl_objmgr_psoc *)soc->ctrl_psoc,
  1219. pdev->pdev_id, &msg);
  1220. }
  1221. free:
  1222. /* Drop and free packet */
  1223. curr_nbuf = mpdu;
  1224. while (curr_nbuf) {
  1225. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1226. dp_rx_nbuf_free(curr_nbuf);
  1227. curr_nbuf = next_nbuf;
  1228. }
  1229. return 0;
  1230. }
  1231. /**
  1232. * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler
  1233. * @soc: DP SOC handle
  1234. * @mpdu: mpdu for which peer is invalid
  1235. * @mpdu_done: if an mpdu is completed
* @mac_id: mac_id which is one of 3 mac_ids (assuming mac_id and
* pool_id have the same mapping)
*
* Return: None
  1240. */
  1241. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1242. qdf_nbuf_t mpdu, bool mpdu_done,
  1243. uint8_t mac_id)
  1244. {
  1245. /* Only trigger the process when mpdu is completed */
  1246. if (mpdu_done)
  1247. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1248. }
  1249. #else
  1250. uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu,
  1251. uint8_t mac_id)
  1252. {
  1253. qdf_nbuf_t curr_nbuf, next_nbuf;
  1254. struct dp_pdev *pdev;
  1255. struct dp_vdev *vdev = NULL;
  1256. struct ieee80211_frame *wh;
  1257. uint8_t *rx_tlv_hdr = qdf_nbuf_data(mpdu);
  1258. uint8_t *rx_pkt_hdr = hal_rx_pkt_hdr_get(soc->hal_soc, rx_tlv_hdr);
  1259. wh = (struct ieee80211_frame *)rx_pkt_hdr;
  1260. if (!DP_FRAME_IS_DATA(wh)) {
  1261. QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP,
  1262. "only for data frames");
  1263. goto free;
  1264. }
  1265. if (qdf_nbuf_len(mpdu) < sizeof(struct ieee80211_frame)) {
  1266. dp_rx_info_rl("%pK: Invalid nbuf length", soc);
  1267. goto free;
  1268. }
  1269. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1270. if (!pdev) {
  1271. dp_rx_info_rl("%pK: PDEV not found", soc);
  1272. goto free;
  1273. }
  1274. qdf_spin_lock_bh(&pdev->vdev_list_lock);
  1275. DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
  1276. if (qdf_mem_cmp(wh->i_addr1, vdev->mac_addr.raw,
  1277. QDF_MAC_ADDR_SIZE) == 0) {
  1278. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1279. goto out;
  1280. }
  1281. }
  1282. qdf_spin_unlock_bh(&pdev->vdev_list_lock);
  1283. if (!vdev) {
  1284. dp_rx_info_rl("%pK: VDEV not found", soc);
  1285. goto free;
  1286. }
  1287. out:
  1288. if (soc->cdp_soc.ol_ops->rx_invalid_peer)
  1289. soc->cdp_soc.ol_ops->rx_invalid_peer(vdev->vdev_id, wh);
  1290. free:
  1291. /* Drop and free packet */
  1292. curr_nbuf = mpdu;
  1293. while (curr_nbuf) {
  1294. next_nbuf = qdf_nbuf_next(curr_nbuf);
  1295. dp_rx_nbuf_free(curr_nbuf);
  1296. curr_nbuf = next_nbuf;
  1297. }
  1298. /* Reset the head and tail pointers */
  1299. pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
  1300. if (pdev) {
  1301. pdev->invalid_peer_head_msdu = NULL;
  1302. pdev->invalid_peer_tail_msdu = NULL;
  1303. }
  1304. return 0;
  1305. }
  1306. void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
  1307. qdf_nbuf_t mpdu, bool mpdu_done,
  1308. uint8_t mac_id)
  1309. {
  1310. /* Process the nbuf */
  1311. dp_rx_process_invalid_peer(soc, mpdu, mac_id);
  1312. }
  1313. #endif
  1314. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1315. #ifdef RECEIVE_OFFLOAD
  1316. /**
  1317. * dp_rx_print_offload_info() - Print offload info from RX TLV
  1318. * @soc: dp soc handle
  1319. * @msdu: MSDU for which the offload info is to be printed
  1320. *
  1321. * Return: None
  1322. */
  1323. static void dp_rx_print_offload_info(struct dp_soc *soc,
  1324. qdf_nbuf_t msdu)
  1325. {
  1326. dp_verbose_debug("----------------------RX DESC LRO/GRO----------------------");
  1327. dp_verbose_debug("lro_eligible 0x%x",
  1328. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu));
  1329. dp_verbose_debug("pure_ack 0x%x", QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu));
  1330. dp_verbose_debug("chksum 0x%x", QDF_NBUF_CB_RX_TCP_CHKSUM(msdu));
  1331. dp_verbose_debug("TCP seq num 0x%x", QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu));
  1332. dp_verbose_debug("TCP ack num 0x%x", QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu));
  1333. dp_verbose_debug("TCP window 0x%x", QDF_NBUF_CB_RX_TCP_WIN(msdu));
  1334. dp_verbose_debug("TCP protocol 0x%x", QDF_NBUF_CB_RX_TCP_PROTO(msdu));
  1335. dp_verbose_debug("TCP offset 0x%x", QDF_NBUF_CB_RX_TCP_OFFSET(msdu));
  1336. dp_verbose_debug("toeplitz 0x%x", QDF_NBUF_CB_RX_FLOW_ID(msdu));
  1337. dp_verbose_debug("---------------------------------------------------------");
  1338. }
  1339. /**
  1340. * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb
  1341. * @soc: DP SOC handle
  1342. * @rx_tlv: RX TLV received for the msdu
  1343. * @msdu: msdu for which GRO info needs to be filled
  1344. * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets
  1345. *
  1346. * Return: None
  1347. */
  1348. void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv,
  1349. qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt)
  1350. {
  1351. struct hal_offload_info offload_info;
  1352. if (!wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx))
  1353. return;
  1354. if (hal_rx_tlv_get_offload_info(soc->hal_soc, rx_tlv, &offload_info))
  1355. return;
  1356. *rx_ol_pkt_cnt = *rx_ol_pkt_cnt + 1;
  1357. QDF_NBUF_CB_RX_LRO_ELIGIBLE(msdu) = offload_info.lro_eligible;
  1358. QDF_NBUF_CB_RX_TCP_PURE_ACK(msdu) = offload_info.tcp_pure_ack;
  1359. QDF_NBUF_CB_RX_TCP_CHKSUM(msdu) =
  1360. hal_rx_tlv_get_tcp_chksum(soc->hal_soc,
  1361. rx_tlv);
  1362. QDF_NBUF_CB_RX_TCP_SEQ_NUM(msdu) = offload_info.tcp_seq_num;
  1363. QDF_NBUF_CB_RX_TCP_ACK_NUM(msdu) = offload_info.tcp_ack_num;
  1364. QDF_NBUF_CB_RX_TCP_WIN(msdu) = offload_info.tcp_win;
  1365. QDF_NBUF_CB_RX_TCP_PROTO(msdu) = offload_info.tcp_proto;
  1366. QDF_NBUF_CB_RX_IPV6_PROTO(msdu) = offload_info.ipv6_proto;
  1367. QDF_NBUF_CB_RX_TCP_OFFSET(msdu) = offload_info.tcp_offset;
  1368. QDF_NBUF_CB_RX_FLOW_ID(msdu) = offload_info.flow_id;
  1369. dp_rx_print_offload_info(soc, msdu);
  1370. }
  1371. #endif /* RECEIVE_OFFLOAD */
  1372. /**
  1373. * dp_rx_adjust_nbuf_len() - set appropriate msdu length in nbuf.
  1374. *
  1375. * @soc: DP soc handle
  1376. * @nbuf: pointer to msdu.
  1377. * @mpdu_len: mpdu length
  1378. * @l3_pad_len: L3 padding length by HW
  1379. *
* Return: returns true if nbuf is the last msdu of the mpdu, else returns false.
  1381. */
  1382. static inline bool dp_rx_adjust_nbuf_len(struct dp_soc *soc,
  1383. qdf_nbuf_t nbuf,
  1384. uint16_t *mpdu_len,
  1385. uint32_t l3_pad_len)
  1386. {
  1387. bool last_nbuf;
  1388. uint32_t pkt_hdr_size;
  1389. pkt_hdr_size = soc->rx_pkt_tlv_size + l3_pad_len;
  1390. if ((*mpdu_len + pkt_hdr_size) > RX_DATA_BUFFER_SIZE) {
  1391. qdf_nbuf_set_pktlen(nbuf, RX_DATA_BUFFER_SIZE);
  1392. last_nbuf = false;
  1393. *mpdu_len -= (RX_DATA_BUFFER_SIZE - pkt_hdr_size);
  1394. } else {
  1395. qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + pkt_hdr_size));
  1396. last_nbuf = true;
  1397. *mpdu_len = 0;
  1398. }
  1399. return last_nbuf;
  1400. }
  1401. /**
  1402. * dp_get_l3_hdr_pad_len() - get L3 header padding length.
  1403. *
  1404. * @soc: DP soc handle
  1405. * @nbuf: pointer to msdu.
  1406. *
  1407. * Return: returns padding length in bytes.
  1408. */
  1409. static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc,
  1410. qdf_nbuf_t nbuf)
  1411. {
  1412. uint32_t l3_hdr_pad = 0;
  1413. uint8_t *rx_tlv_hdr;
  1414. struct hal_rx_msdu_metadata msdu_metadata;
  1415. while (nbuf) {
  1416. if (!qdf_nbuf_is_rx_chfrag_cont(nbuf)) {
  1417. /* scattered msdu end with continuation is 0 */
  1418. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  1419. hal_rx_msdu_metadata_get(soc->hal_soc,
  1420. rx_tlv_hdr,
  1421. &msdu_metadata);
  1422. l3_hdr_pad = msdu_metadata.l3_hdr_pad;
  1423. break;
  1424. }
  1425. nbuf = nbuf->next;
  1426. }
  1427. return l3_hdr_pad;
  1428. }
  1429. /**
  1430. * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
  1431. * multiple nbufs.
  1432. * @soc: DP SOC handle
  1433. * @nbuf: pointer to the first msdu of an amsdu.
  1434. *
  1435. * This function implements the creation of RX frag_list for cases
  1436. * where an MSDU is spread across multiple nbufs.
  1437. *
  1438. * Return: returns the head nbuf which contains complete frag_list.
  1439. */
  1440. qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf)
  1441. {
  1442. qdf_nbuf_t parent, frag_list, next = NULL;
  1443. uint16_t frag_list_len = 0;
  1444. uint16_t mpdu_len;
  1445. bool last_nbuf;
  1446. uint32_t l3_hdr_pad_offset = 0;
/*
* Use the msdu len obtained from the REO entry descriptor instead, since
* there are cases where the RX PKT TLV is corrupted while the msdu_len
* from the REO descriptor is correct for non-raw RX scatter msdus.
*/
  1452. mpdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  1453. /*
  1454. * this is a case where the complete msdu fits in one single nbuf.
  1455. * in this case HW sets both start and end bit and we only need to
  1456. * reset these bits for RAW mode simulator to decap the pkt
  1457. */
  1458. if (qdf_nbuf_is_rx_chfrag_start(nbuf) &&
  1459. qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1460. qdf_nbuf_set_pktlen(nbuf, mpdu_len + soc->rx_pkt_tlv_size);
  1461. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size);
  1462. return nbuf;
  1463. }
  1464. l3_hdr_pad_offset = dp_get_l3_hdr_pad_len(soc, nbuf);
  1465. /*
  1466. * This is a case where we have multiple msdus (A-MSDU) spread across
  1467. * multiple nbufs. here we create a fraglist out of these nbufs.
  1468. *
  1469. * the moment we encounter a nbuf with continuation bit set we
  1470. * know for sure we have an MSDU which is spread across multiple
  1471. * nbufs. We loop through and reap nbufs till we reach last nbuf.
  1472. */
  1473. parent = nbuf;
  1474. frag_list = nbuf->next;
  1475. nbuf = nbuf->next;
  1476. /*
  1477. * set the start bit in the first nbuf we encounter with continuation
  1478. * bit set. This has the proper mpdu length set as it is the first
  1479. * msdu of the mpdu. this becomes the parent nbuf and the subsequent
  1480. * nbufs will form the frag_list of the parent nbuf.
  1481. */
  1482. qdf_nbuf_set_rx_chfrag_start(parent, 1);
  1483. /*
  1484. * L3 header padding is only needed for the 1st buffer
  1485. * in a scattered msdu
  1486. */
  1487. last_nbuf = dp_rx_adjust_nbuf_len(soc, parent, &mpdu_len,
  1488. l3_hdr_pad_offset);
  1489. /*
  1490. * MSDU cont bit is set but reported MPDU length can fit
  1491. * in to single buffer
  1492. *
  1493. * Increment error stats and avoid SG list creation
  1494. */
  1495. if (last_nbuf) {
  1496. DP_STATS_INC(soc, rx.err.msdu_continuation_err, 1);
  1497. qdf_nbuf_pull_head(parent,
  1498. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1499. return parent;
  1500. }
  1501. /*
  1502. * this is where we set the length of the fragments which are
  1503. * associated to the parent nbuf. We iterate through the frag_list
  1504. * till we hit the last_nbuf of the list.
  1505. */
  1506. do {
  1507. last_nbuf = dp_rx_adjust_nbuf_len(soc, nbuf, &mpdu_len, 0);
  1508. qdf_nbuf_pull_head(nbuf,
  1509. soc->rx_pkt_tlv_size);
  1510. frag_list_len += qdf_nbuf_len(nbuf);
  1511. if (last_nbuf) {
  1512. next = nbuf->next;
  1513. nbuf->next = NULL;
  1514. break;
  1515. } else if (qdf_nbuf_is_rx_chfrag_end(nbuf)) {
  1516. dp_err("Invalid packet length\n");
  1517. qdf_assert_always(0);
  1518. }
  1519. nbuf = nbuf->next;
  1520. } while (!last_nbuf);
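/* Clear the start bit on the last fragment and attach the collected
 * fragments to the parent nbuf as its frag_list.
 */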
  1521. qdf_nbuf_set_rx_chfrag_start(nbuf, 0);
  1522. qdf_nbuf_append_ext_list(parent, frag_list, frag_list_len);
  1523. parent->next = next;
  1524. qdf_nbuf_pull_head(parent,
  1525. soc->rx_pkt_tlv_size + l3_hdr_pad_offset);
  1526. return parent;
  1527. }
  1528. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  1529. #ifdef QCA_PEER_EXT_STATS
/*
* dp_rx_compute_tid_delay - Compute per TID delay stats
* @stats: per TID delay stats
* @nbuf: NBuffer
*
* Return: Void
*/
  1537. void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  1538. qdf_nbuf_t nbuf)
  1539. {
  1540. struct cdp_delay_rx_stats *rx_delay = &stats->rx_delay;
  1541. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1542. dp_hist_update_stats(&rx_delay->to_stack_delay, to_stack);
  1543. }
  1544. #endif /* QCA_PEER_EXT_STATS */
/**
* dp_rx_compute_delay() - Compute and fill in all timestamps
* to pass in correct fields
*
* @vdev: vdev handle
* @nbuf: received msdu buffer
*
* Return: none
*/
  1554. void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1555. {
  1556. uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
  1557. int64_t current_ts = qdf_ktime_to_ms(qdf_ktime_get());
  1558. uint32_t to_stack = qdf_nbuf_get_timedelta_ms(nbuf);
  1559. uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
  1560. uint32_t interframe_delay =
  1561. (uint32_t)(current_ts - vdev->prev_rx_deliver_tstamp);
  1562. struct cdp_tid_rx_stats *rstats =
  1563. &vdev->pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1564. dp_update_delay_stats(NULL, rstats, to_stack, tid,
  1565. CDP_DELAY_STATS_REAP_STACK, ring_id, false);
  1566. /*
  1567. * Update interframe delay stats calculated at deliver_data_ol point.
* Value of vdev->prev_rx_deliver_tstamp will be 0 for the 1st frame, so
* the interframe delay will not be calculated correctly for the 1st frame.
* On the other hand, this helps in avoiding an extra per-packet check of
* vdev->prev_rx_deliver_tstamp.
  1572. */
  1573. dp_update_delay_stats(NULL, rstats, interframe_delay, tid,
  1574. CDP_DELAY_STATS_RX_INTERFRAME, ring_id, false);
  1575. vdev->prev_rx_deliver_tstamp = current_ts;
  1576. }
  1577. /**
  1578. * dp_rx_drop_nbuf_list() - drop an nbuf list
  1579. * @pdev: dp pdev reference
* @buf_list: buffer list to be dropped
  1581. *
  1582. * Return: int (number of bufs dropped)
  1583. */
  1584. static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
  1585. qdf_nbuf_t buf_list)
  1586. {
  1587. struct cdp_tid_rx_stats *stats = NULL;
  1588. uint8_t tid = 0, ring_id = 0;
  1589. int num_dropped = 0;
  1590. qdf_nbuf_t buf, next_buf;
  1591. buf = buf_list;
  1592. while (buf) {
  1593. ring_id = QDF_NBUF_CB_RX_CTX_ID(buf);
  1594. next_buf = qdf_nbuf_queue_next(buf);
  1595. tid = qdf_nbuf_get_tid_val(buf);
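/* Count the failure and decrement delivered_to_stack since this buffer
 * will not reach the stack.
 */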
  1596. if (qdf_likely(pdev)) {
  1597. stats = &pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
  1598. stats->fail_cnt[INVALID_PEER_VDEV]++;
  1599. stats->delivered_to_stack--;
  1600. }
  1601. dp_rx_nbuf_free(buf);
  1602. buf = next_buf;
  1603. num_dropped++;
  1604. }
  1605. return num_dropped;
  1606. }
  1607. #ifdef QCA_SUPPORT_WDS_EXTENDED
  1608. /**
  1609. * dp_rx_deliver_to_stack_ext() - Deliver to netdev per sta
  1610. * @soc: core txrx main context
  1611. * @vdev: vdev
  1612. * @txrx_peer: txrx peer
  1613. * @nbuf_head: skb list head
  1614. *
  1615. * Return: true if packet is delivered to netdev per STA.
  1616. */
  1617. static inline bool
  1618. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1619. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1620. {
  1621. /*
  1622. * When extended WDS is disabled, frames are sent to AP netdevice.
  1623. */
  1624. if (qdf_likely(!vdev->wds_ext_enabled))
  1625. return false;
  1626. /*
  1627. * There can be 2 cases:
* 1. Send the frame to the parent netdev if it is not for a per-STA netdev
  1629. * 2. If frame is meant for netdev per STA:
  1630. * a. Send frame to appropriate netdev using registered fp.
  1631. * b. If fp is NULL, drop the frames.
  1632. */
  1633. if (!txrx_peer->wds_ext.init)
  1634. return false;
  1635. if (txrx_peer->osif_rx)
  1636. txrx_peer->osif_rx(txrx_peer->wds_ext.osif_peer, nbuf_head);
  1637. else
  1638. dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1639. return true;
  1640. }
  1641. #else
  1642. static inline bool
  1643. dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev,
  1644. struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf_head)
  1645. {
  1646. return false;
  1647. }
  1648. #endif
  1649. #ifdef PEER_CACHE_RX_PKTS
  1650. /**
  1651. * dp_rx_flush_rx_cached() - flush cached rx frames
  1652. * @peer: peer
  1653. * @drop: flag to drop frames or forward to net stack
  1654. *
  1655. * Return: None
  1656. */
  1657. void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
  1658. {
  1659. struct dp_peer_cached_bufq *bufqi;
  1660. struct dp_rx_cached_buf *cache_buf = NULL;
  1661. ol_txrx_rx_fp data_rx = NULL;
  1662. int num_buff_elem;
  1663. QDF_STATUS status;
  1664. /*
  1665. * Flush dp cached frames only for mld peers and legacy peers, as
  1666. * link peers don't store cached frames
  1667. */
  1668. if (IS_MLO_DP_LINK_PEER(peer))
  1669. return;
  1670. if (!peer->txrx_peer) {
  1671. dp_err("txrx_peer NULL!! peer mac_addr("QDF_MAC_ADDR_FMT")",
  1672. QDF_MAC_ADDR_REF(peer->mac_addr.raw));
  1673. return;
  1674. }
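/* Allow only one flush at a time; bail out if a flush is already in progress */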
  1675. if (qdf_atomic_inc_return(&peer->txrx_peer->flush_in_progress) > 1) {
  1676. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1677. return;
  1678. }
  1679. qdf_spin_lock_bh(&peer->peer_info_lock);
  1680. if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
  1681. data_rx = peer->vdev->osif_rx;
  1682. else
  1683. drop = true;
  1684. qdf_spin_unlock_bh(&peer->peer_info_lock);
  1685. bufqi = &peer->txrx_peer->bufq_info;
  1686. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1687. qdf_list_remove_front(&bufqi->cached_bufq,
  1688. (qdf_list_node_t **)&cache_buf);
  1689. while (cache_buf) {
  1690. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
  1691. cache_buf->buf);
  1692. bufqi->entries -= num_buff_elem;
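/* Release the bufq lock while the frames are delivered or freed */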
  1693. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1694. if (drop) {
  1695. bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
  1696. cache_buf->buf);
  1697. } else {
  1698. /* Flush the cached frames to OSIF DEV */
  1699. status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
  1700. if (status != QDF_STATUS_SUCCESS)
  1701. bufqi->dropped = dp_rx_drop_nbuf_list(
  1702. peer->vdev->pdev,
  1703. cache_buf->buf);
  1704. }
  1705. qdf_mem_free(cache_buf);
  1706. cache_buf = NULL;
  1707. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1708. qdf_list_remove_front(&bufqi->cached_bufq,
  1709. (qdf_list_node_t **)&cache_buf);
  1710. }
  1711. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1712. qdf_atomic_dec(&peer->txrx_peer->flush_in_progress);
  1713. }
  1714. /**
  1715. * dp_rx_enqueue_rx() - cache rx frames
* @txrx_peer: txrx peer
* @rx_buf_list: cache buffer list
*
* Return: QDF_STATUS
  1720. */
  1721. static QDF_STATUS
  1722. dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
  1723. {
  1724. struct dp_rx_cached_buf *cache_buf;
  1725. struct dp_peer_cached_bufq *bufqi = &txrx_peer->bufq_info;
  1726. int num_buff_elem;
  1727. QDF_STATUS ret = QDF_STATUS_SUCCESS;
  1728. struct dp_soc *soc = txrx_peer->vdev->pdev->soc;
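/* Take a peer reference so the peer cannot go away while frames are cached */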
  1729. struct dp_peer *peer = dp_peer_get_ref_by_id(soc, txrx_peer->peer_id,
  1730. DP_MOD_ID_RX);
  1731. if (!peer) {
  1732. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1733. rx_buf_list);
  1734. return QDF_STATUS_E_INVAL;
  1735. }
  1736. dp_debug_rl("bufq->curr %d bufq->drops %d", bufqi->entries,
  1737. bufqi->dropped);
  1738. if (!peer->valid) {
  1739. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1740. rx_buf_list);
  1741. ret = QDF_STATUS_E_INVAL;
  1742. goto fail;
  1743. }
  1744. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1745. if (bufqi->entries >= bufqi->thresh) {
  1746. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1747. rx_buf_list);
  1748. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1749. ret = QDF_STATUS_E_RESOURCES;
  1750. goto fail;
  1751. }
  1752. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1753. num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
  1754. cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
  1755. if (!cache_buf) {
  1756. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
  1757. "Failed to allocate buf to cache rx frames");
  1758. bufqi->dropped = dp_rx_drop_nbuf_list(txrx_peer->vdev->pdev,
  1759. rx_buf_list);
  1760. ret = QDF_STATUS_E_NOMEM;
  1761. goto fail;
  1762. }
  1763. cache_buf->buf = rx_buf_list;
  1764. qdf_spin_lock_bh(&bufqi->bufq_lock);
  1765. qdf_list_insert_back(&bufqi->cached_bufq,
  1766. &cache_buf->node);
  1767. bufqi->entries += num_buff_elem;
  1768. qdf_spin_unlock_bh(&bufqi->bufq_lock);
  1769. fail:
  1770. dp_peer_unref_delete(peer, DP_MOD_ID_RX);
  1771. return ret;
  1772. }
  1773. static inline
  1774. bool dp_rx_is_peer_cache_bufq_supported(void)
  1775. {
  1776. return true;
  1777. }
  1778. #else
  1779. static inline
  1780. bool dp_rx_is_peer_cache_bufq_supported(void)
  1781. {
  1782. return false;
  1783. }
  1784. static inline QDF_STATUS
  1785. dp_rx_enqueue_rx(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t rx_buf_list)
  1786. {
  1787. return QDF_STATUS_SUCCESS;
  1788. }
  1789. #endif
  1790. #ifndef DELIVERY_TO_STACK_STATUS_CHECK
  1791. /**
  1792. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1793. * using the appropriate call back functions.
  1794. * @soc: soc
  1795. * @vdev: vdev
* @txrx_peer: txrx peer
* @nbuf_head: skb list head
  1799. *
  1800. * Return: None
  1801. */
  1802. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1803. struct dp_vdev *vdev,
  1804. struct dp_txrx_peer *txrx_peer,
  1805. qdf_nbuf_t nbuf_head)
  1806. {
  1807. if (qdf_unlikely(dp_rx_deliver_to_stack_ext(soc, vdev,
  1808. txrx_peer, nbuf_head)))
  1809. return;
  1810. /* Function pointer initialized only when FISA is enabled */
  1811. if (vdev->osif_fisa_rx)
  1812. /* on failure send it via regular path */
  1813. vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1814. else
  1815. vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1816. }
  1817. #else
  1818. /**
  1819. * dp_rx_check_delivery_to_stack() - Deliver pkts to network
  1820. * using the appropriate call back functions.
  1821. * @soc: soc
  1822. * @vdev: vdev
  1823. * @txrx_peer: txrx peer
* @nbuf_head: skb list head
  1826. *
  1827. * Check the return status of the call back function and drop
  1828. * the packets if the return status indicates a failure.
  1829. *
  1830. * Return: None
  1831. */
  1832. static void dp_rx_check_delivery_to_stack(struct dp_soc *soc,
  1833. struct dp_vdev *vdev,
  1834. struct dp_txrx_peer *txrx_peer,
  1835. qdf_nbuf_t nbuf_head)
  1836. {
  1837. int num_nbuf = 0;
  1838. QDF_STATUS ret_val = QDF_STATUS_E_FAILURE;
  1839. /* Function pointer initialized only when FISA is enabled */
  1840. if (vdev->osif_fisa_rx)
  1841. /* on failure send it via regular path */
  1842. ret_val = vdev->osif_fisa_rx(soc, vdev, nbuf_head);
  1843. else if (vdev->osif_rx)
  1844. ret_val = vdev->osif_rx(vdev->osif_vdev, nbuf_head);
  1845. if (!QDF_IS_STATUS_SUCCESS(ret_val)) {
  1846. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
  1847. DP_STATS_INC(soc, rx.err.rejected, num_nbuf);
  1848. if (txrx_peer)
  1849. DP_PEER_STATS_FLAT_DEC(txrx_peer, to_stack.num,
  1850. num_nbuf);
  1851. }
  1852. }
  1853. #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */
  1854. /*
  1855. * dp_rx_validate_rx_callbacks() - validate rx callbacks
* @soc: DP soc
* @vdev: DP vdev handle
* @txrx_peer: pointer to the txrx peer object
* @nbuf_head: skb list head
  1860. *
  1861. * Return: QDF_STATUS - QDF_STATUS_SUCCESS
  1862. * QDF_STATUS_E_FAILURE
  1863. */
  1864. static inline QDF_STATUS
  1865. dp_rx_validate_rx_callbacks(struct dp_soc *soc,
  1866. struct dp_vdev *vdev,
  1867. struct dp_txrx_peer *txrx_peer,
  1868. qdf_nbuf_t nbuf_head)
  1869. {
  1870. int num_nbuf;
  1871. if (qdf_unlikely(!vdev || vdev->delete.pending)) {
  1872. num_nbuf = dp_rx_drop_nbuf_list(NULL, nbuf_head);
  1873. /*
  1874. * This is a special case where vdev is invalid,
  1875. * so we cannot know the pdev to which this packet
  1876. * belonged. Hence we update the soc rx error stats.
  1877. */
  1878. DP_STATS_INC(soc, rx.err.invalid_vdev, num_nbuf);
  1879. return QDF_STATUS_E_FAILURE;
  1880. }
  1881. /*
  1882. * highly unlikely to have a vdev without a registered rx
  1883. * callback function. if so let us free the nbuf_list.
  1884. */
  1885. if (qdf_unlikely(!vdev->osif_rx)) {
  1886. if (txrx_peer && dp_rx_is_peer_cache_bufq_supported()) {
  1887. dp_rx_enqueue_rx(txrx_peer, nbuf_head);
  1888. } else {
  1889. num_nbuf = dp_rx_drop_nbuf_list(vdev->pdev,
  1890. nbuf_head);
  1891. DP_PEER_TO_STACK_DECC(txrx_peer, num_nbuf,
  1892. vdev->pdev->enhanced_stats_en);
  1893. }
  1894. return QDF_STATUS_E_FAILURE;
  1895. }
  1896. return QDF_STATUS_SUCCESS;
  1897. }
  1898. QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc,
  1899. struct dp_vdev *vdev,
  1900. struct dp_txrx_peer *txrx_peer,
  1901. qdf_nbuf_t nbuf_head,
  1902. qdf_nbuf_t nbuf_tail)
  1903. {
  1904. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  1905. QDF_STATUS_SUCCESS)
  1906. return QDF_STATUS_E_FAILURE;
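/* For raw and native-wifi decap types, let the OS shim callback handle
 * the raw-mode decap simulation before delivery.
 */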
  1907. if (qdf_unlikely(vdev->rx_decap_type == htt_cmn_pkt_type_raw) ||
  1908. (vdev->rx_decap_type == htt_cmn_pkt_type_native_wifi)) {
  1909. vdev->osif_rsim_rx_decap(vdev->osif_vdev, &nbuf_head,
  1910. &nbuf_tail);
  1911. }
  1912. dp_rx_check_delivery_to_stack(soc, vdev, txrx_peer, nbuf_head);
  1913. return QDF_STATUS_SUCCESS;
  1914. }
  1915. #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
  1916. QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
  1917. struct dp_vdev *vdev,
  1918. struct dp_txrx_peer *txrx_peer,
  1919. qdf_nbuf_t nbuf_head,
  1920. qdf_nbuf_t nbuf_tail)
  1921. {
  1922. if (dp_rx_validate_rx_callbacks(soc, vdev, txrx_peer, nbuf_head) !=
  1923. QDF_STATUS_SUCCESS)
  1924. return QDF_STATUS_E_FAILURE;
  1925. vdev->osif_rx_eapol(vdev->osif_vdev, nbuf_head);
  1926. return QDF_STATUS_SUCCESS;
  1927. }
  1928. #endif
  1929. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  1930. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1931. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer) \
  1932. { \
  1933. qdf_nbuf_t nbuf_local; \
  1934. struct dp_txrx_peer *txrx_peer_local; \
  1935. struct dp_vdev *vdev_local = vdev_hdl; \
  1936. do { \
  1937. if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
  1938. break; \
  1939. nbuf_local = nbuf; \
  1940. txrx_peer_local = txrx_peer; \
  1941. if (qdf_unlikely(qdf_nbuf_is_frag((nbuf_local)))) \
  1942. break; \
  1943. else if (qdf_unlikely(qdf_nbuf_is_raw_frame((nbuf_local)))) \
  1944. break; \
  1945. dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
  1946. (nbuf_local), \
  1947. (txrx_peer_local), 0, 1); \
  1948. } while (0); \
  1949. }
  1950. #else
  1951. #define dp_rx_msdu_stats_update_prot_cnts(vdev_hdl, nbuf, txrx_peer)
  1952. #endif
  1953. #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
  1954. /**
  1955. * dp_rx_rates_stats_update() - update rate stats
  1956. * from rx msdu.
  1957. * @soc: datapath soc handle
  1958. * @nbuf: received msdu buffer
  1959. * @rx_tlv_hdr: rx tlv header
  1960. * @txrx_peer: datapath txrx_peer handle
  1961. * @sgi: Short Guard Interval
  1962. * @mcs: Modulation and Coding Set
  1963. * @nss: Number of Spatial Streams
  1964. * @bw: BandWidth
  1965. * @pkt_type: Corresponds to preamble
  1966. *
* To record rates precisely, the following factors are considered:
* specific frames (ARP, DHCP, SSDP, etc.) are excluded, and the impact
* on rx throughput is kept as small as possible.
  1970. *
  1971. * Return: void
  1972. */
  1973. static void
  1974. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  1975. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  1976. uint32_t sgi, uint32_t mcs,
  1977. uint32_t nss, uint32_t bw, uint32_t pkt_type)
  1978. {
  1979. uint32_t rix;
  1980. uint16_t ratecode;
  1981. uint32_t avg_rx_rate;
  1982. uint32_t ratekbps;
  1983. enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
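/* Skip rate bookkeeping for specific frames (ARP, DHCP, etc.) and in
 * high-throughput mode to keep the hot path light.
 */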
  1984. if (soc->high_throughput ||
  1985. dp_rx_data_is_specific(soc->hal_soc, rx_tlv_hdr, nbuf)) {
  1986. return;
  1987. }
  1988. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.rx_rate, mcs);
  1989. /* In 11b mode, the nss we get from tlv is 0, invalid and should be 1 */
  1990. if (qdf_unlikely(pkt_type == DOT11_B))
  1991. nss = 1;
  1992. /* here pkt_type corresponds to preamble */
  1993. ratekbps = dp_getrateindex(sgi,
  1994. mcs,
  1995. nss - 1,
  1996. pkt_type,
  1997. bw,
  1998. punc_mode,
  1999. &rix,
  2000. &ratecode);
  2001. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.last_rx_rate, ratekbps);
  2002. avg_rx_rate =
  2003. dp_ath_rate_lpf(txrx_peer->stats.extd_stats.rx.avg_rx_rate,
  2004. ratekbps);
  2005. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.avg_rx_rate, avg_rx_rate);
  2006. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.nss_info, nss);
  2007. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.mcs_info, mcs);
  2008. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.bw_info, bw);
  2009. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.gi_info, sgi);
  2010. DP_PEER_EXTD_STATS_UPD(txrx_peer, rx.preamble_info, pkt_type);
  2011. }
  2012. #else
  2013. static inline void
  2014. dp_rx_rates_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2015. uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
  2016. uint32_t sgi, uint32_t mcs,
  2017. uint32_t nss, uint32_t bw, uint32_t pkt_type)
  2018. {
  2019. }
  2020. #endif /* FEATURE_RX_LINKSPEED_ROAM_TRIGGER */
  2021. #ifndef QCA_ENHANCED_STATS_SUPPORT
  2022. /**
  2023. * dp_rx_msdu_extd_stats_update(): Update Rx extended path stats for peer
  2024. *
  2025. * @soc: datapath soc handle
  2026. * @nbuf: received msdu buffer
  2027. * @rx_tlv_hdr: rx tlv header
  2028. * @txrx_peer: datapath txrx_peer handle
  2029. *
  2030. * Return: void
  2031. */
  2032. static inline
  2033. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2034. uint8_t *rx_tlv_hdr,
  2035. struct dp_txrx_peer *txrx_peer)
  2036. {
  2037. bool is_ampdu;
  2038. uint32_t sgi, mcs, tid, nss, bw, reception_type, pkt_type;
  2039. uint8_t dst_mcs_idx;
  2040. /*
  2041. * TODO - For KIWI this field is present in ring_desc
  2042. * Try to use ring desc instead of tlv.
  2043. */
  2044. is_ampdu = hal_rx_mpdu_info_ampdu_flag_get(soc->hal_soc, rx_tlv_hdr);
  2045. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.ampdu_cnt, 1, is_ampdu);
  2046. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.non_ampdu_cnt, 1, !(is_ampdu));
  2047. sgi = hal_rx_tlv_sgi_get(soc->hal_soc, rx_tlv_hdr);
  2048. mcs = hal_rx_tlv_rate_mcs_get(soc->hal_soc, rx_tlv_hdr);
  2049. tid = qdf_nbuf_get_tid_val(nbuf);
  2050. bw = hal_rx_tlv_bw_get(soc->hal_soc, rx_tlv_hdr);
  2051. reception_type = hal_rx_msdu_start_reception_type_get(soc->hal_soc,
  2052. rx_tlv_hdr);
  2053. nss = hal_rx_msdu_start_nss_get(soc->hal_soc, rx_tlv_hdr);
  2054. pkt_type = hal_rx_tlv_get_pkt_type(soc->hal_soc, rx_tlv_hdr);
  2055. /* do HW to SW pkt type conversion */
  2056. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  2057. hal_2_dp_pkt_type_map[pkt_type]);
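/* Count one MPDU per MCS bucket only on the first MSDU (chfrag start) of
 * the MPDU.
 */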
  2058. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[mcs], 1,
  2059. ((mcs < MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  2060. DP_PEER_EXTD_STATS_INCC(txrx_peer, rx.rx_mpdu_cnt[MAX_MCS - 1], 1,
  2061. ((mcs >= MAX_MCS) && QDF_NBUF_CB_RX_CHFRAG_START(nbuf)));
  2062. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.bw[bw], 1);
  2063. /*
  2064. * only if nss > 0 and pkt_type is 11N/AC/AX,
  2065. * then increase index [nss - 1] in array counter.
  2066. */
  2067. if (nss > 0 && CDP_IS_PKT_TYPE_SUPPORT_NSS(pkt_type))
  2068. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.nss[nss - 1], 1);
  2069. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.sgi_count[sgi], 1);
  2070. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.mic_err, 1,
  2071. hal_rx_tlv_mic_err_get(soc->hal_soc,
  2072. rx_tlv_hdr));
  2073. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.err.decrypt_err, 1,
  2074. hal_rx_tlv_decrypt_err_get(soc->hal_soc,
  2075. rx_tlv_hdr));
  2076. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.wme_ac_type[TID_TO_WME_AC(tid)], 1);
  2077. DP_PEER_EXTD_STATS_INC(txrx_peer, rx.reception_type[reception_type], 1);
  2078. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  2079. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  2080. DP_PEER_EXTD_STATS_INC(txrx_peer,
  2081. rx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  2082. 1);
  2083. dp_rx_rates_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer,
  2084. sgi, mcs, nss, bw, pkt_type);
  2085. }
  2086. #else
  2087. static inline
  2088. void dp_rx_msdu_extd_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2089. uint8_t *rx_tlv_hdr,
  2090. struct dp_txrx_peer *txrx_peer)
  2091. {
  2092. }
  2093. #endif
  2094. #if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
  2095. static inline void
  2096. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2097. qdf_nbuf_t nbuf)
  2098. {
  2099. uint8_t lmac_id = qdf_nbuf_get_lmac_id(nbuf);
/* only count stats per lmac for MLO connection */
  2101. DP_PEER_PER_PKT_STATS_INCC_PKT(txrx_peer, rx.rx_lmac[lmac_id], 1,
  2102. QDF_NBUF_CB_RX_PKT_LEN(nbuf),
  2103. txrx_peer->mld_peer);
  2104. }
  2105. #else
  2106. static inline void
  2107. dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer,
  2108. qdf_nbuf_t nbuf)
  2109. {
  2110. }
  2111. #endif
  2112. /**
  2113. * dp_rx_msdu_stats_update() - update per msdu stats.
  2114. * @soc: core txrx main context
  2115. * @nbuf: pointer to the first msdu of an amsdu.
  2116. * @rx_tlv_hdr: pointer to the start of RX TLV headers.
  2117. * @txrx_peer: pointer to the txrx peer object.
  2118. * @ring_id: reo dest ring number on which pkt is reaped.
  2119. * @tid_stats: per tid rx stats.
  2120. *
  2121. * update all the per msdu stats for that nbuf.
  2122. * Return: void
  2123. */
  2124. void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2125. uint8_t *rx_tlv_hdr,
  2126. struct dp_txrx_peer *txrx_peer,
  2127. uint8_t ring_id,
  2128. struct cdp_tid_rx_stats *tid_stats)
  2129. {
  2130. bool is_not_amsdu;
  2131. struct dp_vdev *vdev = txrx_peer->vdev;
  2132. bool enh_flag;
  2133. qdf_ether_header_t *eh;
  2134. uint16_t msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2135. dp_rx_msdu_stats_update_prot_cnts(vdev, nbuf, txrx_peer);
  2136. is_not_amsdu = qdf_nbuf_is_rx_chfrag_start(nbuf) &
  2137. qdf_nbuf_is_rx_chfrag_end(nbuf);
  2138. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.rcvd_reo[ring_id], 1,
  2139. msdu_len);
  2140. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.non_amsdu_cnt, 1,
  2141. is_not_amsdu);
  2142. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.amsdu_cnt, 1, !is_not_amsdu);
  2143. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, rx.rx_retries, 1,
  2144. qdf_nbuf_is_rx_retry_flag(nbuf));
  2145. dp_peer_update_rx_pkt_per_lmac(txrx_peer, nbuf);
  2146. tid_stats->msdu_cnt++;
  2147. if (qdf_unlikely(qdf_nbuf_is_da_mcbc(nbuf) &&
  2148. (vdev->rx_decap_type == htt_cmn_pkt_type_ethernet))) {
  2149. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2150. enh_flag = vdev->pdev->enhanced_stats_en;
  2151. DP_PEER_MC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
  2152. tid_stats->mcast_msdu_cnt++;
  2153. if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  2154. DP_PEER_BC_INCC_PKT(txrx_peer, 1, msdu_len, enh_flag);
  2155. tid_stats->bcast_msdu_cnt++;
  2156. }
  2157. }
  2158. txrx_peer->stats.per_pkt_stats.rx.last_rx_ts = qdf_system_ticks();
  2159. dp_rx_msdu_extd_stats_update(soc, nbuf, rx_tlv_hdr, txrx_peer);
  2160. }
  2161. #ifndef WDS_VENDOR_EXTENSION
  2162. int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
  2163. struct dp_vdev *vdev,
  2164. struct dp_txrx_peer *txrx_peer)
  2165. {
  2166. return 1;
  2167. }
  2168. #endif
  2169. #ifdef RX_DESC_DEBUG_CHECK
/**
* dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
* corruption
*
* @soc: DP SOC handle
* @ring_desc: REO ring descriptor
* @rx_desc: Rx descriptor
*
* Return: QDF_STATUS_SUCCESS if the paddr is sane, QDF_STATUS_E_FAILURE otherwise
*/
  2179. QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
  2180. hal_ring_desc_t ring_desc,
  2181. struct dp_rx_desc *rx_desc)
  2182. {
  2183. struct hal_buf_info hbi;
  2184. hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
  2185. /* Sanity check for possible buffer paddr corruption */
  2186. if (dp_rx_desc_paddr_sanity_check(rx_desc, (&hbi)->paddr))
  2187. return QDF_STATUS_SUCCESS;
  2188. return QDF_STATUS_E_FAILURE;
  2189. }
  2190. /**
  2191. * dp_rx_desc_nbuf_len_sanity_check - Add sanity check to catch Rx buffer
* out-of-bound access by H.W
  2193. *
  2194. * @soc: DP soc
  2195. * @pkt_len: Packet length received from H.W
  2196. *
  2197. * Return: NONE
  2198. */
  2199. static inline void
  2200. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc,
  2201. uint32_t pkt_len)
  2202. {
  2203. struct rx_desc_pool *rx_desc_pool;
  2204. rx_desc_pool = &soc->rx_desc_buf[0];
  2205. qdf_assert_always(pkt_len <= rx_desc_pool->buf_size);
  2206. }
  2207. #else
  2208. static inline void
  2209. dp_rx_desc_nbuf_len_sanity_check(struct dp_soc *soc, uint32_t pkt_len) { }
  2210. #endif
  2211. #ifdef DP_RX_PKT_NO_PEER_DELIVER
  2212. #ifdef DP_RX_UDP_OVER_PEER_ROAM
  2213. /**
  2214. * dp_rx_is_udp_allowed_over_roam_peer() - check if udp data received
  2215. * during roaming
  2216. * @vdev: dp_vdev pointer
  2217. * @rx_tlv_hdr: rx tlv header
  2218. * @nbuf: pkt skb pointer
  2219. *
* This function checks if rx udp data is received from an authorised
* roamed peer before the peer map indication is received from FW after
* roaming. This is needed for VoIP scenarios in which the packet loss
* expected during roaming must be kept minimal.
  2224. *
  2225. * Return: bool
  2226. */
  2227. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2228. uint8_t *rx_tlv_hdr,
  2229. qdf_nbuf_t nbuf)
  2230. {
  2231. char *hdr_desc;
  2232. struct ieee80211_frame *wh = NULL;
  2233. hdr_desc = hal_rx_desc_get_80211_hdr(vdev->pdev->soc->hal_soc,
  2234. rx_tlv_hdr);
  2235. wh = (struct ieee80211_frame *)hdr_desc;
  2236. if (vdev->roaming_peer_status ==
  2237. WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED &&
  2238. !qdf_mem_cmp(vdev->roaming_peer_mac.raw, wh->i_addr2,
  2239. QDF_MAC_ADDR_SIZE) && (qdf_nbuf_is_ipv4_udp_pkt(nbuf) ||
  2240. qdf_nbuf_is_ipv6_udp_pkt(nbuf)))
  2241. return true;
  2242. return false;
  2243. }
  2244. #else
  2245. static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev,
  2246. uint8_t *rx_tlv_hdr,
  2247. qdf_nbuf_t nbuf)
  2248. {
  2249. return false;
  2250. }
  2251. #endif
  2252. /**
* dp_rx_deliver_to_stack_no_peer() - try to deliver rx data even if
* no corresponding peer is found
* @soc: core txrx main context
* @nbuf: pkt skb pointer
*
* This function will try to deliver some RX special frames to the stack
* even when no matching peer is found. For instance, in the LFR case, some
* EAPOL data will be sent to the host before peer_map is done.
  2261. *
  2262. * Return: None
  2263. */
  2264. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2265. {
  2266. uint16_t peer_id;
  2267. uint8_t vdev_id;
  2268. struct dp_vdev *vdev = NULL;
  2269. uint32_t l2_hdr_offset = 0;
  2270. uint16_t msdu_len = 0;
  2271. uint32_t pkt_len = 0;
  2272. uint8_t *rx_tlv_hdr;
  2273. uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_DHCP |
  2274. FRAME_MASK_IPV4_EAPOL | FRAME_MASK_IPV6_DHCP;
  2275. peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
  2276. if (peer_id > soc->max_peer_id)
  2277. goto deliver_fail;
  2278. vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
  2279. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_RX);
  2280. if (!vdev || vdev->delete.pending || !vdev->osif_rx)
  2281. goto deliver_fail;
  2282. if (qdf_unlikely(qdf_nbuf_is_frag(nbuf)))
  2283. goto deliver_fail;
  2284. rx_tlv_hdr = qdf_nbuf_data(nbuf);
  2285. l2_hdr_offset =
  2286. hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);
  2287. msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
  2288. pkt_len = msdu_len + l2_hdr_offset + soc->rx_pkt_tlv_size;
  2289. QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
  2290. qdf_nbuf_set_pktlen(nbuf, pkt_len);
  2291. qdf_nbuf_pull_head(nbuf, soc->rx_pkt_tlv_size + l2_hdr_offset);
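/* Only special frames (ARP/DHCP/EAPOL) or UDP allowed for a roamed peer
 * are delivered as exception frames; everything else is dropped.
 */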
  2292. if (dp_rx_is_special_frame(nbuf, frame_mask) ||
  2293. dp_rx_is_udp_allowed_over_roam_peer(vdev, rx_tlv_hdr, nbuf)) {
  2294. qdf_nbuf_set_exc_frame(nbuf, 1);
  2295. if (QDF_STATUS_SUCCESS !=
  2296. vdev->osif_rx(vdev->osif_vdev, nbuf))
  2297. goto deliver_fail;
  2298. DP_STATS_INC(soc, rx.err.pkt_delivered_no_peer, 1);
  2299. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2300. return;
  2301. }
  2302. deliver_fail:
  2303. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2304. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2305. dp_rx_nbuf_free(nbuf);
  2306. if (vdev)
  2307. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
  2308. }
  2309. #else
  2310. void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2311. {
  2312. DP_STATS_INC_PKT(soc, rx.err.rx_invalid_peer, 1,
  2313. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2314. dp_rx_nbuf_free(nbuf);
  2315. }
  2316. #endif
  2317. /**
  2318. * dp_rx_srng_get_num_pending() - get number of pending entries
  2319. * @hal_soc: hal soc opaque pointer
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring
  2321. * @num_entries: number of entries in the hal_ring.
  2322. * @near_full: pointer to a boolean. This is set if ring is near full.
  2323. *
  2324. * The function returns the number of entries in a destination ring which are
  2325. * yet to be reaped. The function also checks if the ring is near full.
  2326. * If more than half of the ring needs to be reaped, the ring is considered
  2327. * approaching full.
* The function uses hal_srng_dst_num_valid_locked to get the number of valid
  2329. * entries. It should not be called within a SRNG lock. HW pointer value is
  2330. * synced into cached_hp.
  2331. *
  2332. * Return: Number of pending entries if any
  2333. */
  2334. uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
  2335. hal_ring_handle_t hal_ring_hdl,
  2336. uint32_t num_entries,
  2337. bool *near_full)
  2338. {
  2339. uint32_t num_pending = 0;
  2340. num_pending = hal_srng_dst_num_valid_locked(hal_soc,
  2341. hal_ring_hdl,
  2342. true);
  2343. if (num_entries && (num_pending >= num_entries >> 1))
  2344. *near_full = true;
  2345. else
  2346. *near_full = false;
  2347. return num_pending;
  2348. }
  2349. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2350. #ifdef WLAN_SUPPORT_RX_FISA
  2351. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2352. {
  2353. QDF_NBUF_CB_RX_PACKET_L3_HDR_PAD(nbuf) = l3_padding;
  2354. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2355. }
  2356. #else
  2357. void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding)
  2358. {
  2359. qdf_nbuf_pull_head(nbuf, l3_padding + soc->rx_pkt_tlv_size);
  2360. }
  2361. #endif
  2362. #ifndef QCA_HOST_MODE_WIFI_DISABLED
  2363. #ifdef DP_RX_DROP_RAW_FRM
  2364. /**
  2365. * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
  2366. * @nbuf: pkt skb pointer
  2367. *
  2368. * Return: true - raw frame, dropped
  2369. * false - not raw frame, do nothing
  2370. */
  2371. bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
  2372. {
  2373. if (qdf_nbuf_is_raw_frame(nbuf)) {
  2374. dp_rx_nbuf_free(nbuf);
  2375. return true;
  2376. }
  2377. return false;
  2378. }
  2379. #endif
  2380. #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
  2381. /**
  2382. * dp_rx_ring_record_entry() - Record an entry into the rx ring history.
  2383. * @soc: Datapath soc structure
  2384. * @ring_num: REO ring number
  2385. * @ring_desc: REO ring descriptor
  2386. *
  2387. * Returns: None
  2388. */
  2389. void
  2390. dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
  2391. hal_ring_desc_t ring_desc)
  2392. {
  2393. struct dp_buf_info_record *record;
  2394. struct hal_buf_info hbi;
  2395. uint32_t idx;
  2396. if (qdf_unlikely(!soc->rx_ring_history[ring_num]))
  2397. return;
  2398. hal_rx_reo_buf_paddr_get(soc->hal_soc, ring_desc, &hbi);
  2399. /* buffer_addr_info is the first element of ring_desc */
  2400. hal_rx_buf_cookie_rbm_get(soc->hal_soc, (uint32_t *)ring_desc,
  2401. &hbi);
  2402. idx = dp_history_get_next_index(&soc->rx_ring_history[ring_num]->index,
  2403. DP_RX_HIST_MAX);
  2404. /* No NULL check needed for record since its an array */
  2405. record = &soc->rx_ring_history[ring_num]->entry[idx];
  2406. record->timestamp = qdf_get_log_timestamp();
  2407. record->hbi.paddr = hbi.paddr;
  2408. record->hbi.sw_cookie = hbi.sw_cookie;
  2409. record->hbi.rbm = hbi.rbm;
  2410. }
  2411. #endif
  2412. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  2413. /**
  2414. * dp_rx_update_stats() - Update soc level rx packet count
  2415. * @soc: DP soc handle
  2416. * @nbuf: nbuf received
  2417. *
  2418. * Returns: none
  2419. */
  2420. void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf)
  2421. {
  2422. DP_STATS_INC_PKT(soc, rx.ingress, 1,
  2423. QDF_NBUF_CB_RX_PKT_LEN(nbuf));
  2424. }
  2425. #endif
  2426. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  2427. /**
  2428. * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
  2429. * @soc : dp_soc handle
  2430. * @pdev: dp_pdev handle
  2431. * @peer_id: peer_id of the peer for which completion came
* @is_offload: flag indicating whether the frame is an offload frame
  2433. * @netbuf: Buffer pointer
  2434. *
  2435. * This function is used to deliver rx packet to packet capture
  2436. */
  2437. void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev,
  2438. uint16_t peer_id, uint32_t is_offload,
  2439. qdf_nbuf_t netbuf)
  2440. {
  2441. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2442. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA, soc, netbuf,
  2443. peer_id, is_offload, pdev->pdev_id);
  2444. }
  2445. void dp_rx_deliver_to_pkt_capture_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2446. uint32_t is_offload)
  2447. {
  2448. if (wlan_cfg_get_pkt_capture_mode(soc->wlan_cfg_ctx))
  2449. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_RX_DATA_NO_PEER,
  2450. soc, nbuf, HTT_INVALID_VDEV,
  2451. is_offload, 0);
  2452. }
  2453. #endif
  2454. #endif /* QCA_HOST_MODE_WIFI_DISABLED */
  2455. QDF_STATUS dp_rx_vdev_detach(struct dp_vdev *vdev)
  2456. {
  2457. QDF_STATUS ret;
  2458. if (vdev->osif_rx_flush) {
  2459. ret = vdev->osif_rx_flush(vdev->osif_vdev, vdev->vdev_id);
  2460. if (!QDF_IS_STATUS_SUCCESS(ret)) {
  2461. dp_err("Failed to flush rx pkts for vdev %d\n",
  2462. vdev->vdev_id);
  2463. return ret;
  2464. }
  2465. }
  2466. return QDF_STATUS_SUCCESS;
  2467. }
  2468. static QDF_STATUS
  2469. dp_pdev_nbuf_alloc_and_map(struct dp_soc *dp_soc,
  2470. struct dp_rx_nbuf_frag_info *nbuf_frag_info_t,
  2471. struct dp_pdev *dp_pdev,
  2472. struct rx_desc_pool *rx_desc_pool)
  2473. {
  2474. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  2475. (nbuf_frag_info_t->virt_addr).nbuf =
  2476. qdf_nbuf_alloc(dp_soc->osdev, rx_desc_pool->buf_size,
  2477. RX_BUFFER_RESERVATION,
  2478. rx_desc_pool->buf_alignment, FALSE);
  2479. if (!((nbuf_frag_info_t->virt_addr).nbuf)) {
  2480. dp_err("nbuf alloc failed");
  2481. DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
  2482. return ret;
  2483. }
  2484. ret = qdf_nbuf_map_nbytes_single(dp_soc->osdev,
  2485. (nbuf_frag_info_t->virt_addr).nbuf,
  2486. QDF_DMA_FROM_DEVICE,
  2487. rx_desc_pool->buf_size);
  2488. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
  2489. qdf_nbuf_free((nbuf_frag_info_t->virt_addr).nbuf);
  2490. dp_err("nbuf map failed");
  2491. DP_STATS_INC(dp_pdev, replenish.map_err, 1);
  2492. return ret;
  2493. }
  2494. nbuf_frag_info_t->paddr =
  2495. qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
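/* Verify the nbuf physical address is usable by the target
 * (e.g. x86 addressing limitations).
 */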
  2496. ret = dp_check_paddr(dp_soc, &((nbuf_frag_info_t->virt_addr).nbuf),
  2497. &nbuf_frag_info_t->paddr,
  2498. rx_desc_pool);
  2499. if (ret == QDF_STATUS_E_FAILURE) {
  2500. dp_err("nbuf check x86 failed");
  2501. DP_STATS_INC(dp_pdev, replenish.x86_fail, 1);
  2502. return ret;
  2503. }
  2504. return QDF_STATUS_SUCCESS;
  2505. }
  2506. QDF_STATUS
  2507. dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
  2508. struct dp_srng *dp_rxdma_srng,
  2509. struct rx_desc_pool *rx_desc_pool,
  2510. uint32_t num_req_buffers)
  2511. {
  2512. struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(dp_soc, mac_id);
  2513. hal_ring_handle_t rxdma_srng = dp_rxdma_srng->hal_srng;
  2514. union dp_rx_desc_list_elem_t *next;
  2515. void *rxdma_ring_entry;
  2516. qdf_dma_addr_t paddr;
  2517. struct dp_rx_nbuf_frag_info *nf_info;
  2518. uint32_t nr_descs, nr_nbuf = 0, nr_nbuf_total = 0;
  2519. uint32_t buffer_index, nbuf_ptrs_per_page;
  2520. qdf_nbuf_t nbuf;
  2521. QDF_STATUS ret;
  2522. int page_idx, total_pages;
  2523. union dp_rx_desc_list_elem_t *desc_list = NULL;
  2524. union dp_rx_desc_list_elem_t *tail = NULL;
  2525. int sync_hw_ptr = 1;
  2526. uint32_t num_entries_avail;
  2527. if (qdf_unlikely(!dp_pdev)) {
  2528. dp_rx_err("%pK: pdev is null for mac_id = %d",
  2529. dp_soc, mac_id);
  2530. return QDF_STATUS_E_FAILURE;
  2531. }
  2532. if (qdf_unlikely(!rxdma_srng)) {
  2533. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  2534. return QDF_STATUS_E_FAILURE;
  2535. }
  2536. dp_debug("requested %u RX buffers for driver attach", num_req_buffers);
  2537. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  2538. num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
  2539. rxdma_srng,
  2540. sync_hw_ptr);
  2541. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  2542. if (!num_entries_avail) {
  2543. dp_err("Num of available entries is zero, nothing to do");
  2544. return QDF_STATUS_E_NOMEM;
  2545. }
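/* Do not request more buffers than there are free entries in the ring */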
  2546. if (num_entries_avail < num_req_buffers)
  2547. num_req_buffers = num_entries_avail;
  2548. nr_descs = dp_rx_get_free_desc_list(dp_soc, mac_id, rx_desc_pool,
  2549. num_req_buffers, &desc_list, &tail);
  2550. if (!nr_descs) {
  2551. dp_err("no free rx_descs in freelist");
  2552. DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
  2553. return QDF_STATUS_E_NOMEM;
  2554. }
  2555. dp_debug("got %u RX descs for driver attach", nr_descs);
  2556. /*
  2557. * Try to allocate pointers to the nbuf one page at a time.
  2558. * Take pointers that can fit in one page of memory and
  2559. * iterate through the total descriptors that need to be
  2560. * allocated in order of pages. Reuse the pointers that
  2561. * have been allocated to fit in one page across each
  2562. * iteration to index into the nbuf.
  2563. */
  2564. total_pages = (nr_descs * sizeof(*nf_info)) / DP_BLOCKMEM_SIZE;
  2565. /*
  2566. * Add an extra page to store the remainder if any
  2567. */
  2568. if ((nr_descs * sizeof(*nf_info)) % DP_BLOCKMEM_SIZE)
  2569. total_pages++;
  2570. nf_info = qdf_mem_malloc(DP_BLOCKMEM_SIZE);
  2571. if (!nf_info) {
  2572. dp_err("failed to allocate nbuf array");
  2573. DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
  2574. QDF_BUG(0);
  2575. return QDF_STATUS_E_NOMEM;
  2576. }
  2577. nbuf_ptrs_per_page = DP_BLOCKMEM_SIZE / sizeof(*nf_info);
  2578. for (page_idx = 0; page_idx < total_pages; page_idx++) {
  2579. qdf_mem_zero(nf_info, DP_BLOCKMEM_SIZE);
  2580. for (nr_nbuf = 0; nr_nbuf < nbuf_ptrs_per_page; nr_nbuf++) {
  2581. /*
  2582. * The last page of buffer pointers may not be required
  2583. * completely based on the number of descriptors. Below
  2584. * check will ensure we are allocating only the
  2585. * required number of descriptors.
  2586. */
  2587. if (nr_nbuf_total >= nr_descs)
  2588. break;
  2589. /* Flag is set while pdev rx_desc_pool initialization */
  2590. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  2591. ret = dp_pdev_frag_alloc_and_map(dp_soc,
  2592. &nf_info[nr_nbuf], dp_pdev,
  2593. rx_desc_pool);
  2594. else
  2595. ret = dp_pdev_nbuf_alloc_and_map(dp_soc,
  2596. &nf_info[nr_nbuf], dp_pdev,
  2597. rx_desc_pool);
  2598. if (QDF_IS_STATUS_ERROR(ret))
  2599. break;
  2600. nr_nbuf_total++;
  2601. }
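/* Program the RXDMA ring entries for the buffers allocated in this page */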
  2602. hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
  2603. for (buffer_index = 0; buffer_index < nr_nbuf; buffer_index++) {
  2604. rxdma_ring_entry =
  2605. hal_srng_src_get_next(dp_soc->hal_soc,
  2606. rxdma_srng);
  2607. qdf_assert_always(rxdma_ring_entry);
  2608. next = desc_list->next;
  2609. paddr = nf_info[buffer_index].paddr;
  2610. nbuf = nf_info[buffer_index].virt_addr.nbuf;
  2611. /* Flag is set while pdev rx_desc_pool initialization */
  2612. if (qdf_unlikely(rx_desc_pool->rx_mon_dest_frag_enable))
  2613. dp_rx_desc_frag_prep(&desc_list->rx_desc,
  2614. &nf_info[buffer_index]);
  2615. else
  2616. dp_rx_desc_prep(&desc_list->rx_desc,
  2617. &nf_info[buffer_index]);
  2618. desc_list->rx_desc.in_use = 1;
  2619. dp_rx_desc_alloc_dbg_info(&desc_list->rx_desc);
  2620. dp_rx_desc_update_dbg_info(&desc_list->rx_desc,
  2621. __func__,
  2622. RX_DESC_REPLENISHED);
hal_rxdma_buff_addr_info_set(dp_soc->hal_soc, rxdma_ring_entry, paddr,
  2624. desc_list->rx_desc.cookie,
  2625. rx_desc_pool->owner);
  2626. dp_ipa_handle_rx_buf_smmu_mapping(
  2627. dp_soc, nbuf,
  2628. rx_desc_pool->buf_size, true,
  2629. __func__, __LINE__);
  2630. desc_list = next;
  2631. }
  2632. dp_rx_refill_ring_record_entry(dp_soc, dp_pdev->lmac_id,
  2633. rxdma_srng, nr_nbuf, nr_nbuf);
  2634. hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
  2635. }
  2636. dp_info("filled %u RX buffers for driver attach", nr_nbuf_total);
  2637. qdf_mem_free(nf_info);
  2638. if (!nr_nbuf_total) {
  2639. dp_err("No nbuf's allocated");
  2640. QDF_BUG(0);
  2641. return QDF_STATUS_E_RESOURCES;
  2642. }
  2643. /* No need to count the number of bytes received during replenish.
  2644. * Therefore set replenish.pkts.bytes as 0.
  2645. */
  2646. DP_STATS_INC_PKT(dp_pdev, replenish.pkts, nr_nbuf, 0);
  2647. return QDF_STATUS_SUCCESS;
  2648. }
  2649. qdf_export_symbol(dp_pdev_rx_buffers_attach);
  2650. /**
  2651. * dp_rx_enable_mon_dest_frag() - Enable frag processing for
  2652. * monitor destination ring via frag.
  2653. *
  2654. * Enable this flag only for monitor destination buffer processing
  2655. * if DP_RX_MON_MEM_FRAG feature is enabled.
  2656. * If flag is set then frag based function will be called for alloc,
  2657. * map, prep desc and free ops for desc buffer else normal nbuf based
  2658. * function will be called.
  2659. *
  2660. * @rx_desc_pool: Rx desc pool
  2661. * @is_mon_dest_desc: Is it for monitor dest buffer
  2662. *
  2663. * Return: None
  2664. */
  2665. #ifdef DP_RX_MON_MEM_FRAG
  2666. void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
  2667. bool is_mon_dest_desc)
  2668. {
  2669. rx_desc_pool->rx_mon_dest_frag_enable = is_mon_dest_desc;
  2670. if (is_mon_dest_desc)
  2671. dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is enabled");
  2672. }
  2673. #else
  2674. void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
  2675. bool is_mon_dest_desc)
  2676. {
  2677. rx_desc_pool->rx_mon_dest_frag_enable = false;
  2678. if (is_mon_dest_desc)
  2679. dp_alert("Feature DP_RX_MON_MEM_FRAG for mon_dest is disabled");
  2680. }
  2681. #endif
  2682. qdf_export_symbol(dp_rx_enable_mon_dest_frag);
/*
 * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor
 *				  pool
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;

	mac_for_pdev = pdev->lmac_id;
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return status;
	}

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	rx_sw_desc_num = wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->desc_type = DP_RX_DESC_BUF_TYPE;
	status = dp_rx_desc_pool_alloc(soc,
				       rx_sw_desc_num,
				       rx_desc_pool);

	return status;
}
/*
 * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool
 * @pdev: core txrx pdev context
 */
void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_free(soc, rx_desc_pool);
}
/*
 * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	uint32_t rxdma_entries;
	uint32_t rx_sw_desc_num;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * If NSS is enabled, rx_desc_pool is already filled.
		 * Hence, just disable the desc_pool frag flag.
		 */
		dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

		dp_rx_info("%pK: nss-wifi<4> skip Rx refill %d",
			   soc, mac_for_pdev);
		return QDF_STATUS_SUCCESS;
	}

	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_E_NOMEM)
		return QDF_STATUS_E_NOMEM;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	soc->process_rx_status = CONFIG_PROCESS_RX_STATUS;

	rx_sw_desc_num =
		wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);

	rx_desc_pool->owner = dp_rx_get_rx_bm_id(soc);
	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;
	/* Disable monitor dest processing via frag */
	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);

	dp_rx_desc_pool_init(soc, mac_for_pdev,
			     rx_sw_desc_num, rx_desc_pool);
	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools
 * @pdev: core txrx pdev context
 *
 * This function resets the freelist of rx descriptors and destroys locks
 * associated with this list of descriptors.
 */
void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev);
}
/*
 * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *			QDF_STATUS_E_NOMEM
 */
QDF_STATUS
dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct dp_srng *dp_rxdma_srng;
	struct rx_desc_pool *rx_desc_pool;
	uint32_t rxdma_entries;

	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_for_pdev];
	rxdma_entries = dp_rxdma_srng->num_entries;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	/* Initialize the RX buffer pool which will be
	 * used during low memory conditions
	 */
	dp_rx_buffer_pool_init(soc, mac_for_pdev);

	return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
						dp_rxdma_srng,
						rx_desc_pool,
						rxdma_entries - 1);
}
/*
 * dp_rx_pdev_buffers_free() - Free nbufs (skbs)
 * @pdev: core txrx pdev context
 */
void
dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
{
	int mac_for_pdev = pdev->lmac_id;
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];

	dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
}
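
/*
 * Putting the pdev-level helpers above together: the assumed attach/detach
 * ordering pairs each setup step with its teardown in reverse. This is a
 * hedged sketch of the call sequence, not additional driver code.
 *
 *	Attach:
 *		dp_rx_pdev_desc_pool_alloc(pdev);
 *		dp_rx_pdev_desc_pool_init(pdev);
 *		dp_rx_pdev_buffers_alloc(pdev);
 *	Detach:
 *		dp_rx_pdev_buffers_free(pdev);
 *		dp_rx_pdev_desc_pool_deinit(pdev);
 *		dp_rx_pdev_desc_pool_free(pdev);
 */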
#ifdef DP_RX_SPECIAL_FRAME_NEED
bool dp_rx_deliver_special_frame(struct dp_soc *soc,
				 struct dp_txrx_peer *txrx_peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,
				 uint8_t *rx_tlv_hdr)
{
	uint32_t l2_hdr_offset = 0;
	uint16_t msdu_len = 0;
	uint32_t skip_len;

	l2_hdr_offset =
		hal_rx_msdu_end_l3_hdr_padding_get(soc->hal_soc, rx_tlv_hdr);

	if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
		skip_len = l2_hdr_offset;
	} else {
		msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
		skip_len = l2_hdr_offset + soc->rx_pkt_tlv_size;
		qdf_nbuf_set_pktlen(nbuf, msdu_len + skip_len);
	}

	QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf) = 1;
	dp_rx_set_hdr_pad(nbuf, l2_hdr_offset);
	qdf_nbuf_pull_head(nbuf, skip_len);

	if (txrx_peer->vdev) {
		dp_rx_send_pktlog(soc, txrx_peer->vdev->pdev, nbuf,
				  QDF_TX_RX_STATUS_OK);
	}

	if (dp_rx_is_special_frame(nbuf, frame_mask)) {
		dp_info("special frame, mpdu sn 0x%x",
			hal_rx_get_rx_sequence(soc->hal_soc, rx_tlv_hdr));
		qdf_nbuf_set_exc_frame(nbuf, 1);
		dp_rx_deliver_to_stack(soc, txrx_peer->vdev, txrx_peer,
				       nbuf, NULL);
		return true;
	}

	return false;
}
#endif
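
/*
 * Illustrative caller (hedged sketch): an rx exception path that only wants
 * to punt ARP and EAPOL frames up the stack could build a frame mask and
 * hand the nbuf to dp_rx_deliver_special_frame(). The FRAME_MASK_* values
 * are assumed to be the masks defined in dp_rx.h; freeing the nbuf on the
 * "not special" path is just one possible policy for the caller.
 *
 *	uint32_t frame_mask = FRAME_MASK_IPV4_ARP | FRAME_MASK_IPV4_EAPOL;
 *
 *	if (!dp_rx_deliver_special_frame(soc, txrx_peer, nbuf, frame_mask,
 *					 rx_tlv_hdr))
 *		qdf_nbuf_free(nbuf);
 */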
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
					      uint8_t *rx_tlv,
					      qdf_nbuf_t nbuf)
{
	struct dp_soc *soc;

	if (!pdev->is_first_wakeup_packet)
		return;

	soc = pdev->soc;
	if (hal_get_first_wow_wakeup_packet(soc->hal_soc, rx_tlv)) {
		qdf_nbuf_mark_wakeup_frame(nbuf);
		dp_info("First packet after WOW Wakeup rcvd");
	}
}
#endif
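
/*
 * Illustrative call site (hedged sketch): the per-MSDU rx path would invoke
 * this helper once the rx TLVs for the nbuf are available, e.g.
 *
 *	dp_rx_mark_first_packet_after_wow_wakeup(vdev->pdev, rx_tlv_hdr, nbuf);
 *
 * with a no-op stub assumed for builds where
 * WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET is not defined.
 */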