dp_tx.c

/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif

/* TODO: Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
    HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
    HAL_TX_ENCRYPT_TYPE_WEP_128,
    HAL_TX_ENCRYPT_TYPE_WEP_104,
    HAL_TX_ENCRYPT_TYPE_WEP_40,
    HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
    HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
    HAL_TX_ENCRYPT_TYPE_WAPI,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
    HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4
};
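/*
 * Illustrative note (not in the original source): sec_type_map is indexed
 * by the cdp_sec_type enum value, so the conversion is a single array
 * lookup. Assuming cdp_sec_type enumerates in the same order as the table
 * above, a sketch of the usage would be:
 *
 *     enum cdp_sec_type sec_type = cdp_sec_type_aes_ccmp;
 *     uint8_t hal_encrypt = sec_type_map[sec_type];
 *
 * dp_tx_hw_enqueue() below uses this pattern when programming the encrypt
 * type into the TCL descriptor.
 */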
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
            soc->num_tx_allowed) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: queued packets are more than max tx, drop the frame",
                  __func__);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: queued packets are more than max tx, drop the frame",
                  __func__);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }
    return false;
}

/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
}
#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
}
#endif //QCA_TX_LIMIT_CHECK
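/*
 * Illustrative note (not in the original source): the three helpers above
 * form a simple admission-control lifecycle around descriptor allocation.
 * A typical caller (see dp_tx_prepare_desc_single() later in this file)
 * does:
 *
 *     if (dp_tx_limit_check(vdev))
 *         return NULL;                       // over soc/pdev budget, drop
 *     tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *     ...
 *     dp_tx_outstanding_inc(pdev);           // paired with the matching
 *                                            // _dec in dp_tx_desc_release()
 */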
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;
        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->tso_desc)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d TSO desc is NULL!",
                  __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d TSO num desc is NULL!",
                  __func__, __LINE__);
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
        dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&pdev->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        hal_tx_comp_get_buffer_source(&tx_desc->comp))
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "Tx Completion Release desc %d status %d outstanding %d",
              tx_desc->id, comp_status,
              qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
    return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *) meta_data;

    uint8_t htt_desc_size;

    /* Size rounded up to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0])) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does. so, no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }
        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "Error in filling HTT metadata");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
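/*
 * Illustrative note (not in the original source): the alignment arithmetic
 * above rounds the HTT descriptor size up to the next multiple of 8. For
 * example, if sizeof(struct htt_tx_msdu_desc_ext2_t) were 26:
 *
 *     htt_desc_size_aligned = (26 + 7) & ~0x7 = 33 & ~0x7 = 32
 *
 * so 32 bytes of headroom are pushed while only the real 26 descriptor
 * bytes are copied; the pad keeps the buffer start 8-byte aligned as the
 * hardware requires (see the layout diagram in dp_tx_prepare_desc_single()).
 */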
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *) &tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
            tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    return;
}
#endif
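/*
 * Illustrative note (not in the original source): qdf_dmaaddr_to_32s()
 * splits a (possibly 64-bit) DMA address into the two 32-bit halves the
 * hardware descriptor expects. For example, for a paddr of 0x123456780:
 *
 *     lo = 0x23456780;   // paddr & 0xFFFFFFFF
 *     hi = 0x1;          // paddr >> 32
 *
 * which is what hal_tx_ext_desc_set_buffer() programs per fragment above.
 */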
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso number segment descriptors (normally there is only one) */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            DP_TRACE(ERROR, "%s: Failed to alloc tso seg desc",
                     __func__);
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);
    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if dma map has done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
        return QDF_STATUS_E_INVAL;
    }

    tso_info->curr_seg = tso_info->tso_seg_list;

    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
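/*
 * Illustrative note (not in the original source): dp_tx_prepare_tso() builds
 * its per-segment state in three steps -- (1) pre-allocate one
 * qdf_tso_seg_elem_t per segment reported by qdf_nbuf_get_tso_num_seg(),
 * (2) allocate the single shared qdf_tso_num_seg_elem_t, (3) let
 * qdf_nbuf_get_tso_info() DMA-map the fragments and populate the list. On
 * any failure, everything allocated so far is rolled back through
 * dp_tx_free_remaining_tso_desc(), with unmap requested only after step
 * (3), since that is when DMA mappings first exist.
 */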
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&vdev->pdev->num_tx_exception);
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;
        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}

/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info containing metadata to the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    qdf_ether_header_t *eh;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = ((tx_exc_metadata &&
        (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
        tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    if (qdf_unlikely(vdev->multipass_en)) {
        if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
            goto failure;
    }

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     * to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             | ^    in HW descriptor (aligned)
     *  |        HTT Metadata         | |
     *  |                             | |
     *  |                             | | Packet Offset given in descriptor
     *  |                             | |
     *  |-----------------------------| |
     *  |       Alignment Pad         | v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |          (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
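    /*
     * Illustrative note (not in the original source): align_pad below is
     * simply the low 3 bits of the current data pointer. For example, if
     * qdf_nbuf_data(nbuf) ends in ...0x5, then align_pad = 5, and pushing
     * 5 bytes of head moves the start down to an 8-byte boundary; the
     * 8-byte-multiple HTT metadata pushed on top of it then stays aligned,
     * matching the diagram above.
     */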
    if (qdf_unlikely((msdu_info->exception_fw)) ||
        (vdev->opmode == wlan_op_mode_ocb) ||
        (tx_exc_metadata &&
         tx_exc_metadata->is_tx_sniffer)) {
        align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;

        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
            DP_STATS_INC(vdev,
                         tx_i.dropped.headroom_insufficient, 1);
            goto failure;
        }

        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "qdf_nbuf_push_head failed");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                                                  msdu_info);
        if (htt_hdr_size == 0)
            goto failure;
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;
    }

    if (qdf_unlikely(QDF_STATUS_SUCCESS !=
                     qdf_nbuf_map(soc->osdev, nbuf,
                                  QDF_DMA_TO_DEVICE))) {
        /* Handle failure */
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "qdf_nbuf_map failed");
        DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
        goto failure;
    }

    if (qdf_unlikely(vdev->nawds_enabled)) {
        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
            tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
            is_exception = 1;
        }
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&pdev->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}

/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;
    tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s Tx Extension Descriptor Alloc Fail",
                  __func__);
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&pdev->num_tx_exception);
#endif
    if (qdf_unlikely(msdu_info->exception_fw))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    return tx_desc;
failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}

/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    qdf_dma_addr_t paddr;
    int32_t i;
    int32_t mapped_buf_num = 0;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

    /* Continue only if frames are of DATA type */
    if (!DP_FRAME_IS_DATA(qos_wh)) {
        DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "Packet received is not of data type");
        goto error;
    }
    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if (vdev->raw_mode_war &&
        (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
        (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
         curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
        if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
                                               QDF_DMA_TO_DEVICE)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "%s dma map error ", __func__);
            DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
            mapped_buf_num = i;
            goto error;
        }

        paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_lo = paddr;
        seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *) curr_nbuf;

        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;

error:
    i = 0;
    while (nbuf) {
        curr_nbuf = nbuf;
        if (i < mapped_buf_num) {
            qdf_nbuf_unmap(vdev->osdev, curr_nbuf, QDF_DMA_TO_DEVICE);
            i++;
        }
        nbuf = qdf_nbuf_next(nbuf);
        qdf_nbuf_free(curr_nbuf);
    }
    return NULL;
}
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf)
{
    qdf_nbuf_t cur_nbuf = nbuf;

    do {
        qdf_nbuf_unmap(soc->osdev, cur_nbuf, QDF_DMA_TO_DEVICE);
        cur_nbuf = qdf_nbuf_next(cur_nbuf);
    } while (cur_nbuf);
}

/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status otherwise
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
                                   struct dp_tx_desc_s *tx_desc, uint8_t tid,
                                   uint16_t fw_metadata, uint8_t ring_id,
                                   struct cdp_tx_exception_metadata
                                   *tx_exc_metadata)
{
    uint8_t type;
    uint16_t length;
    void *hal_tx_desc, *hal_tx_desc_cached;
    qdf_dma_addr_t dma_addr;
    uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];

    enum cdp_sec_type sec_type = ((tx_exc_metadata &&
            tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
            tx_exc_metadata->sec_type : vdev->sec_type);

    /* Return Buffer Manager ID */
    uint8_t bm_id = ring_id;
    hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng;

    hal_tx_desc_cached = (void *) cached_desc;
    qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
        length = HAL_TX_EXT_DESC_WITH_META_DATA;
        type = HAL_TX_BUF_TYPE_EXT_DESC;
        dma_addr = tx_desc->msdu_ext_desc->paddr;
    } else {
        length = qdf_nbuf_len(tx_desc->nbuf) - tx_desc->pkt_offset;
        type = HAL_TX_BUF_TYPE_BUFFER;
        dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
    }

    qdf_assert_always(dma_addr);

    hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
    hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
                             dma_addr, bm_id, tx_desc->id,
                             type, soc->hal_soc);

    if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
        return QDF_STATUS_E_RESOURCES;

    hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
    hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
    hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
    hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
                            vdev->pdev->lmac_id);
    hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
                                vdev->search_type);
    hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
                                 vdev->bss_ast_hash);
    hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
                                      vdev->dscp_tid_map_id);
    hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
                                 sec_type_map[sec_type]);

    dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
                     length, type, (uint64_t)dma_addr,
                     tx_desc->pkt_offset, tx_desc->id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

    hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
                                      vdev->hal_desc_addr_search_flags);

    /* verify checksum offload configuration */
    if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
        ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
         || qdf_nbuf_is_tso(tx_desc->nbuf))) {
        hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
        hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
    }

    if (tid != HTT_TX_EXT_TID_INVALID)
        hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

    if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
        hal_tx_desc_set_mesh_en(hal_tx_desc_cached, 1);

    tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());

    /* Sync cached descriptor with HW */
    hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);

    if (!hal_tx_desc) {
        dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
        DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
        DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
        return QDF_STATUS_E_RESOURCES;
    }

    tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;

    hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
    DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);

    return QDF_STATUS_SUCCESS;
}
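/*
 * Illustrative note (not in the original source): dp_tx_hw_enqueue() uses a
 * "build locally, publish once" pattern. Every field is written into the
 * on-stack cached_desc[] first, and only after a free slot is obtained from
 * the SRNG via hal_srng_src_get_next() is the finished image copied into
 * ring memory with hal_tx_desc_sync(). Hardware therefore never observes a
 * partially written TCL descriptor.
 */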
/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool (true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
    qdf_ether_header_t *eh = NULL;
    uint16_t ether_type;
    qdf_llc_t *llcHdr;
    qdf_nbuf_t nbuf_clone = NULL;
    qdf_dot3_qosframe_t *qos_wh = NULL;

    /* for mesh packets don't do any classification */
    if (qdf_unlikely(vdev->mesh_vdev))
        return false;

    if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
        eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
        ether_type = eh->ether_type;
        llcHdr = (qdf_llc_t *)(nbuf->data +
                               sizeof(qdf_ether_header_t));
    } else {
        qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
        /* For encrypted packets don't do any classification */
        if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
            return false;

        if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
            if (qdf_unlikely(
                qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
                qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {

                ether_type = *(uint16_t *)(nbuf->data
                                           + QDF_IEEE80211_4ADDR_HDR_LEN
                                           + sizeof(qdf_llc_t)
                                           - sizeof(ether_type));
                llcHdr = (qdf_llc_t *)(nbuf->data +
                                       QDF_IEEE80211_4ADDR_HDR_LEN);
            } else {
                ether_type = *(uint16_t *)(nbuf->data
                                           + QDF_IEEE80211_3ADDR_HDR_LEN
                                           + sizeof(qdf_llc_t)
                                           - sizeof(ether_type));
                llcHdr = (qdf_llc_t *)(nbuf->data +
                                       QDF_IEEE80211_3ADDR_HDR_LEN);
            }

            if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
                             && (ether_type ==
                                 qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
                DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
                return true;
            }
        }

        return false;
    }

    if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
        ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
                                   sizeof(*llcHdr));
        nbuf_clone = qdf_nbuf_clone(nbuf);
        if (qdf_unlikely(nbuf_clone)) {
            qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

            if (ether_type == htons(ETHERTYPE_VLAN)) {
                qdf_nbuf_pull_head(nbuf_clone,
                                   sizeof(qdf_net_vlanhdr_t));
            }
        }
    } else {
        if (ether_type == htons(ETHERTYPE_VLAN)) {
            nbuf_clone = qdf_nbuf_clone(nbuf);
            if (qdf_unlikely(nbuf_clone)) {
                qdf_nbuf_pull_head(nbuf_clone,
                                   sizeof(qdf_net_vlanhdr_t));
            }
        }
    }

    if (qdf_unlikely(nbuf_clone))
        nbuf = nbuf_clone;

    if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
                     || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
                     || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
                     || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
                     || (qdf_nbuf_is_ipv4_pkt(nbuf)
                         && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
                     || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
                         qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
        if (qdf_unlikely(nbuf_clone))
            qdf_nbuf_free(nbuf_clone);
        return true;
    }

    if (qdf_unlikely(nbuf_clone))
        qdf_nbuf_free(nbuf_clone);

    return false;
}
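/*
 * Illustrative note (not in the original source): the clone in
 * dp_cce_classify() exists only so LLC/SNAP and VLAN headers can be
 * stripped non-destructively before running the qdf_nbuf_is_*_pkt()
 * checks, which expect a plain Ethernet-II frame; the original nbuf is
 * left untouched for transmission. The frames that classify as true here
 * (EAPOL, ARP, WAPI, TDLS, DHCPv4/v6) are the sort of control-plane
 * packets the datapath singles out for special handling.
 */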
  1094. /**
  1095. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1096. * @vdev: DP vdev handle
  1097. * @nbuf: skb
  1098. *
  1099. * Extract the DSCP or PCP information from frame and map into TID value.
  1100. *
  1101. * Return: void
  1102. */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *)nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;
	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));

	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *)eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/* TODO
		 * use flowlabel
		 * igmpmld cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *)L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}

	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}
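
/*
 * Worked example for the DSCP mapping above, assuming the conventional
 * DP_IP_DSCP_SHIFT = 2 and DP_IP_DSCP_MASK = 0x3f: a TOS byte of 0xb8
 * (Expedited Forwarding) yields (0xb8 >> 2) & 0x3f = 46, so the TID is
 * taken from dscp_tid_map[map_id][46], typically a voice TID in the
 * default map. The two low bits dropped by the shift are the ECN bits.
 */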
/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the classified TID is stored
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			       struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
		return;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc->vdev) {
		if (tx_desc->vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			tx_desc->vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
					 struct dp_vdev *vdev)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	if (qdf_unlikely(!vdev)) {
		dp_err("vdev is null!");
		return;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(tx_desc->nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		return;
	}
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
						struct dp_vdev *vdev)
{
}
#endif
/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (TID, metadata to the fw and the Tx queue
 *	       to be used for this frame)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	hal_ring_handle_t hal_ring_hdl =
				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	uint16_t htt_tcl_metadata = 0;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		dp_tx_get_tid(vdev, nbuf, msdu_info);
		tid_stats = &pdev->stats.tid_stats.
			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
		tid_stats->swdrop_cnt[TX_DESC_ERR]++;
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		dp_tx_get_tid(vdev, nbuf, msdu_info);
		tid_stats = &pdev->stats.tid_stats.
			    tid_tx_stats[tx_q->ring_id][tid];
		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
		goto fail_return;
	}

	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw) {
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id,
				  tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
			  __func__, tx_desc, tx_q->ring_id);
		dp_tx_get_tid(vdev, nbuf, msdu_info);
		tid_stats = &pdev->stats.tid_stats.
			    tid_tx_stats[tx_q->ring_id][tid];
		tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
		dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
		qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
		goto fail_return;
	}

	nbuf = NULL;

fail_return:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
	}

	return nbuf;
}
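
/*
 * Illustrative sketch, not part of the build: the SRNG access pattern
 * used by the enqueue paths in this file. hal_srng_access_start()/_end()
 * bracket every producer-side ring update; when the bus is runtime
 * suspended (hif_pm_runtime_get() returns non-zero here), the "reap"
 * variant updates only the software ring state so the doorbell write is
 * deferred until the link is awake. Names mirror the functions above.
 */
#if 0	/* example only */
	if (hal_srng_access_start(soc->hal_soc, hal_ring_hdl))
		return nbuf;	/* ring unavailable: caller keeps the skb */
	/* ... write one or more TCL descriptors ... */
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);	/* rings doorbell */
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);	/* no doorbell */
	}
#endif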
/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	hal_ring_handle_t hal_ring_hdl =
				soc->tcl_data_ring[tx_q->ring_id].hal_srng;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		dp_tx_get_tid(vdev, nbuf, msdu_info);
		tid_stats = &pdev->stats.tid_stats.
			    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
		tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
		DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
		return nbuf;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;
	/* Print statement to track i and num_seg */
	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
					  htt_tcl_metadata, tx_q->ring_id,
					  NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
				  __func__, tx_desc, tx_q->ring_id);
			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	if (hif_pm_runtime_get(soc->hif_handle) == 0) {
		hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
		hif_pm_runtime_put(soc->hif_handle);
	} else {
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
	}

	return nbuf;
}
/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *		       for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success,
 *	   NULL when the frame cannot be DMA-mapped
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
					       QDF_DMA_TO_DEVICE)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}

	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t)paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *)nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t)paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}
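
/*
 * Worked example for the fragment address split above: the hardware
 * fragment descriptor carries a DMA address as two 32-bit words, so a
 * 64-bit paddr of 0x0000004580001000 is stored as paddr_lo = 0x80001000
 * and paddr_hi = 0x45. frags[0] always describes the linear (head)
 * portion of the skb; page fragments follow from index 1, which is why
 * the loop writes frags[cur_frag + 1].
 */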
/**
 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
 *				   and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *	   nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;

	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
		  " tid %d to_fw %d",
		  __func__, msdu_info->meta_data[0],
		  msdu_info->meta_data[1],
		  msdu_info->meta_data[2],
		  msdu_info->meta_data[3],
		  msdu_info->meta_data[4],
		  msdu_info->meta_data[5],
		  msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}
#endif
/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid,
 *	   false otherwise
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}
/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	qdf_ether_header_t *eh = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vap_dev;
	struct dp_tx_msdu_info_s msdu_info;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb %pM", nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");
		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
		}
	}
	/* Tx sniffer is an exceptional mode, so mark the branch unlikely */
	if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));
		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}
	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);

	return nbuf;

fail:
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev = (struct dp_vdev *)vap_dev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_unshare failed");
		return nbuf;
	}
	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
	    (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
	    !no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_clone failed");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(vap_dev, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(vap_dev, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}
#else
qdf_nbuf_t dp_tx_send_mesh(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
{
	return dp_tx_send(vap_dev, nbuf);
}
#endif
/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @vap_dev: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP
 * forwarding cases
 *
 * Return: NULL on success,
 *	   nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_vdev *vap_dev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_tx_seg_info_s seg_info;
	struct dp_vdev *vdev = (struct dp_vdev *)vap_dev;
	uint16_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf_mesh = NULL;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
	qdf_mem_zero(&seg_info, sizeof(seg_info));

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb %pM", nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
							 &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 * Table 1 - Default DSCP-TID mapping table
	 * Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev, tx_i.tso.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			dp_verbose_debug("Mcast frm for ME %pK", vdev);

			DP_STATS_INC_PKT(vdev,
					 tx_i.mcast_en.mcast_pkt, 1,
					 qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
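
/*
 * Descriptive summary of the transmit entry point above (no code change):
 *
 *   dp_tx_send()
 *     -> dp_tx_extract_mesh_meta_data()          (mesh vdevs only)
 *     -> dp_tx_get_queue() + dp_tx_classify_tid()
 *     -> TSO:  dp_tx_prepare_tso()  -> dp_tx_send_msdu_multiple()
 *     -> SG:   dp_tx_prepare_sg()   -> dp_tx_send_msdu_multiple()
 *     -> ME:   dp_tx_prepare_send_me()           (ATH_SUPPORT_IQUE)
 *     -> RAW:  dp_tx_prepare_raw()  -> dp_tx_send_msdu_multiple()
 *     -> else: dp_tx_send_msdu_single()
 *
 * dp_tx_send_exception() shares the single-MSDU tail but rejects mesh,
 * TSO, SG and raw frames up front.
 */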
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	/* assert before the vdev pointer is dereferenced */
	qdf_assert(vdev);
	soc = vdev->pdev->soc;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	qdf_spin_lock_bh(&(soc->ast_lock));

	ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc,
				 (uint8_t *)(eh->ether_shost),
				 vdev->pdev->pdev_id);

	if (ast_entry)
		sa_peer = ast_entry->peer;

	qdf_spin_unlock_bh(&(soc->ast_lock));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			((peer->bss_peer && num_peers_3addr && is_mcast) ||
			 (peer->wds_enabled &&
			  ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			   (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			((peer->bss_peer &&
			  !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))) ||
			 peer->nawds_enabled)) {
#endif
				peer_id = DP_INVALID_PEER;

				if (peer->nawds_enabled) {
					peer_id = peer->peer_ids[0];
					if (sa_peer == peer) {
						QDF_TRACE(
							QDF_MODULE_ID_DP,
							QDF_TRACE_LEVEL_DEBUG,
							" %s: multicast packet",
							__func__);
						DP_STATS_INC(peer,
							tx.nawds_mcast_drop, 1);
						continue;
					}
				}

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
							tx.nawds_mcast,
							1, qdf_nbuf_len(nbuf));
				}
			}
		}
	}

	if (vdev->nawds_enabled) {
		peer_id = DP_INVALID_PEER;

		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));

		nbuf = dp_tx_send_msdu_single(vdev,
					      nbuf,
					      &msdu_info,
					      peer_id, NULL);

		if (nbuf) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_DEBUG,
				  FL("pkt send failed"));
			qdf_nbuf_free(nbuf);
		}
	} else
		qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}
#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be recorded in the tx capture header
 *
 * This function is used for indication whether buffer needs to be
 * sent to stack for freeing or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
		    (pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;

	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = time_latency;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 *
 * Return: none
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id,
			    qdf_nbuf_t netbuf)
{
}
#endif
/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
					struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
						desc->tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);

	if (qdf_unlikely(!vdev)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_likely(!vdev->mesh_vdev))
		qdf_nbuf_free(nbuf);
	else {
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else
			vdev->osif_tx_free_ext((nbuf));
	}
}
#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *					   in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}
	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %lu", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->channel = tx_desc->pdev->operating_channel;
}
#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif
/**
 * dp_tx_compute_delay() - Compute per-packet delays and update delay stats
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
				struct dp_tx_desc_s *tx_desc,
				uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

	if (qdf_likely(!vdev->pdev->delay_stats_flag))
		return;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
			      CDP_DELAY_STATS_SW_ENQ, ring_id);
	/*
	 * Delay between packet enqueued to HW and Tx completion
	 */
	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

	/*
	 * Update interframe delay stats calculated at hardstart receive point.
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
	 * the interframe delay will not be calculated correctly for the 1st
	 * frame. On the other hand, this avoids an extra per-packet check
	 * of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}
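
/*
 * Worked example for the computations above, with hypothetical timestamps
 * in ms: ingress = 1000, HW enqueue = 1002, completion = 1010 and
 * prev_tx_enq_tstamp = 996. Then sw_enqueue_delay = 2,
 * fwhw_transmit_delay = 8 and interframe_delay = 4; each value feeds its
 * own CDP_DELAY_STATS_* bucket for the given TID and ring.
 */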
/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *			       per wbm ring
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	uint8_t mcs, pkt_type;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	soc = pdev->soc;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err("Release source is not from TQM");
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag))
		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
	DP_STATS_INCC(peer, tx.dropped.age_out, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));

	DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
			  (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));

	DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));

	DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON1));

	DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON2));

	DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
		      (ts->status == HAL_TX_TQM_RR_FW_REASON3));

	/*
	 * tx_failed is ideally supposed to be updated from HTT ppdu completion
	 * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
	 * are no completions for failed cases. Hence updating tx_failed from
	 * data path. Please note that if tx_failed is fixed to be from ppdu,
	 * then this has to be removed
	 */
	peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
				   peer->stats.tx.dropped.fw_rem_notx +
				   peer->stats.tx.dropped.fw_rem_tx +
				   peer->stats.tx.dropped.age_out +
				   peer->stats.tx.dropped.fw_reason1 +
				   peer->stats.tx.dropped.fw_reason2 +
				   peer->stats.tx.dropped.fw_reason3;

	if (ts->status < CDP_MAX_TX_TQM_STATUS) {
		tid_stats->tqm_status_cnt[ts->status]++;
	}

	if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
		return;
	}

	DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);

	DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
	DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);

	/*
	 * Following Rate Statistics are updated from HTT PPDU events from FW.
	 * Return from here if HTT PPDU events are enabled.
	 */
	if (!(soc->process_tx_status))
		return;

	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ts->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
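
/*
 * Reading the DP_STATS_INCC pairs above: for each PHY type the first
 * counter treats mcs_count[MAX_MCS - 1] as an overflow bucket for MCS
 * indices at or beyond that PHY's maximum (e.g. MAX_MCS_11A for 11a),
 * while the second records in-range indices directly. Exactly one of
 * the two conditions holds per completion, so each frame is binned once.
 */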
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @tx_desc: tx desc
 * @netbuf: buffer
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;

	qdf_assert(tx_desc);

	dp_tx_flow_pool_lock(soc, tx_desc);

	if (!tx_desc->vdev ||
	    !tx_desc->vdev->osif_vdev) {
		dp_tx_flow_pool_unlock(soc, tx_desc);
		return;
	}

	osif_dev = tx_desc->vdev->osif_vdev;
	tx_compl_cbk = tx_desc->vdev->tx_comp;
	dp_tx_flow_pool_unlock(soc, tx_desc);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev);
}
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
  2585. #ifdef FEATURE_PERPKT_INFO
  2586. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  2587. struct dp_peer *peer,
  2588. uint8_t tid,
  2589. uint64_t txdesc_ts,
  2590. uint32_t ppdu_id)
  2591. {
  2592. uint64_t delta_ms;
  2593. struct cdp_tx_sojourn_stats *sojourn_stats;
	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;

	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		peer->avg_sojourn_msdu[tid].internal;
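	/*
	 * sojourn_buf is a single nbuf shared by all completions on this
	 * pdev: publish the snapshot through WDI, then clear the per-TID
	 * fields so the next completion starts from a clean slate.
	 */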
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);

	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
}
#endif
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer)
{
	uint64_t time_latency = 0;

	if (qdf_unlikely(desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
				desc->timestamp);
	}

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
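	/*
	 * For non scatter-gather frames the completion may be queued for
	 * tx capture or indicated to the stack; in either case ownership
	 * of the nbuf moves on and it must not be freed here. Everything
	 * else falls through to dp_tx_comp_free_buf().
	 */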
	if (!(desc->msdu_ext_desc)) {
		if (QDF_STATUS_SUCCESS ==
		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   peer, ts,
							   desc->nbuf,
							   time_latency)) {
			qdf_nbuf_unmap(soc->osdev, desc->nbuf,
				       QDF_DMA_TO_DEVICE);
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	dp_tx_comp_free_buf(soc, desc);
}
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_peer *peer, uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	if (!vdev || !nbuf) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "invalid tx descriptor. vdev or nbuf NULL");
		goto out;
	}
	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id,
				 ts->status));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "-------------------- \n"
		  "Tx Completion Stats: \n"
		  "-------------------- \n"
		  "ack_frame_rssi = %d \n"
		  "first_msdu = %d \n"
		  "last_msdu = %d \n"
		  "msdu_part_of_amsdu = %d \n"
		  "rate_stats valid = %d \n"
		  "bw = %d \n"
		  "pkt_type = %d \n"
		  "stbc = %d \n"
		  "ldpc = %d \n"
		  "sgi = %d \n"
		  "mcs = %d \n"
		  "ofdma = %d \n"
		  "tones_in_ru = %d \n"
		  "tsf = %d \n"
		  "ppdu_id = %d \n"
		  "transmit_cnt = %d \n"
		  "tid = %d \n"
		  "peer_id = %d\n",
		  ts->ack_frame_rssi, ts->first_msdu,
		  ts->last_msdu, ts->msdu_part_of_amsdu,
		  ts->valid, ts->bw, ts->pkt_type, ts->stbc,
		  ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
		  ts->tones_in_ru, ts->tsf, ts->ppdu_id,
		  ts->transmit_cnt, ts->tid, ts->peer_id);
	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	length = qdf_nbuf_len(nbuf);

	/* Update peer level stats */
	if (!peer) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
				   "peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

			if ((peer->vdev->tx_encap_type ==
			     htt_cmn_pkt_type_ethernet) &&
			    QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
			}
		}
	} else {
		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	}

	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->wlanstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
					    tx_desc->timestamp,
					    ts->ppdu_id);
#endif

out:
	return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts = {0};
	struct dp_peer *peer;
	qdf_nbuf_t netbuf;

	desc = comp_head;

	while (desc) {
		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
		peer = dp_peer_find_by_id(soc, ts.peer_id);
		dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);

		netbuf = desc->nbuf;
		/* check tx complete notification */
		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
			dp_tx_notify_completion(soc, desc, netbuf);

		dp_tx_comp_process_desc(soc, desc, &ts, peer);

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		next = desc->next;

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
}
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
				  uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_peer *peer;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;

	qdf_assert(tx_desc->pdev);

	pdev = tx_desc->pdev;
	vdev = tx_desc->vdev;
	soc = pdev->soc;

	if (!vdev)
		return;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
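	/*
	 * Per the HTT_TX_WBM_COMPLETION_V2_* accessors used below, word 0
	 * of the HTT descriptor carries the tx status, word 1 the sched
	 * cmd id (reused as ppdu_id) and the ack frame RSSI, and word 2
	 * the sw peer id and TID when the valid bit is set.
	 */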
	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.first_msdu = 1;
		ts.last_msdu = 1;

		tid = ts.tid;
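		/*
		 * Clamp out-of-range TIDs (including HTT_INVALID_TID) into
		 * the last data-TID bucket so the tid_tx_stats indexing
		 * below stays in bounds.
		 */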
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS) {
			tid_stats->htt_status_cnt[tx_status]++;
		}

		peer = dp_peer_find_by_id(soc, ts.peer_id);

		dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		/* Drop the peer reference only after its last use above */
		if (qdf_likely(peer))
			dp_peer_unref_del_find_by_id(peer);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Invalid HTT tx_status %d\n",
			  __func__, tx_status);
		break;
	}
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit = (num_reaped >= cfg->tx_comp_loop_pkt_limit);

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}
#endif
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count = 0;
	bool force_break = false;

	DP_HIST_INIT();

more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		return 0;
	}
	/* Find head descriptor from completion ring */
	while (qdf_likely(tx_comp_hal_desc =
			hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl))) {

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/*
		 * If this buffer was not released by TQM or FW, it is not a
		 * Tx completion indication; assert.
		 */
		if ((buffer_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (buffer_src != HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_FATAL,
				  "Tx comp release_src != TQM | FW but from %d",
				  buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);
			qdf_assert_always(0);
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);
		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;
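		/*
		 * The cookie packs pool id, page id and in-page offset into
		 * a single tx_desc_id; the DP_TX_DESC_ID_* masks and shifts
		 * below unpack it to locate the software descriptor.
		 */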
		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);

		/*
		 * If the descriptor is already freed in vdev_detach,
		 * continue to next descriptor
		 */
		if (!tx_desc->vdev && !tx_desc->flags) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "Descriptor freed in vdev_detach %d",
				  tx_desc_id);
			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
			count++;
			continue;
		}

		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
			QDF_TRACE(QDF_MODULE_ID_DP,
				  QDF_TRACE_LEVEL_INFO,
				  "pdev in down state %d",
				  tx_desc_id);
			num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
			count++;

			dp_tx_comp_free_buf(soc, tx_desc);
			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
			continue;
		}
		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];

			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
						 htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
						     htt_tx_status, ring_id);
		} else {
			/* Pool id mismatch is a fatal error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Tx Comp pool id %d not matched %d",
					  pool_id, tx_desc->pool_id);
				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Txdesc invalid, flags = %x, id = %d",
					  tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}

			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;

			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);
		}
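		/*
		 * num_processed advances only once every
		 * (DP_TX_NAPI_BUDGET_DIV_MASK + 1) descriptors, so a single
		 * quota unit covers a small batch of completions rather
		 * than one descriptor each.
		 */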
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the processed packet count reaches
		 * the given quota.
		 */
		if (num_processed >= quota) {
			force_break = true;
			break;
		}

		count++;

		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
			break;
	}
	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
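	/*
	 * End-of-loop check: if the quota was not exhausted but the ring
	 * still has entries, go around again unless the HIF layer asks
	 * this context to yield.
	 */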
	if (dp_tx_comp_enable_eol_data_check(soc)) {
		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_non_std() - Allow the control-path SW to send data frames
 * @vdev_handle: which vdev should transmit the tx data frames
 * @tx_spec: what non-standard handling to apply to the tx data frames
 * @msdu_list: NULL-terminated list of tx MSDUs
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;
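	/*
	 * Marking the vdev's frames as TDLS presumably lets the completion
	 * path hand the nbuf back to the caller instead of freeing it,
	 * matching the OL_TX_SPEC_NO_FREE contract.
	 */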
	return dp_tx_send(vdev_handle, msdu_list);
}
#endif
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
					vdev->vdev_id);

	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
					DP_SW2HW_MACID(vdev->pdev->pdev_id));

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set search type only when peer map v2 messaging is enabled
	 * as we will have the search index (AST hash) only when v2 is
	 * enabled
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, match the descriptor against that vdev;
	 * if vdev is NULL, match against the pdev instead.
	 */
	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP SoC structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	struct dp_tx_desc_pool_s *pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
	tx_desc->vdev = NULL;
	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
/**
 * dp_tx_desc_flush() - release resources associated
 * to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in associated TX
 * Desc.
 */
static void dp_tx_desc_flush(struct dp_pdev *pdev,
			     struct dp_vdev *vdev,
			     bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}
	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
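		/*
		 * Under flow control V2 a pool can be deleted at runtime,
		 * so re-validate cacheable_pages on every iteration before
		 * dereferencing descriptors from it.
		 */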
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free TX desc if force free is
				 * required, otherwise only reset vdev
				 * in this TX desc.
				 */
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */

static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

static void dp_tx_desc_flush(struct dp_pdev *pdev,
			     struct dp_vdev *vdev,
			     bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;

		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);
	dp_tx_vdev_multipass_deinit(vdev);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_attach() - attach pdev to dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	/* flush TX outstanding data per pdev */
	dp_tx_desc_flush(pdev, NULL, true);
	dp_tx_me_exit(pdev);

	return QDF_STATUS_SUCCESS;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
				    int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
				    int num_desc)
{
	uint8_t i;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx Desc Pool alloc %d failed %pK",
				  __func__, i, soc);
			return ENOMEM;
		}
	}

	return 0;
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_assert_always(!soc->tx_desc[i].num_allocated);
		if (dp_tx_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s Tx Desc Pool Free failed", __func__);
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
#ifndef QCA_MEM_ATTACH_ON_WIFI3
/**
 * dp_tso_attach_wifi3() - TSO attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
	return dp_tso_soc_attach(txrx_soc);
}

/**
 * dp_tso_detach_wifi3() - TSO Detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Deallocate TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
	return dp_tso_soc_detach(txrx_soc);
}
#else
static
QDF_STATUS dp_tso_attach_wifi3(void *txrx_soc)
{
	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_tso_detach_wifi3(void *txrx_soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_tso_soc_detach() - TSO descriptor pool detach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Free the TSO descriptor and TSO num-segment pools for every pool
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_desc_pool_free(soc, i);

	dp_info("%s TSO Desc Pool %d Free descs = %d",
		__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++)
		dp_tx_tso_num_seg_pool_free(soc, i);

	dp_info("%s TSO Num of seg Desc Pool %d Free descs = %d",
		__func__, num_pool, num_desc);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tso_soc_attach() - TSO attach handler
 * @txrx_soc: Opaque Dp handle
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tso_soc_attach(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
			dp_err("TSO Desc Pool alloc %d failed %pK",
			       i, soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	dp_info("%s TSO Desc Alloc %d, descs = %d",
		__func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
			dp_err("TSO Num of seg Pool alloc %d failed %pK",
			       i, soc);
			return QDF_STATUS_E_FAILURE;
		}
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_soc_detach() - detach soc from dp tx
 * @soc: core txrx main context
 *
 * This function detaches dp tx from the main device context;
 * it frees the dp tx resources and de-initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint16_t num_desc;
	uint16_t num_ext_desc;
	uint8_t i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_delete_static_pools(soc, num_pool);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Pool Free num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_free(soc, i)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s Tx Ext Desc Pool Free failed",
				  __func__);
			return QDF_STATUS_E_RESOURCES;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s MSDU Ext Desc Pool %d Free descs = %d",
		  __func__, num_pool, num_ext_desc);

	status = dp_tso_detach_wifi3(soc);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_soc_attach() - attach soc to dp tx
 * @soc: core txrx main context
 *
 * This function attaches dp tx to the main device context;
 * it allocates the dp tx resources and initializes them.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (num_pool > MAX_TXDESC_POOLS)
		goto fail;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail;

	dp_tx_flow_control_init(soc);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	/* Allocate extension tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "MSDU Ext Desc Pool alloc %d failed %pK",
				  i, soc);
			goto fail;
		}
	}

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s MSDU Ext Desc Alloc %d, descs = %d",
		  __func__, num_pool, num_ext_desc);

	status = dp_tso_attach_wifi3((void *)soc);
	if (status != QDF_STATUS_SUCCESS)
		goto fail;

	/* Initialize descriptors in TCL Rings */
	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_tx_init_data_ring(soc->hal_soc,
					      soc->tcl_data_ring[i].hal_srng);
		}
	}

	/*
	 * todo - Add a runtime config option to enable this.
	 *
	 * Due to multiple issues on NPR EMU, enable it selectively
	 * only for NPR EMU; this should be removed once the NPR
	 * platforms are stable.
	 */
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s HAL Tx init Success", __func__);

	return QDF_STATUS_SUCCESS;

fail:
	/* Detach will take care of freeing only allocated resources */
	dp_tx_soc_detach(soc);
	return QDF_STATUS_E_RESOURCES;
}