dp_tx.c

/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
#define MAX_CDP_SEC_TYPE 12
static const uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {
    HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
    HAL_TX_ENCRYPT_TYPE_WEP_128,
    HAL_TX_ENCRYPT_TYPE_WEP_104,
    HAL_TX_ENCRYPT_TYPE_WEP_40,
    HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
    HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
    HAL_TX_ENCRYPT_TYPE_WAPI,
    HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
    HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
    HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};
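
/*
 * Illustration (editor's note, not part of the original source):
 * sec_type_map is indexed by the cdp_sec_type enum, so the entries above
 * must stay in that enum's order. Assuming cdp_sec_type_aes_ccmp is the
 * seventh enumerator, sec_type_map[cdp_sec_type_aes_ccmp] yields
 * HAL_TX_ENCRYPT_TYPE_AES_CCMP_128, which is how dp_tx_hw_enqueue() below
 * programs the encrypt type into the HW descriptor.
 */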
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (qdf_atomic_read(&soc->num_tx_outstanding) >=
            soc->num_tx_allowed) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: queued packets are more than max tx, drop the frame",
                  __func__);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
            pdev->num_tx_allowed) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s: queued packets are more than max tx, drop the frame",
                  __func__);
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return true;
    }

    return false;
}
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_inc(&pdev->num_tx_outstanding);
    qdf_atomic_inc(&soc->num_tx_outstanding);
}
/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    struct dp_soc *soc = pdev->soc;

    qdf_atomic_dec(&pdev->num_tx_outstanding);
    qdf_atomic_dec(&soc->num_tx_outstanding);
}
#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
    return false;
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
    qdf_atomic_inc(&pdev->num_tx_outstanding);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
    qdf_atomic_dec(&pdev->num_tx_outstanding);
}
#endif //QCA_TX_LIMIT_CHECK
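
/*
 * Sketch of how the helpers above pair up on the TX path (illustrative
 * comment, mirroring dp_tx_prepare_desc_single() and dp_tx_desc_release()
 * later in this file):
 *
 *     if (dp_tx_limit_check(vdev))
 *         return NULL;                 // limit reached: drop
 *     tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
 *     ...
 *     dp_tx_outstanding_inc(pdev);     // after a successful alloc
 *     ...
 *     dp_tx_outstanding_dec(pdev);     // in dp_tx_desc_release()
 *
 * Keeping the inc/dec strictly paired is what makes the atomic
 * num_tx_outstanding counters a reliable gauge for the limit check.
 */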
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    TSO_DEBUG("%s: Unmap the tso segment", __func__);
    if (qdf_unlikely(!seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
                 __func__, __LINE__);
        qdf_assert(0);
    } else {
        bool is_last_seg;

        /* no tso segment left to do dma unmap */
        if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
            return;

        is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
                      true : false;
        qdf_nbuf_unmap_tso_segment(soc->osdev,
                                   seg_desc, is_last_seg);
        num_seg_desc->num_seg.tso_cmn_num_seg--;
    }
}
/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
    TSO_DEBUG("%s: Free the tso descriptor", __func__);
    if (qdf_unlikely(!tx_desc->tso_desc)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d TSO desc is NULL!",
                  __func__, __LINE__);
        qdf_assert(0);
    } else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                  "%s %d TSO num desc is NULL!",
                  __func__, __LINE__);
        qdf_assert(0);
    } else {
        struct qdf_tso_num_seg_elem_t *tso_num_desc =
            (struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

        /* Add the tso num segment into the free list */
        if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
            dp_tso_num_seg_free(soc, tx_desc->pool_id,
                                tx_desc->tso_num_desc);
            tx_desc->tso_num_desc = NULL;
            DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
        }

        /* Add the tso segment into the free list */
        dp_tx_tso_desc_free(soc,
                            tx_desc->pool_id, tx_desc->tso_desc);
        tx_desc->tso_desc = NULL;
    }
}
#else
static void dp_tx_tso_unmap_segment(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *seg_desc,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
                                   struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
    struct dp_pdev *pdev = tx_desc->pdev;
    struct dp_soc *soc;
    uint8_t comp_status = 0;

    qdf_assert(pdev);

    soc = pdev->soc;

    dp_tx_outstanding_dec(pdev);

    if (tx_desc->frm_type == dp_tx_frm_tso)
        dp_tx_tso_desc_release(soc, tx_desc);

    if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
        dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

    if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
        dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

    if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
        qdf_atomic_dec(&pdev->num_tx_exception);

    if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
        hal_tx_comp_get_buffer_source(&tx_desc->comp))
        comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
                                                     soc->hal_soc);
    else
        comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
              "Tx Completion Release desc %d status %d outstanding %d",
              tx_desc->id, comp_status,
              qdf_atomic_read(&pdev->num_tx_outstanding));

    dp_tx_desc_free(soc, tx_desc, desc_pool_id);
    return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 * 1) Mesh Mode
 * 2) DSRC Mode
 *
 * Return: HTT metadata size
 *
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
                                          struct dp_tx_msdu_info_s *msdu_info)
{
    uint32_t *meta_data = msdu_info->meta_data;
    struct htt_tx_msdu_desc_ext2_t *desc_ext =
        (struct htt_tx_msdu_desc_ext2_t *)meta_data;

    uint8_t htt_desc_size;

    /* Size rounded off to a multiple of 8 bytes */
    uint8_t htt_desc_size_aligned;

    uint8_t *hdr = NULL;

    /*
     * Metadata - HTT MSDU Extension header
     */
    htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
    htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
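
    /*
     * Worked example of the round-up above (size illustrative, the real
     * value is sizeof(struct htt_tx_msdu_desc_ext2_t)): for a size of 36,
     * (36 + 7) & ~0x7 == 40, the next multiple of 8; an already aligned
     * size is unchanged, e.g. (40 + 7) & ~0x7 == 40.
     */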
    if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
        HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
                                                       meta_data[0])) {
        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
                         htt_desc_size_aligned)) {
            nbuf = qdf_nbuf_realloc_headroom(nbuf,
                                             htt_desc_size_aligned);
            if (!nbuf) {
                /*
                 * qdf_nbuf_realloc_headroom won't do skb_clone
                 * as skb_realloc_headroom does. so, no free is
                 * needed here.
                 */
                DP_STATS_INC(vdev,
                             tx_i.dropped.headroom_insufficient,
                             1);
                qdf_print(" %s[%d] skb_realloc_headroom failed",
                          __func__, __LINE__);
                return 0;
            }
        }

        /* Fill and add HTT metaheader */
        hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
        if (!hdr) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "Error in filling HTT metadata");
            return 0;
        }
        qdf_mem_copy(hdr, desc_ext, htt_desc_size);
    } else if (vdev->opmode == wlan_op_mode_ocb) {
        /* Todo - Add support for DSRC */
    }

    return htt_desc_size_aligned;
}
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    uint8_t num_frag;
    uint32_t tso_flags;

    /*
     * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
     * tcp_flag_mask
     *
     * Checksum enable flags are set in TCL descriptor and not in Extension
     * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
     */
    tso_flags = *(uint32_t *)&tso_seg->tso_flags;
    hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

    hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
                                    tso_seg->tso_flags.ip_len);

    hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
    hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

    for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
        uint32_t lo = 0;
        uint32_t hi = 0;

        qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
                          (tso_seg->tso_frags[num_frag].length));

        qdf_dmaaddr_to_32s(
            tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
        hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
                                   tso_seg->tso_frags[num_frag].length);
    }

    return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
                                       void *ext_desc)
{
    return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *next_seg;

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_desc_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_seg);
        free_seg = next_seg;
    }
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_num_seg_elem_t *free_num_seg,
        struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_num_seg_elem_t *next_num_seg;

    while (free_num_seg) {
        next_num_seg = free_num_seg->next;
        dp_tso_num_seg_free(soc,
                            msdu_info->tx_queue.desc_pool_id,
                            free_num_seg);
        free_num_seg = next_num_seg;
    }
}
/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              and do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
        struct dp_soc *soc,
        struct qdf_tso_seg_elem_t *free_seg,
        struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
    struct qdf_tso_seg_elem_t *next_seg;

    if (qdf_unlikely(!num_seg_desc)) {
        DP_TRACE(ERROR, "TSO number seg desc is NULL!");
        return;
    }

    while (free_seg) {
        next_seg = free_seg->next;
        dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
        free_seg = next_seg;
    }
}
#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    uint32_t stats_idx;

    stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
                 % CDP_MAX_TSO_PACKETS);
    return stats_idx;
}
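
/*
 * Example (values illustrative): qdf_atomic_inc_return() yields a
 * monotonically increasing counter and the modulo wraps it into the
 * per-packet stats array. If CDP_MAX_TSO_PACKETS were 16, successive calls
 * would return 1, 2, ..., 15, 0, 1, ... so the TSO stats slots are reused
 * in ring-buffer fashion.
 */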
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
    return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
                                          struct dp_tx_msdu_info_s *msdu_info,
                                          bool tso_seg_unmap)
{
    struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
    struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
    struct qdf_tso_num_seg_elem_t *tso_num_desc =
        tso_info->tso_num_seg_list;

    /* do dma unmap for each segment */
    if (tso_seg_unmap)
        dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

    /* free all tso num segment descriptors; typically there is only one */
    dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

    /* free all tso segment descriptors */
    dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    struct qdf_tso_seg_elem_t *tso_seg;
    int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
    struct dp_soc *soc = vdev->pdev->soc;
    struct dp_pdev *pdev = vdev->pdev;
    struct qdf_tso_info_t *tso_info;
    struct qdf_tso_num_seg_elem_t *tso_num_seg;

    tso_info = &msdu_info->u.tso_info;
    tso_info->curr_seg = NULL;
    tso_info->tso_seg_list = NULL;
    tso_info->num_segs = num_seg;
    msdu_info->frm_type = dp_tx_frm_tso;
    tso_info->tso_num_seg_list = NULL;

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    while (num_seg) {
        tso_seg = dp_tx_tso_desc_alloc(
                soc, msdu_info->tx_queue.desc_pool_id);
        if (tso_seg) {
            tso_seg->next = tso_info->tso_seg_list;
            tso_info->tso_seg_list = tso_seg;
            num_seg--;
        } else {
            dp_err_rl("Failed to alloc tso seg desc");
            DP_STATS_INC_PKT(vdev->pdev,
                             tso_stats.tso_no_mem_dropped, 1,
                             qdf_nbuf_len(msdu));
            dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
            return QDF_STATUS_E_NOMEM;
        }
    }

    TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

    tso_num_seg = dp_tso_num_seg_alloc(soc,
                                       msdu_info->tx_queue.desc_pool_id);
    if (tso_num_seg) {
        tso_num_seg->next = tso_info->tso_num_seg_list;
        tso_info->tso_num_seg_list = tso_num_seg;
    } else {
        DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
                 __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
        return QDF_STATUS_E_NOMEM;
    }

    msdu_info->num_seg =
        qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

    TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
              msdu_info->num_seg);

    if (!(msdu_info->num_seg)) {
        /*
         * Free allocated TSO seg desc and number seg desc,
         * do unmap for segments if dma map has done.
         */
        DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
        dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
        return QDF_STATUS_E_INVAL;
    }

    tso_info->curr_seg = tso_info->tso_seg_list;

    tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
    dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
                         msdu, msdu_info->num_seg);
    dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
                                tso_info->msdu_stats_idx);
    dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
    return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
        qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
    return QDF_STATUS_E_NOMEM;
}
#endif
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
        struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
    uint8_t i;
    uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
    struct dp_tx_seg_info_s *seg_info;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_soc *soc = vdev->pdev->soc;

    /* Allocate an extension descriptor */
    msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
    qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

    if (!msdu_ext_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    if (msdu_info->exception_fw &&
        qdf_unlikely(vdev->mesh_vdev)) {
        qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
                     &msdu_info->meta_data[0],
                     sizeof(struct htt_tx_msdu_desc_ext2_t));
        qdf_atomic_inc(&vdev->pdev->num_tx_exception);
    }

    switch (msdu_info->frm_type) {
    case dp_tx_frm_sg:
    case dp_tx_frm_me:
    case dp_tx_frm_raw:
        seg_info = msdu_info->u.sg_info.curr_seg;
        /* Update the buffer pointers in MSDU Extension Descriptor */
        for (i = 0; i < seg_info->frag_cnt; i++) {
            hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
                                       seg_info->frags[i].paddr_lo,
                                       seg_info->frags[i].paddr_hi,
                                       seg_info->frags[i].len);
        }
        break;

    case dp_tx_frm_tso:
        dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
                                   &cached_ext_desc[0]);
        break;

    default:
        break;
    }

    QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                       cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

    hal_tx_ext_desc_sync(&cached_ext_desc[0],
                         msdu_ext_desc->vaddr);

    return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
                            uint8_t vdev_id)
{
    QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
    QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
    DPTRACE(qdf_dp_trace_ptr(skb,
                             QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
                             QDF_TRACE_DEFAULT_PDEV_ID,
                             qdf_nbuf_data_addr(skb),
                             sizeof(qdf_nbuf_data(skb)),
                             msdu_id, vdev_id));

    qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

    DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
                                  QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
                                  msdu_id, QDF_TX));
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info carrying metadata for the fw
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
        struct dp_tx_msdu_info_s *msdu_info,
        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
    uint8_t align_pad;
    uint8_t is_exception = 0;
    uint8_t htt_hdr_size;
    struct dp_tx_desc_s *tx_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (qdf_unlikely(!tx_desc)) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = dp_tx_frm_std;
    tx_desc->tx_encap_type = ((tx_exc_metadata &&
        (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
        tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->msdu_ext_desc = NULL;
    tx_desc->pkt_offset = 0;

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    if (qdf_unlikely(vdev->multipass_en)) {
        if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
            goto failure;
    }

    /*
     * For special modes (vdev_type == ocb or mesh), data frames should be
     * transmitted using varying transmit parameters (tx spec) which include
     * transmit rate, power, priority, channel, channel bandwidth, nss etc.
     * These are filled in HTT MSDU descriptor and sent in frame pre-header.
     * These frames are sent as exception packets to firmware.
     *
     * HW requirement is that metadata should always point to a
     * 8-byte aligned address. So we add alignment pad to start of buffer.
     * HTT Metadata should be ensured to be multiple of 8-bytes,
     * to get 8-byte aligned start address along with align_pad added
     *
     *  |-----------------------------|
     *  |                             |
     *  |-----------------------------| <-----Buffer Pointer Address given
     *  |                             |  ^    in HW descriptor (aligned)
     *  |        HTT Metadata         |  |
     *  |                             |  |
     *  |                             |  | Packet Offset given in descriptor
     *  |                             |  |
     *  |-----------------------------|  |
     *  |       Alignment Pad         |  v
     *  |-----------------------------| <----- Actual buffer start address
     *  |         SKB Data            |        (Unaligned)
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |                             |
     *  |-----------------------------|
     */
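
    /*
     * Worked example of the layout above (address illustrative): if
     * qdf_nbuf_data(nbuf) returns 0x...1005, align_pad = 0x1005 & 0x7 = 5.
     * Pushing 5 bytes moves the head to 0x...1000, which is 8-byte aligned,
     * and the HTT metadata pushed next is itself padded to a multiple of
     * 8 bytes, so the final buffer pointer stays aligned. pkt_offset =
     * align_pad + htt_hdr_size then tells HW where the real frame begins.
     */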
    if (qdf_unlikely((msdu_info->exception_fw)) ||
        (vdev->opmode == wlan_op_mode_ocb) ||
        (tx_exc_metadata &&
         tx_exc_metadata->is_tx_sniffer)) {
        align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

        if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
            DP_STATS_INC(vdev,
                         tx_i.dropped.headroom_insufficient, 1);
            goto failure;
        }

        if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "qdf_nbuf_push_head failed");
            goto failure;
        }

        htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
                                                  msdu_info);
        if (htt_hdr_size == 0)
            goto failure;
        tx_desc->pkt_offset = align_pad + htt_hdr_size;
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        is_exception = 1;
    }

#if !TQM_BYPASS_WAR
    if (is_exception || tx_exc_metadata)
#endif
    {
        /* Temporary WAR due to TQM VP issues */
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
        qdf_atomic_inc(&pdev->num_tx_exception);
    }

    return tx_desc;

failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment
 *                        frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
        qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
        uint8_t desc_pool_id)
{
    struct dp_tx_desc_s *tx_desc;
    struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
    struct dp_pdev *pdev = vdev->pdev;
    struct dp_soc *soc = pdev->soc;

    if (dp_tx_limit_check(vdev))
        return NULL;

    /* Allocate software Tx descriptor */
    tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
    if (!tx_desc) {
        DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
        return NULL;
    }

    dp_tx_outstanding_inc(pdev);

    /* Initialize the SW tx descriptor */
    tx_desc->nbuf = nbuf;
    tx_desc->frm_type = msdu_info->frm_type;
    tx_desc->tx_encap_type = vdev->tx_encap_type;
    tx_desc->vdev = vdev;
    tx_desc->pdev = pdev;
    tx_desc->pkt_offset = 0;
    tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
    tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;

    dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

    /* Handle scattered frames - TSO/SG/ME */
    /* Allocate and prepare an extension descriptor for scattered frames */
    msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
    if (!msdu_ext_desc) {
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
                  "%s Tx Extension Descriptor Alloc Fail",
                  __func__);
        goto failure;
    }

#if TQM_BYPASS_WAR
    /* Temporary WAR due to TQM VP issues */
    tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
    qdf_atomic_inc(&pdev->num_tx_exception);
#endif
    if (qdf_unlikely(msdu_info->exception_fw))
        tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

    tx_desc->msdu_ext_desc = msdu_ext_desc;
    tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;

    return tx_desc;
failure:
    dp_tx_desc_release(tx_desc, desc_pool_id);
    return NULL;
}
/**
 * dp_tx_prepare_raw() - Prepare RAW packet TX
 * @vdev: DP vdev handle
 * @nbuf: buffer pointer
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
 *             descriptor
 *
 * Return: nbuf on success, NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
        struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
    qdf_nbuf_t curr_nbuf = NULL;
    uint16_t total_len = 0;
    qdf_dma_addr_t paddr;
    int32_t i;
    int32_t mapped_buf_num = 0;

    struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
    qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

    DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));

    /* Continue only if frames are of DATA type */
    if (!DP_FRAME_IS_DATA(qos_wh)) {
        DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                  "Pkt. recd is of not data type");
        goto error;
    }

    /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
    if (vdev->raw_mode_war &&
        (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
        (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
        qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;

    for (curr_nbuf = nbuf, i = 0; curr_nbuf;
         curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
        if (QDF_STATUS_SUCCESS !=
            qdf_nbuf_map_nbytes_single(vdev->osdev,
                                       curr_nbuf,
                                       QDF_DMA_TO_DEVICE,
                                       curr_nbuf->len)) {
            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                      "%s dma map error ", __func__);
            DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
            mapped_buf_num = i;
            goto error;
        }

        paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
        seg_info->frags[i].paddr_lo = paddr;
        seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
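
        /*
         * Example of the hi/lo split (address illustrative): a 64-bit DMA
         * address of 0x1_2345_6788 is stored as paddr_lo = 0x23456788 and
         * paddr_hi = 0x1, matching the two 32-bit words that the MSDU
         * extension descriptor expects for each fragment.
         */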
        seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
        seg_info->frags[i].vaddr = (void *)curr_nbuf;
        total_len += qdf_nbuf_len(curr_nbuf);
    }

    seg_info->frag_cnt = i;
    seg_info->total_len = total_len;
    seg_info->next = NULL;

    sg_info->curr_seg = seg_info;

    msdu_info->frm_type = dp_tx_frm_raw;
    msdu_info->num_seg = 1;

    return nbuf;

error:
    i = 0;
    while (nbuf) {
        curr_nbuf = nbuf;
        if (i < mapped_buf_num) {
            qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
                                         QDF_DMA_TO_DEVICE,
                                         curr_nbuf->len);
            i++;
        }
        nbuf = qdf_nbuf_next(nbuf);
        qdf_nbuf_free(curr_nbuf);
    }
    return NULL;
}
/**
 * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
 * @soc: DP soc handle
 * @nbuf: Buffer pointer
 *
 * unmap the chain of nbufs that belong to this RAW frame.
 *
 * Return: None
 */
static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
                                    qdf_nbuf_t nbuf)
{
    qdf_nbuf_t cur_nbuf = nbuf;

    do {
        qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
                                     QDF_DMA_TO_DEVICE,
                                     cur_nbuf->len);
        cur_nbuf = qdf_nbuf_next(cur_nbuf);
    } while (cur_nbuf);
}
#ifdef VDEV_PEER_PROTOCOL_COUNT
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, nbuf) \
{ \
    qdf_nbuf_t nbuf_local; \
    struct dp_vdev *vdev_local = vdev_hdl; \
    do { \
        if (qdf_likely(!((vdev_local)->peer_protocol_count_track))) \
            break; \
        nbuf_local = nbuf; \
        if (qdf_unlikely(((vdev_local)->tx_encap_type) == \
                         htt_cmn_pkt_type_raw)) \
            break; \
        else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local)))) \
            break; \
        else if (qdf_nbuf_is_tso((nbuf_local))) \
            break; \
        dp_vdev_peer_stats_update_protocol_cnt((vdev_local), \
                                               (nbuf_local), \
                                               NULL, 1, 0); \
    } while (0); \
}
#else
#define dp_vdev_peer_stats_update_protocol_cnt_tx(vdev_hdl, skb)
#endif
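
/*
 * Usage note (illustrative): the macro is invoked on the TX path right
 * before the frame is handed to HW, e.g. in dp_tx_hw_enqueue() below:
 *
 *     dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
 *
 * With VDEV_PEER_PROTOCOL_COUNT disabled it expands to nothing, so there
 * is no cost on builds that do not track per-peer protocol counters.
 */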
/**
 * dp_tx_hw_enqueue() - Enqueue to TCL HW for transmit
 * @soc: DP Soc Handle
 * @vdev: DP vdev handle
 * @tx_desc: Tx Descriptor Handle
 * @tid: TID from HLOS for overriding default DSCP-TID mapping
 * @fw_metadata: Metadata to send to Target Firmware along with frame
 * @ring_id: Ring ID of H/W ring to which we enqueue the packet
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Gets the next free TCL HW DMA descriptor and sets up required parameters
 * from software Tx descriptor
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
                                   struct dp_tx_desc_s *tx_desc, uint8_t tid,
                                   uint16_t fw_metadata, uint8_t ring_id,
                                   struct cdp_tx_exception_metadata
                                   *tx_exc_metadata)
{
    uint8_t type;
    void *hal_tx_desc;
    uint32_t *hal_tx_desc_cached;
    /*
     * Initializing the descriptor statically here avoids the jump to a
     * memset call that qdf_mem_set would otherwise add.
     */
    uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
  989. enum cdp_sec_type sec_type = ((tx_exc_metadata &&
  990. tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
  991. tx_exc_metadata->sec_type : vdev->sec_type);
  992. /* Return Buffer Manager ID */
  993. uint8_t bm_id = dp_tx_get_rbm_id(soc, ring_id);
  994. hal_ring_handle_t hal_ring_hdl = NULL;
  995. QDF_STATUS status = QDF_STATUS_E_RESOURCES;
	if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
		dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
		return QDF_STATUS_E_RESOURCES;
	}

	hal_tx_desc_cached = (void *)cached_desc;

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
		tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
		type = HAL_TX_BUF_TYPE_EXT_DESC;
		tx_desc->dma_addr = tx_desc->msdu_ext_desc->paddr;
	} else {
		tx_desc->length = qdf_nbuf_len(tx_desc->nbuf) -
					tx_desc->pkt_offset;
		type = HAL_TX_BUF_TYPE_BUFFER;
		tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
	}

	qdf_assert_always(tx_desc->dma_addr);

	hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
				 tx_desc->dma_addr, bm_id, tx_desc->id,
				 type);
	hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
				vdev->lmac_id);
	hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
				    vdev->search_type);
	hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
				     vdev->bss_ast_idx);
	hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
					  vdev->dscp_tid_map_id);
	hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
				     sec_type_map[sec_type]);
	hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
				      (vdev->bss_ast_hash & 0xF));
	hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
	hal_tx_desc_set_buf_length(hal_tx_desc_cached, tx_desc->length);
	hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
	hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
	hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
					  vdev->hal_desc_addr_search_flags);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);

	/* verify checksum offload configuration */
	if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
	    ((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
	     || qdf_nbuf_is_tso(tx_desc->nbuf))) {
		hal_tx_desc_set_l3_checksum_en(hal_tx_desc_cached, 1);
		hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
	}

	if (tid != HTT_TX_EXT_TID_INVALID)
		hal_tx_desc_set_hlos_tid(hal_tx_desc_cached, tid);

	if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
		hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);

	if (qdf_unlikely(vdev->pdev->delay_stats_flag))
		tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());

	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
			 tx_desc->length, type, (uint64_t)tx_desc->dma_addr,
			 tx_desc->pkt_offset, tx_desc->id);

	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);

	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL RING Access Failed -- %pK",
			  __func__, __LINE__, hal_ring_hdl);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		return status;
	}

	/* Sync cached descriptor with HW */
	hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);

	if (qdf_unlikely(!hal_tx_desc)) {
		dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
		DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
		DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
		goto ring_access_fail;
	}

	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
	hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);

	status = QDF_STATUS_SUCCESS;

ring_access_fail:
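	/*
	 * Hold a runtime-PM reference while closing ring access so the
	 * updated head pointer reaches HW; if the device cannot be woken,
	 * only reap the ring and mark it for a deferred flush.
	 */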
	if (hif_pm_runtime_get(soc->hif_handle,
			       RTPM_ID_DW_TX_HW_ENQUEUE) == 0) {
		dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
		hif_pm_runtime_put(soc->hif_handle,
				   RTPM_ID_DW_TX_HW_ENQUEUE);
	} else {
		dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	return status;
}

/**
 * dp_cce_classify() - Classify the frame based on CCE rules
 * @vdev: DP vdev handle
 * @nbuf: skb
 *
 * Classify frames based on CCE rules
 *
 * Return: bool(true if classified, else false)
 */
static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	qdf_ether_header_t *eh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	qdf_nbuf_t nbuf_clone = NULL;
	qdf_dot3_qosframe_t *qos_wh = NULL;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return false;

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
		ether_type = eh->ether_type;
		llcHdr = (qdf_llc_t *)(nbuf->data +
					sizeof(qdf_ether_header_t));
	} else {
		qos_wh = (qdf_dot3_qosframe_t *)nbuf->data;

		/* For encrypted packets don't do any classification */
		if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
			return false;

		if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
			if (qdf_unlikely(
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
				qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_4ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_4ADDR_HDR_LEN);
			} else {
				ether_type = *(uint16_t *)(nbuf->data
						+ QDF_IEEE80211_3ADDR_HDR_LEN
						+ sizeof(qdf_llc_t)
						- sizeof(ether_type));
				llcHdr = (qdf_llc_t *)(nbuf->data +
						QDF_IEEE80211_3ADDR_HDR_LEN);
			}

			if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
					 && (ether_type ==
					     qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
				DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
				return true;
			}
		}

		return false;
	}
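	/*
	 * For LLC/SNAP encapsulated frames, work on a clone with the LLC
	 * (and any VLAN) header stripped, so the qdf_nbuf_is_*() parsers
	 * below see a plain Ethernet-II frame.
	 */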
	if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
		ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (qdf_unlikely(nbuf_clone)) {
			qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));

			if (ether_type == htons(ETHERTYPE_VLAN)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			nbuf_clone = qdf_nbuf_clone(nbuf);
			if (qdf_unlikely(nbuf_clone)) {
				qdf_nbuf_pull_head(nbuf_clone,
						   sizeof(qdf_net_vlanhdr_t));
			}
		}
	}

	if (qdf_unlikely(nbuf_clone))
		nbuf = nbuf_clone;

	if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
			 || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
			 || (qdf_nbuf_is_ipv4_pkt(nbuf)
			     && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
			 || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
			     qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
		if (qdf_unlikely(nbuf_clone))
			qdf_nbuf_free(nbuf_clone);

		return true;
	}

	if (qdf_unlikely(nbuf_clone))
		qdf_nbuf_free(nbuf_clone);

	return false;
}

/**
 * dp_tx_get_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the extracted TID is stored
 *
 * Extract the DSCP or PCP information from the frame and map it into a
 * TID value.
 *
 * Return: void
 */
static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
			  struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t tos = 0, dscp_tid_override = 0;
	uint8_t *hdr_ptr, *L3datap;
	uint8_t is_mcast = 0;
	qdf_ether_header_t *eh = NULL;
	qdf_ethervlan_header_t *evh = NULL;
	uint16_t ether_type;
	qdf_llc_t *llcHdr;
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		eh = (qdf_ether_header_t *)nbuf->data;
		hdr_ptr = eh->ether_dhost;
		L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
	} else {
		qdf_dot3_qosframe_t *qos_wh =
			(qdf_dot3_qosframe_t *)nbuf->data;
		msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
			qos_wh->i_qos[0] & DP_QOS_TID : 0;
		return;
	}

	is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
	ether_type = eh->ether_type;
	llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));

	/*
	 * Check if packet is dot3 or eth2 type.
	 */
	if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
		ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
				sizeof(*llcHdr));

		if (ether_type == htons(ETHERTYPE_VLAN)) {
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
				sizeof(*llcHdr);
			ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
					+ sizeof(*llcHdr) +
					sizeof(qdf_net_vlanhdr_t));
		} else {
			L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
				sizeof(*llcHdr);
		}
	} else {
		if (ether_type == htons(ETHERTYPE_VLAN)) {
			evh = (qdf_ethervlan_header_t *)eh;
			ether_type = evh->ether_type;
			L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
		}
	}

	/*
	 * Find priority from IP TOS DSCP field
	 */
	if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
		qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *)L3datap;

		if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
			/* Only for unicast frames */
			if (!is_mcast) {
				/* send it on VO queue */
				msdu_info->tid = DP_VO_TID;
			}
		} else {
			/*
			 * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
			 * from TOS byte.
			 */
			tos = ip->ip_tos;
			dscp_tid_override = 1;
		}
	} else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
		/*
		 * TODO: use flowlabel
		 * IGMP/MLD cases to be handled in phase 2
		 */
		unsigned long ver_pri_flowlabel;
		unsigned long pri;

		ver_pri_flowlabel = *(unsigned long *)L3datap;
		pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
			DP_IPV6_PRIORITY_SHIFT;
		tos = pri;
		dscp_tid_override = 1;
	} else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		msdu_info->tid = DP_VO_TID;
	else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
		/* Only for unicast frames */
		if (!is_mcast) {
			/* send ucast arp on VO queue */
			msdu_info->tid = DP_VO_TID;
		}
	}

	/*
	 * Assign all MCAST packets to BE
	 */
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		if (is_mcast) {
			tos = 0;
			dscp_tid_override = 1;
		}
	}
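	/*
	 * tos holds the full TOS byte here; shift out the two ECN bits and
	 * mask down to the 6-bit DSCP value before indexing the per-pdev
	 * DSCP-TID map selected for this vdev.
	 */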
	if (dscp_tid_override == 1) {
		tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
		msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
	}

	if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
		msdu_info->tid = CDP_MAX_DATA_TIDS - 1;

	return;
}

/**
 * dp_tx_classify_tid() - Obtain TID to be used for this frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info in which the classified TID is stored
 *
 * Software based TID classification is required when more than 2 DSCP-TID
 * mapping tables are needed.
 * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
 *
 * Return: void
 */
static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				      struct dp_tx_msdu_info_s *msdu_info)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;

	DP_TX_TID_OVERRIDE(msdu_info, nbuf);

	if (pdev->soc && vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map)
		return;

	/* for mesh packets don't do any classification */
	if (qdf_unlikely(vdev->mesh_vdev))
		return;

	dp_tx_get_tid(vdev, nbuf, msdu_info);
}

#ifdef FEATURE_WLAN_TDLS
/**
 * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
 * @tx_desc: TX descriptor
 *
 * Return: None
 */
static void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
	if (tx_desc->vdev) {
		if (tx_desc->vdev->is_tdls_frame) {
			tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
			tx_desc->vdev->is_tdls_frame = false;
		}
	}
}

/**
 * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
 * @soc: dp_soc handle
 * @tx_desc: TX descriptor
 * @vdev: datapath vdev handle
 *
 * Return: None
 */
static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
					 struct dp_tx_desc_s *tx_desc,
					 struct dp_vdev *vdev)
{
	struct hal_tx_completion_status ts = {0};
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	if (qdf_unlikely(!vdev)) {
		dp_err_rl("vdev is null!");
		goto error;
	}

	hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
	if (vdev->tx_non_std_data_callback.func) {
		qdf_nbuf_set_next(nbuf, NULL);
		vdev->tx_non_std_data_callback.func(
				vdev->tx_non_std_data_callback.ctxt,
				nbuf, ts.status);
		return;
	} else {
		dp_err_rl("callback func is null");
	}

error:
	qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
	qdf_nbuf_free(nbuf);
}

/**
 * dp_tx_msdu_single_map() - do nbuf map
 * @vdev: DP vdev handle
 * @tx_desc: DP TX descriptor pointer
 * @nbuf: skb pointer
 *
 * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
 * operation done in other components.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
		return qdf_nbuf_map_nbytes_single(vdev->osdev,
						  nbuf,
						  QDF_DMA_TO_DEVICE,
						  nbuf->len);
	else
		return qdf_nbuf_map_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE);
}
#else
static inline void dp_tx_update_tdls_flags(struct dp_tx_desc_s *tx_desc)
{
}

static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
						struct dp_tx_desc_s *tx_desc,
						struct dp_vdev *vdev)
{
}

static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
					       struct dp_tx_desc_s *tx_desc,
					       qdf_nbuf_t nbuf)
{
	return qdf_nbuf_map_nbytes_single(vdev->osdev,
					  nbuf,
					  QDF_DMA_TO_DEVICE,
					  nbuf->len);
}
#endif

/**
 * dp_tx_frame_is_drop() - checks if the packet is loopback
 * @vdev: DP vdev handle
 * @srcmac: source MAC address
 * @dstmac: destination MAC address
 *
 * Return: 1 if frame needs to be dropped else 0
 */
int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
{
	struct dp_pdev *pdev = NULL;
	struct dp_ast_entry *src_ast_entry = NULL;
	struct dp_ast_entry *dst_ast_entry = NULL;
	struct dp_soc *soc = NULL;

	qdf_assert(vdev);
	pdev = vdev->pdev;
	qdf_assert(pdev);
	soc = pdev->soc;

	dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, dstmac, vdev->pdev->pdev_id);
	src_ast_entry = dp_peer_ast_hash_find_by_pdevid
				(soc, srcmac, vdev->pdev->pdev_id);
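	/*
	 * If both the source and destination MACs resolve to AST entries
	 * owned by the same peer, the frame would simply loop back to its
	 * sender, so flag it for drop.
	 */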
	if (dst_ast_entry && src_ast_entry) {
		if (dst_ast_entry->peer->peer_ids[0] ==
			src_ast_entry->peer->peer_ids[0])
			return 1;
	}

	return 0;
}

/**
 * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU information (including the TID from HLOS for overriding
 *             the default DSCP-TID mapping, and the Tx queue to be used)
 * @peer_id: peer_id of the peer in case of NAWDS frames
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
		       struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	QDF_STATUS status;
	struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
	uint16_t htt_tcl_metadata = 0;
	enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
	uint8_t tid = msdu_info->tid;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
	tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
					    msdu_info, tx_exc_metadata);
	if (!tx_desc) {
		dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
			  vdev, tx_q->desc_pool_id);
		drop_code = TX_DESC_ERR;
		goto fail_return;
	}

	if (qdf_unlikely(soc->cce_disable)) {
		if (dp_cce_classify(vdev, nbuf) == true) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			tid = DP_VO_TID;
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		}
	}

	dp_tx_update_tdls_flags(tx_desc);
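	/*
	 * Select the HTT TCL metadata: DP_INVALID_PEER marks the frame as
	 * host-inspected, a valid NAWDS peer_id routes it to that specific
	 * peer, and anything else falls back to the vdev default metadata.
	 */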
	if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
		htt_tcl_metadata = vdev->htt_tcl_metadata;
		HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
	} else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
		HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
					     HTT_TCL_METADATA_TYPE_PEER_BASED);
		HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
						peer_id);
	} else
		htt_tcl_metadata = vdev->htt_tcl_metadata;

	if (msdu_info->exception_fw)
		HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);

	dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
					 !pdev->enhanced_stats_en);

	if (qdf_unlikely(QDF_STATUS_SUCCESS !=
			 dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
		/* Handle failure */
		dp_err("qdf_nbuf_map failed");
		DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
		drop_code = TX_DMA_MAP_ERR;
		goto release_desc;
	}

	/* Enqueue the Tx MSDU descriptor to HW for transmit */
	status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
				  htt_tcl_metadata, tx_q->ring_id,
				  tx_exc_metadata);

	if (status != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
			  __func__, tx_desc, tx_q->ring_id);
		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
					     QDF_DMA_TO_DEVICE,
					     nbuf->len);
		drop_code = TX_HW_ENQUEUE;
		goto release_desc;
	}

	return NULL;

release_desc:
	dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);

fail_return:
	dp_tx_get_tid(vdev, nbuf, msdu_info);
	tid_stats = &pdev->stats.tid_stats.
		    tid_tx_stats[tx_q->ring_id][tid];
	tid_stats->swdrop_cnt[drop_code]++;
	return nbuf;
}

/**
 * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 *
 * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#if QDF_LOCK_STATS
noinline
#else
#endif
qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	uint8_t i;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc;
	bool is_cce_classified = false;
	QDF_STATUS status;
	uint16_t htt_tcl_metadata = 0;
	struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
	struct cdp_tid_tx_stats *tid_stats = NULL;

	if (qdf_unlikely(soc->cce_disable)) {
		is_cce_classified = dp_cce_classify(vdev, nbuf);
		if (is_cce_classified) {
			DP_STATS_INC(vdev, tx_i.cce_classified, 1);
			msdu_info->tid = DP_VO_TID;
		}
	}

	if (msdu_info->frm_type == dp_tx_frm_me)
		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;

	i = 0;

	/*
	 * For each segment (maps to 1 MSDU), prepare software and hardware
	 * descriptors using information in msdu_info
	 */
	while (i < msdu_info->num_seg) {
		/*
		 * Setup Tx descriptor for an MSDU, and MSDU extension
		 * descriptor
		 */
		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
					     tx_q->desc_pool_id);

		if (!tx_desc) {
			if (msdu_info->frm_type == dp_tx_frm_me) {
				dp_tx_me_free_buf(pdev,
					(void *)(msdu_info->u.sg_info
						.curr_seg->frags[0].vaddr));
				i++;
				continue;
			}
			goto done;
		}

		if (msdu_info->frm_type == dp_tx_frm_me) {
			tx_desc->me_buffer =
				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
		}

		if (is_cce_classified)
			tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;

		htt_tcl_metadata = vdev->htt_tcl_metadata;
		if (msdu_info->exception_fw) {
			HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
		}

		/*
		 * Enqueue the Tx MSDU descriptor to HW for transmit
		 */
		status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
					  htt_tcl_metadata, tx_q->ring_id,
					  NULL);

		if (status != QDF_STATUS_SUCCESS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
				  __func__, tx_desc, tx_q->ring_id);

			dp_tx_get_tid(vdev, nbuf, msdu_info);
			tid_stats = &pdev->stats.tid_stats.
				    tid_tx_stats[tx_q->ring_id][msdu_info->tid];
			tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;

			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
			if (msdu_info->frm_type == dp_tx_frm_me) {
				i++;
				continue;
			}
			goto done;
		}

		/*
		 * TODO
		 * if tso_info structure can be modified to have curr_seg
		 * as first element, following 2 blocks of code (for TSO and SG)
		 * can be combined into 1
		 */

		/*
		 * For frames with multiple segments (TSO, ME), jump to next
		 * segment.
		 */
		if (msdu_info->frm_type == dp_tx_frm_tso) {
			if (msdu_info->u.tso_info.curr_seg->next) {
				msdu_info->u.tso_info.curr_seg =
					msdu_info->u.tso_info.curr_seg->next;

				/*
				 * If this is a jumbo nbuf, then increment the
				 * number of nbuf users for each additional
				 * segment of the msdu. This will ensure that
				 * the skb is freed only after receiving tx
				 * completion for all segments of an nbuf
				 */
				qdf_nbuf_inc_users(nbuf);

				/* Check with MCL if this is needed */
				/* nbuf = msdu_info->u.tso_info.curr_seg->nbuf; */
			}
		}

		/*
		 * For Multicast-Unicast converted packets,
		 * each converted frame (for a client) is represented as
		 * 1 segment
		 */
		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
		    (msdu_info->frm_type == dp_tx_frm_me)) {
			if (msdu_info->u.sg_info.curr_seg->next) {
				msdu_info->u.sg_info.curr_seg =
					msdu_info->u.sg_info.curr_seg->next;
				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
			}
		}
		i++;
	}

	nbuf = NULL;

done:
	return nbuf;
}

/**
 * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
 *                     for SG frames
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @seg_info: Pointer to Segment info Descriptor to be prepared
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: nbuf on success,
 *         NULL on failure
 */
static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
	struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t cur_frag, nr_frags;
	qdf_dma_addr_t paddr;
	struct dp_tx_sg_info_s *sg_info;

	sg_info = &msdu_info->u.sg_info;
	nr_frags = qdf_nbuf_get_nr_frags(nbuf);

	if (QDF_STATUS_SUCCESS !=
		qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
					   QDF_DMA_TO_DEVICE, nbuf->len)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "dma map error");
		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

		qdf_nbuf_free(nbuf);
		return NULL;
	}
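	/*
	 * Frag 0 describes the linear (head) portion of the skb; the paged
	 * fragments are mapped one by one below and recorded as frags 1..N,
	 * with each physical address split into low and high 32-bit halves.
	 */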
	paddr = qdf_nbuf_mapped_paddr_get(nbuf);

	seg_info->frags[0].paddr_lo = paddr;
	seg_info->frags[0].paddr_hi = ((uint64_t)paddr) >> 32;
	seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
	seg_info->frags[0].vaddr = (void *)nbuf;

	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
					nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "frag dma map error");
			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
			qdf_nbuf_free(nbuf);
			return NULL;
		}

		paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
		seg_info->frags[cur_frag + 1].paddr_lo = paddr;
		seg_info->frags[cur_frag + 1].paddr_hi =
			((uint64_t)paddr) >> 32;
		seg_info->frags[cur_frag + 1].len =
			qdf_nbuf_get_frag_size(nbuf, cur_frag);
	}

	seg_info->frag_cnt = (cur_frag + 1);
	seg_info->total_len = qdf_nbuf_len(nbuf);
	seg_info->next = NULL;

	sg_info->curr_seg = seg_info;

	msdu_info->frm_type = dp_tx_frm_sg;
	msdu_info->num_seg = 1;

	return nbuf;
}

/**
 * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
 * @vdev: DP vdev handle
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
 *
 * Return: None
 */
static
void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
				    struct dp_tx_msdu_info_s *msdu_info,
				    uint16_t ppdu_cookie)
{
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
				(msdu_info->meta_data[5], 1);
	HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
				(msdu_info->meta_data[6], ppdu_cookie);

	msdu_info->exception_fw = 1;
	msdu_info->is_tx_sniffer = 1;
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_extract_mesh_meta_data()- Extract mesh meta hdr info from nbuf
 *                                 and prepare msdu_info for mesh frames.
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
 *
 * Return: NULL on failure,
 *         nbuf when extracted successfully
 */
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	struct meta_hdr_s *mhdr;
	struct htt_tx_msdu_desc_ext2_t *meta_data =
		(struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
		msdu_info->exception_fw = 0;
		goto remove_meta_hdr;
	}

	msdu_info->exception_fw = 1;

	qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));

	meta_data->host_tx_desc_pool = 1;
	meta_data->update_peer_cache = 1;
	meta_data->learning_frame = 1;
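	/*
	 * When the meta header carries fixed rate settings (AUTO_RATE flag
	 * clear), copy the per-frame power, MCS, NSS, preamble and retry
	 * parameters and mark each of them valid for the FW.
	 */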
	if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
		meta_data->power = mhdr->power;

		meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
		meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
		meta_data->pream_type = mhdr->rate_info[0].preamble_type;
		meta_data->retry_limit = mhdr->rate_info[0].max_tries;

		meta_data->dyn_bw = 1;

		meta_data->valid_pwr = 1;
		meta_data->valid_mcs_mask = 1;
		meta_data->valid_nss_mask = 1;
		meta_data->valid_preamble_type = 1;
		meta_data->valid_retries = 1;
		meta_data->valid_bw_info = 1;
	}

	if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
		meta_data->encrypt_type = 0;
		meta_data->valid_encrypt_type = 1;
		meta_data->learning_frame = 0;
	}

	meta_data->valid_key_flags = 1;
	meta_data->key_flags = (mhdr->keyix & 0x3);

remove_meta_hdr:
	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_pull_head failed");
		qdf_nbuf_free(nbuf);
		return NULL;
	}

	msdu_info->tid = qdf_nbuf_get_priority(nbuf);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
		  " tid %d to_fw %d",
		  __func__, msdu_info->meta_data[0],
		  msdu_info->meta_data[1],
		  msdu_info->meta_data[2],
		  msdu_info->meta_data[3],
		  msdu_info->meta_data[4],
		  msdu_info->meta_data[5],
		  msdu_info->tid, msdu_info->exception_fw);

	return nbuf;
}
#else
static
qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					struct dp_tx_msdu_info_s *msdu_info)
{
	return nbuf;
}

#endif

/**
 * dp_check_exc_metadata() - Checks if parameters are valid
 * @tx_exc: holds all exception path parameters
 *
 * Return: true when all the parameters are valid, else false
 */
static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
{
	bool invalid_tid = (tx_exc->tid > DP_MAX_TIDS && tx_exc->tid !=
			    HTT_INVALID_TID);
	bool invalid_encap_type =
			(tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
			 tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
	bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
				 tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
	bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
			       tx_exc->ppdu_cookie == 0);

	if (invalid_tid || invalid_encap_type || invalid_sec_type ||
	    invalid_cookie) {
		return false;
	}

	return true;
}

/**
 * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 * @tx_exc_metadata: Handle that holds exception path meta data
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD to transmit frames through fw
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t
dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf,
		     struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	qdf_ether_header_t *eh = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);

	if (qdf_unlikely(!vdev))
		goto fail;

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	if (!tx_exc_metadata)
		goto fail;

	msdu_info.tid = tx_exc_metadata->tid;

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
	dp_verbose_debug("skb %pM", nbuf->data);

	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid parameters in exception path");
		goto fail;
	}

	/* Basic sanity checks for unsupported packets */

	/* MESH mode */
	if (qdf_unlikely(vdev->mesh_vdev)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Mesh mode is not supported in exception path");
		goto fail;
	}

	/* TSO or SG */
	if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
	    qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "TSO and SG are not supported in exception path");
		goto fail;
	}

	/* RAW */
	if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Raw frame is not supported in exception path");
		goto fail;
	}

	/* Mcast enhancement */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
		}
	}

	if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
		DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
				 qdf_nbuf_len(nbuf));
		dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
					       tx_exc_metadata->ppdu_cookie);
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
				      tx_exc_metadata->peer_id,
				      tx_exc_metadata);

	return nbuf;

fail:
	dp_verbose_debug("pkt send failed");
	return nbuf;
}
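/*
 * Illustrative (non-compiled) usage sketch for the exception path; the
 * caller context, cdp_soc handle and vdev_id below are hypothetical:
 *
 *	struct cdp_tx_exception_metadata meta = {0};
 *
 *	meta.peer_id = HTT_INVALID_PEER;
 *	meta.tid = HTT_INVALID_TID;
 *	meta.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *	meta.sec_type = CDP_INVALID_SEC_TYPE;
 *	nbuf = dp_tx_send_exception(cdp_soc, vdev_id, nbuf, &meta);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	(nbuf returned means send failed)
 */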
/**
 * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
 * @soc: DP soc handle
 * @vdev_id: DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
#ifdef MESH_MODE_SUPPORT
qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t nbuf_mesh = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_vdev *vdev;
	uint8_t no_enc_frame = 0;

	nbuf_mesh = qdf_nbuf_unshare(nbuf);
	if (!nbuf_mesh) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "qdf_nbuf_unshare failed");
		return nbuf;
	}

	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						  vdev_id);
	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "vdev is NULL for vdev_id %d", vdev_id);
		return nbuf;
	}

	nbuf = nbuf_mesh;

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);

	if ((vdev->sec_type != cdp_sec_type_none) &&
	    (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
		no_enc_frame = 1;

	if (mhdr->flags & METAHDR_FLAG_NOQOS)
		qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);

	if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
	    !no_enc_frame) {
		nbuf_clone = qdf_nbuf_clone(nbuf);
		if (!nbuf_clone) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "qdf_nbuf_clone failed");
			return nbuf;
		}
		qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
	}

	if (nbuf_clone) {
		if (!dp_tx_send(soc, vdev_id, nbuf_clone)) {
			DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
		} else {
			qdf_nbuf_free(nbuf_clone);
		}
	}

	if (no_enc_frame)
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
	else
		qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);

	nbuf = dp_tx_send(soc, vdev_id, nbuf);
	if ((!nbuf) && no_enc_frame) {
		DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
	}

	return nbuf;
}

#else

qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
			   qdf_nbuf_t nbuf)
{
	return dp_tx_send(soc, vdev_id, nbuf);
}

#endif

/**
 * dp_tx_nawds_handler() - NAWDS handler
 *
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @msdu_info: msdu_info required to create HTT metadata
 * @nbuf: skb
 *
 * This API transmits the multicast frame with the peer id of each
 * NAWDS-enabled peer.
 *
 * Return: none
 */
static inline
void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
			 struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	qdf_nbuf_t nbuf_clone = NULL;
	struct dp_soc *dp_soc = (struct dp_soc *)soc;
	uint16_t peer_id = DP_INVALID_PEER;
	struct dp_peer *sa_peer = NULL;
	struct dp_ast_entry *ast_entry = NULL;
	qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	if (qdf_nbuf_get_tx_ftype(nbuf) == CB_FTYPE_INTRABSS_FWD) {
		qdf_spin_lock_bh(&dp_soc->ast_lock);

		ast_entry = dp_peer_ast_hash_find_by_pdevid
					(dp_soc,
					 (uint8_t *)(eh->ether_shost),
					 vdev->pdev->pdev_id);

		if (ast_entry)
			sa_peer = ast_entry->peer;

		qdf_spin_unlock_bh(&dp_soc->ast_lock);
	}
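	/*
	 * sa_peer (resolved above for intra-BSS forwarded frames) identifies
	 * the original sender; multicast copies are skipped for it in the
	 * loop below to avoid echoing the frame back to its source.
	 */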
	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer->bss_peer && peer->nawds_enabled) {
			peer_id = peer->peer_ids[0];
			/* Multicast packets need to be
			 * dropped in case of intra bss forwarding
			 */
			if (sa_peer == peer) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  " %s: multicast packet", __func__);
				DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
				continue;
			}

			nbuf_clone = qdf_nbuf_clone(nbuf);

			if (!nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("nbuf clone failed"));
				break;
			}

			nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
							    msdu_info, peer_id,
							    NULL);

			if (nbuf_clone) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_DEBUG,
					  FL("pkt send failed"));
				qdf_nbuf_free(nbuf_clone);
			} else {
				if (peer_id != DP_INVALID_PEER)
					DP_STATS_INC_PKT(peer, tx.nawds_mcast,
							 1, qdf_nbuf_len(nbuf));
			}
		}
	}

	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
}

/**
 * dp_tx_send() - Transmit a frame on a given VAP
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @nbuf: skb
 *
 * Entry point for Core Tx layer (DP_TX) invoked from
 * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-vap
 * forwarding cases
 *
 * Return: NULL on success,
 *         nbuf when it fails to send
 */
qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
{
	uint16_t peer_id = HTT_INVALID_PEER;
	/*
	 * doing a memzero is causing additional function call overhead
	 * so doing static stack clearing
	 */
	struct dp_tx_msdu_info_s msdu_info = {0};
	struct dp_vdev *vdev =
		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
						   vdev_id);
	if (qdf_unlikely(!vdev))
		return nbuf;

	dp_verbose_debug("skb %pM", nbuf->data);

	/*
	 * Set Default Host TID value to invalid TID
	 * (TID override disabled)
	 */
	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));

	if (qdf_unlikely(vdev->mesh_vdev)) {
		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
								    &msdu_info);
		if (!nbuf_mesh) {
			dp_verbose_debug("Extracting mesh metadata failed");
			return nbuf;
		}
		nbuf = nbuf_mesh;
	}

	/*
	 * Get HW Queue to use for this frame.
	 * TCL supports up to 4 DMA rings, out of which 3 rings are
	 * dedicated for data and 1 for command.
	 * "queue_id" maps to one hardware ring.
	 * With each ring, we also associate a unique Tx descriptor pool
	 * to minimize lock contention for these resources.
	 */
	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	/*
	 * TCL H/W supports 2 DSCP-TID mapping tables.
	 *  Table 1 - Default DSCP-TID mapping table
	 *  Table 2 - 1 DSCP-TID override table
	 *
	 * If we need a different DSCP-TID mapping for this vap,
	 * call tid_classify to extract DSCP/ToS from frame and
	 * map to a TID and store in msdu_info. This is later used
	 * to fill in TCL Input descriptor (per-packet TID override).
	 */
	dp_tx_classify_tid(vdev, nbuf, &msdu_info);

	/*
	 * Classify the frame and call corresponding
	 * "prepare" function which extracts the segment (TSO)
	 * and fragmentation information (for TSO, SG, ME, or Raw)
	 * into MSDU_INFO structure which is later used to fill
	 * SW and HW descriptors.
	 */
	if (qdf_nbuf_is_tso(nbuf)) {
		dp_verbose_debug("TSO frame %pK", vdev);
		DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
				 qdf_nbuf_len(nbuf));

		if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
			DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
					 qdf_nbuf_len(nbuf));
			return nbuf;
		}

		goto send_multiple;
	}

	/* SG */
	if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("non-TSO SG frame %pK", vdev);

		DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
				 qdf_nbuf_len(nbuf));

		goto send_multiple;
	}

#ifdef ATH_SUPPORT_IQUE
	/* Mcast to Ucast Conversion */
	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
		    !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
			dp_verbose_debug("Mcast frm for ME %pK", vdev);

			DP_STATS_INC_PKT(vdev,
					 tx_i.mcast_en.mcast_pkt, 1,
					 qdf_nbuf_len(nbuf));
			if (dp_tx_prepare_send_me(vdev, nbuf) ==
					QDF_STATUS_SUCCESS) {
				return NULL;
			}
		}
	}
#endif

	/* RAW */
	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
		struct dp_tx_seg_info_s seg_info = {0};

		nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
		if (!nbuf)
			return NULL;

		dp_verbose_debug("Raw frame %pK", vdev);

		goto send_multiple;
	}

	if (qdf_unlikely(vdev->nawds_enabled)) {
		qdf_ether_header_t *eh = (qdf_ether_header_t *)
					  qdf_nbuf_data(nbuf);
		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
			dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);

		peer_id = DP_INVALID_PEER;
		DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
				 1, qdf_nbuf_len(nbuf));
	}

	/* Single linear frame */
	/*
	 * If nbuf is a simple linear frame, use send_single function to
	 * prepare direct-buffer type TCL descriptor and enqueue to TCL
	 * SRNG. There is no need to setup a MSDU extension descriptor.
	 */
	nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);

	return nbuf;

send_multiple:
	nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);

	if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
		dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);

	return nbuf;
}
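/*
 * Illustrative (non-compiled) usage sketch: a typical OSIF/HDD caller hands
 * off an skb and re-owns it only on failure. cdp_soc and vdev_id here are
 * hypothetical placeholders:
 *
 *	nbuf = dp_tx_send(cdp_soc, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);	(nbuf returned means send failed)
 */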
/**
 * dp_tx_reinject_handler() - Tx Reinject Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * This function reinjects frames back to Target.
 * Todo - Host queue needs to be added
 *
 * Return: none
 */
static
void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_vdev *vdev;
	struct dp_peer *peer = NULL;
	uint32_t peer_id = HTT_INVALID_PEER;
	qdf_nbuf_t nbuf = tx_desc->nbuf;
	qdf_nbuf_t nbuf_copy = NULL;
	struct dp_tx_msdu_info_s msdu_info;
	struct dp_soc *soc = NULL;
#ifdef WDS_VENDOR_EXTENSION
	int is_mcast = 0, is_ucast = 0;
	int num_peers_3addr = 0;
	qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
	struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif

	vdev = tx_desc->vdev;
	soc = vdev->pdev->soc;

	qdf_assert(vdev);

	qdf_mem_zero(&msdu_info, sizeof(msdu_info));

	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s Tx reinject path", __func__);

	DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

#ifdef WDS_VENDOR_EXTENSION
	if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
		is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
	} else {
		is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
	}
	is_ucast = !is_mcast;

	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			continue;

		/* Detect wds peers that use 3-addr framing for mcast.
		 * if there are any, the bss_peer is used to send the
		 * mcast frame using 3-addr format. all wds enabled
		 * peers that use 4-addr framing for mcast frames will
		 * be duplicated and sent as 4-addr frames below.
		 */
		if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
			num_peers_3addr = 1;
			break;
		}
	}
#endif

	if (qdf_unlikely(vdev->mesh_vdev)) {
		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
	} else {
		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
			if ((peer->peer_ids[0] != HTT_INVALID_PEER) &&
#ifdef WDS_VENDOR_EXTENSION
			/*
			 * . if 3-addr STA, then send on BSS Peer
			 * . if Peer WDS enabled and accept 4-addr mcast,
			 *   send mcast on that peer only
			 * . if Peer WDS enabled and accept 4-addr ucast,
			 *   send ucast on that peer only
			 */
			    ((peer->bss_peer && num_peers_3addr && is_mcast) ||
			     (peer->wds_enabled &&
			      ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
			       (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
#else
			    ((peer->bss_peer &&
			      !(vdev->osif_proxy_arp(vdev->osif_vdev, nbuf))))) {
#endif
				peer_id = DP_INVALID_PEER;

				nbuf_copy = qdf_nbuf_copy(nbuf);

				if (!nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("nbuf copy failed"));
					break;
				}

				nbuf_copy = dp_tx_send_msdu_single(vdev,
								   nbuf_copy,
								   &msdu_info,
								   peer_id,
								   NULL);

				if (nbuf_copy) {
					QDF_TRACE(QDF_MODULE_ID_DP,
						  QDF_TRACE_LEVEL_DEBUG,
						  FL("pkt send failed"));
					qdf_nbuf_free(nbuf_copy);
				} else {
					if (peer_id != DP_INVALID_PEER)
						DP_STATS_INC_PKT(peer,
								 tx.nawds_mcast,
								 1, qdf_nbuf_len(nbuf));
				}
			}
		}
	}

	qdf_nbuf_free(nbuf);

	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

/**
 * dp_tx_inspect_handler() - Tx Inspect Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 *
 * Handles Tx frames sent back to Host for inspection
 * (ProxyARP)
 *
 * Return: none
 */
static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev = tx_desc->pdev;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx inspect path",
		  __func__);

	qdf_assert(pdev);

	soc = pdev->soc;

	DP_STATS_INC_PKT(tx_desc->vdev, tx_i.inspect_pkts, 1,
			 qdf_nbuf_len(tx_desc->nbuf));

	DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
	dp_tx_desc_release(tx_desc, tx_desc->pool_id);
}

#ifdef FEATURE_PERPKT_INFO
/**
 * dp_get_completion_indication_for_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer: dp peer handle
 * @ts: transmit completion status structure
 * @netbuf: Buffer pointer for free
 * @time_latency: latency to be captured for the buffer
 *
 * This function is used for indication whether buffer needs to be
 * sent to stack for freeing or not
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	struct tx_capture_hdr *ppdu_hdr;
	uint16_t peer_id = ts->peer_id;
	uint32_t ppdu_id = ts->ppdu_id;
	uint8_t first_msdu = ts->first_msdu;
	uint8_t last_msdu = ts->last_msdu;

	if (qdf_unlikely(!pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
			 !pdev->latency_capture_enable))
		return QDF_STATUS_E_NOSUPPORT;

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Peer Invalid"));
		return QDF_STATUS_E_INVAL;
	}

	if (pdev->mcopy_mode) {
		if ((pdev->m_copy_id.tx_ppdu_id == ppdu_id) &&
		    (pdev->m_copy_id.tx_peer_id == peer_id)) {
			return QDF_STATUS_E_INVAL;
		}

		pdev->m_copy_id.tx_ppdu_id = ppdu_id;
		pdev->m_copy_id.tx_peer_id = peer_id;
	}

	if (!qdf_nbuf_push_head(netbuf, sizeof(struct tx_capture_hdr))) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("No headroom"));
		return QDF_STATUS_E_NOMEM;
	}

	ppdu_hdr = (struct tx_capture_hdr *)qdf_nbuf_data(netbuf);
	qdf_mem_copy(ppdu_hdr->ta, peer->vdev->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	qdf_mem_copy(ppdu_hdr->ra, peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);
	ppdu_hdr->ppdu_id = ppdu_id;
	ppdu_hdr->peer_id = peer_id;
	ppdu_hdr->first_msdu = first_msdu;
	ppdu_hdr->last_msdu = last_msdu;

	if (qdf_unlikely(pdev->latency_capture_enable)) {
		ppdu_hdr->tsf = ts->tsf;
		ppdu_hdr->time_latency = time_latency;
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_send_completion_to_stack() - send completion to stack
 * @soc: dp_soc handle
 * @pdev: dp_pdev handle
 * @peer_id: peer_id of the peer for which completion came
 * @ppdu_id: ppdu_id
 * @netbuf: Buffer pointer for free
 *
 * This function is used to send completion to stack
 * to free buffer
 *
 * Return: none
 */
void dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
				 uint16_t peer_id, uint32_t ppdu_id,
				 qdf_nbuf_t netbuf)
{
	dp_wdi_event_handler(WDI_EVENT_TX_DATA, soc,
			     netbuf, peer_id,
			     WDI_NO_VAL, pdev->pdev_id);
}
#else
static QDF_STATUS
dp_get_completion_indication_for_stack(struct dp_soc *soc,
				       struct dp_pdev *pdev,
				       struct dp_peer *peer,
				       struct hal_tx_completion_status *ts,
				       qdf_nbuf_t netbuf,
				       uint64_t time_latency)
{
	return QDF_STATUS_E_NOSUPPORT;
}

static void
dp_send_completion_to_stack(struct dp_soc *soc, struct dp_pdev *pdev,
			    uint16_t peer_id, uint32_t ppdu_id, qdf_nbuf_t netbuf)
{
}
#endif

/**
 * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
 * @soc: Soc handle
 * @desc: software Tx descriptor to be processed
 *
 * Return: none
 */
static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
				       struct dp_tx_desc_s *desc)
{
	struct dp_vdev *vdev = desc->vdev;
	qdf_nbuf_t nbuf = desc->nbuf;

	/* nbuf already freed in vdev detach path */
	if (!nbuf)
		return;

	/* If it is TDLS mgmt, don't unmap or free the frame */
	if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
		return dp_non_std_tx_comp_free_buff(soc, desc, vdev);

	/* 0 : MSDU buffer, 1 : MLE */
	if (desc->msdu_ext_desc) {
		/* TSO free */
		if (hal_tx_ext_desc_get_tso_enable(
					desc->msdu_ext_desc->vaddr)) {
			/* unmap each TSO seg before freeing the nbuf */
			dp_tx_tso_unmap_segment(soc, desc->tso_desc,
						desc->tso_num_desc);
			qdf_nbuf_free(nbuf);
			return;
		}
	}

	qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
				     QDF_DMA_TO_DEVICE, nbuf->len);

	if (qdf_unlikely(!vdev)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (qdf_likely(!vdev->mesh_vdev))
		qdf_nbuf_free(nbuf);
	else {
		if (desc->flags & DP_TX_DESC_FLAG_TO_FW) {
			qdf_nbuf_free(nbuf);
			DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
		} else
			vdev->osif_tx_free_ext((nbuf));
	}
}

#ifdef MESH_MODE_SUPPORT
/**
 * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
 *                                         in mesh meta header
 * @tx_desc: software descriptor head pointer
 * @ts: pointer to tx completion stats
 *
 * Return: none
 */
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
	struct meta_hdr_s *mhdr;
	qdf_nbuf_t netbuf = tx_desc->nbuf;

	if (!tx_desc->msdu_ext_desc) {
		if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "netbuf %pK offset %d",
				  netbuf, tx_desc->pkt_offset);
			return;
		}
	}
	if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "netbuf %pK offset %lu", netbuf,
			  sizeof(struct meta_hdr_s));
		return;
	}

	mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
	mhdr->rssi = ts->ack_frame_rssi;
	mhdr->band = tx_desc->pdev->operating_channel.band;
	mhdr->channel = tx_desc->pdev->operating_channel.num;
}

#else
static
void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
					 struct hal_tx_completion_status *ts)
{
}
#endif

/**
 * dp_tx_compute_delay() - Compute SW enqueue, HW transmit and interframe
 *                         delays and update the per-TID delay stats
 * @vdev: vdev handle
 * @tx_desc: tx descriptor
 * @tid: tid value
 * @ring_id: TCL or WBM ring number for transmit path
 *
 * Return: none
 */
static void dp_tx_compute_delay(struct dp_vdev *vdev,
				struct dp_tx_desc_s *tx_desc,
				uint8_t tid, uint8_t ring_id)
{
	int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
	uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;

	if (qdf_likely(!vdev->pdev->delay_stats_flag))
		return;

	current_timestamp = qdf_ktime_to_ms(qdf_ktime_get());
	timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
	timestamp_hw_enqueue = tx_desc->timestamp;
	sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
	fwhw_transmit_delay = (uint32_t)(current_timestamp -
					 timestamp_hw_enqueue);
	interframe_delay = (uint32_t)(timestamp_ingress -
				      vdev->prev_tx_enq_tstamp);

	/*
	 * Delay in software enqueue
	 */
	dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
			      CDP_DELAY_STATS_SW_ENQ, ring_id);
	/*
	 * Delay between packet enqueued to HW and Tx completion
	 */
	dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
			      CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);

	/*
	 * Update interframe delay stats calculated at hardstart receive point.
	 * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so the
	 * interframe delay will not be calculated correctly for the 1st frame.
	 * On the other hand, this avoids an extra per packet check
	 * of !vdev->prev_tx_enq_tstamp.
	 */
	dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
			      CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
	vdev->prev_tx_enq_tstamp = timestamp_ingress;
}

#ifdef DISABLE_DP_STATS
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
}
#else
static
inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
{
	enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;

	DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
	if (subtype != QDF_PROTO_INVALID)
		DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
}
#endif

/**
 * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
 *                             per wbm ring
 *
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: None
 */
static inline void
dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer, uint8_t ring_id)
{
	struct dp_pdev *pdev = peer->vdev->pdev;
	struct dp_soc *soc = NULL;
	uint8_t mcs, pkt_type;
	uint8_t tid = ts->tid;
	uint32_t length;
	struct cdp_tid_tx_stats *tid_stats;

	if (!pdev)
		return;

	if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
		tid = CDP_MAX_DATA_TIDS - 1;

	tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
	soc = pdev->soc;

	mcs = ts->mcs;
	pkt_type = ts->pkt_type;

	if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
		dp_err("Release source is not from TQM");
		return;
	}

	length = qdf_nbuf_len(tx_desc->nbuf);
	DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);

	if (qdf_unlikely(pdev->delay_stats_flag))
		dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
  2638. DP_STATS_INCC(peer, tx.dropped.age_out, 1,
  2639. (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
  2640. DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
  2641. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  2642. DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
  2643. (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
  2644. DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
  2645. (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
  2646. DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
  2647. (ts->status == HAL_TX_TQM_RR_FW_REASON1));
  2648. DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
  2649. (ts->status == HAL_TX_TQM_RR_FW_REASON2));
  2650. DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
  2651. (ts->status == HAL_TX_TQM_RR_FW_REASON3));
  2652. /*
  2653. * tx_failed is ideally supposed to be updated from HTT ppdu completion
  2654. * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
  2655. * are no completions for failed cases. Hence updating tx_failed from
  2656. * data path. Please note that if tx_failed is fixed to be from ppdu,
  2657. * then this has to be removed
  2658. */
  2659. peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
  2660. peer->stats.tx.dropped.fw_rem_notx +
  2661. peer->stats.tx.dropped.fw_rem_tx +
  2662. peer->stats.tx.dropped.age_out +
  2663. peer->stats.tx.dropped.fw_reason1 +
  2664. peer->stats.tx.dropped.fw_reason2 +
  2665. peer->stats.tx.dropped.fw_reason3;
  2666. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  2667. tid_stats->tqm_status_cnt[ts->status]++;
  2668. }
  2669. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  2670. dp_update_no_ack_stats(tx_desc->nbuf, peer);
  2671. return;
  2672. }
  2673. DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
  2674. DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
  2675. DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
  2676. /*
  2677. * Following Rate Statistics are updated from HTT PPDU events from FW.
  2678. * Return from here if HTT PPDU events are enabled.
  2679. */
  2680. if (!(soc->process_tx_status))
  2681. return;
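	/*
	 * MCS bucketing below: each preamble type (DOT11_A/B/N/AC/AX) has
	 * its own valid MCS range; an MCS reported at or beyond that range
	 * is folded into the MAX_MCS - 1 overflow bucket for that pkt_type,
	 * so exactly one mcs_count[] entry is incremented per frame.
	 */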
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
		      ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
	DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
		      ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));

	DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
	DP_STATS_INC(peer, tx.bw[ts->bw], 1);
	DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
	DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
	DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
	DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
	DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ts->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_flow_pool_lock() - take flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc,
			  struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_lock_bh(&pool->flow_pool_lock);
}

/**
 * dp_tx_flow_pool_unlock() - release flow pool lock
 * @soc: core txrx main context
 * @tx_desc: tx desc
 *
 * Return: None
 */
static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc,
			    struct dp_tx_desc_s *tx_desc)
{
	struct dp_tx_desc_pool_s *pool;
	uint8_t desc_pool_id;

	desc_pool_id = tx_desc->pool_id;
	pool = &soc->tx_desc[desc_pool_id];

	qdf_spin_unlock_bh(&pool->flow_pool_lock);
}
#else
static inline
void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
{
}
#endif
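/*
 * Note: with QCA_LL_TX_FLOW_CONTROL_V2, descriptor pools can be torn down
 * at runtime, so the flow_pool_lock is held below while dereferencing
 * tx_desc->vdev to keep the pool (and the vdev pointer in the descriptor)
 * stable against a concurrent pool deletion. This rationale is inferred
 * from the lock placement rather than stated explicitly in the source.
 */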
/**
 * dp_tx_notify_completion() - Notify tx completion for this desc
 * @soc: core txrx main context
 * @tx_desc: tx desc
 * @netbuf: buffer
 *
 * Return: none
 */
static inline void dp_tx_notify_completion(struct dp_soc *soc,
					   struct dp_tx_desc_s *tx_desc,
					   qdf_nbuf_t netbuf)
{
	void *osif_dev;
	ol_txrx_completion_fp tx_compl_cbk = NULL;

	qdf_assert(tx_desc);

	dp_tx_flow_pool_lock(soc, tx_desc);

	if (!tx_desc->vdev ||
	    !tx_desc->vdev->osif_vdev) {
		dp_tx_flow_pool_unlock(soc, tx_desc);
		return;
	}

	osif_dev = tx_desc->vdev->osif_vdev;
	tx_compl_cbk = tx_desc->vdev->tx_comp;
	dp_tx_flow_pool_unlock(soc, tx_desc);

	if (tx_compl_cbk)
		tx_compl_cbk(netbuf, osif_dev);
}
/**
 * dp_tx_sojourn_stats_process() - Collect sojourn stats
 * @pdev: pdev handle
 * @peer: dp_peer handle
 * @tid: tid value
 * @txdesc_ts: timestamp from txdesc
 * @ppdu_id: ppdu id
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
	uint64_t delta_ms;
	struct cdp_tx_sojourn_stats *sojourn_stats;

	if (qdf_unlikely(pdev->enhanced_stats_en == 0))
		return;

	if (qdf_unlikely(tid == HTT_INVALID_TID ||
			 tid >= CDP_DATA_TID_MAX))
		return;

	if (qdf_unlikely(!pdev->sojourn_buf))
		return;

	sojourn_stats = (struct cdp_tx_sojourn_stats *)
		qdf_nbuf_data(pdev->sojourn_buf);

	sojourn_stats->cookie = (void *)peer->wlanstats_ctx;

	delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
		   txdesc_ts;
	qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
			    delta_ms);
	sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
	sojourn_stats->num_msdus[tid] = 1;
	sojourn_stats->avg_sojourn_msdu[tid].internal =
		peer->avg_sojourn_msdu[tid].internal;
	dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
			     pdev->sojourn_buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
	sojourn_stats->sum_sojourn_msdu[tid] = 0;
	sojourn_stats->num_msdus[tid] = 0;
	sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
}
#else
static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
					       struct dp_peer *peer,
					       uint8_t tid,
					       uint64_t txdesc_ts,
					       uint32_t ppdu_id)
{
}
#endif
/**
 * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
 * @soc: DP Soc handle
 * @desc: software Tx descriptor
 * @ts: Tx completion status from HAL/HTT descriptor
 * @peer: peer handle
 *
 * Return: none
 */
static inline void
dp_tx_comp_process_desc(struct dp_soc *soc,
			struct dp_tx_desc_s *desc,
			struct hal_tx_completion_status *ts,
			struct dp_peer *peer)
{
	uint64_t time_latency = 0;

	/*
	 * m_copy/tx_capture modes are not supported for
	 * scatter gather packets
	 */
	if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
		time_latency = (qdf_ktime_to_ms(qdf_ktime_get()) -
				desc->timestamp);
	}
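	/*
	 * The special completion paths below (tx-capture queue, per-packet
	 * completion indication to the stack) apply only to single-buffer
	 * frames; frames carrying an MSDU extension descriptor (SG/TSO/ME)
	 * fall through to the plain buffer free at the bottom.
	 */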
	if (!(desc->msdu_ext_desc)) {
		if (QDF_STATUS_SUCCESS ==
		    dp_tx_add_to_comp_queue(soc, desc, ts, peer)) {
			return;
		}

		if (QDF_STATUS_SUCCESS ==
		    dp_get_completion_indication_for_stack(soc,
							   desc->pdev,
							   peer, ts,
							   desc->nbuf,
							   time_latency)) {
			qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
						     QDF_DMA_TO_DEVICE,
						     desc->nbuf->len);
			dp_send_completion_to_stack(soc,
						    desc->pdev,
						    ts->peer_id,
						    ts->ppdu_id,
						    desc->nbuf);
			return;
		}
	}

	dp_tx_comp_free_buf(soc, desc);
}
/**
 * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
 * @tx_desc: software descriptor head pointer
 * @ts: Tx completion status
 * @peer: peer handle
 * @ring_id: ring number
 *
 * Return: none
 */
static inline
void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
				  struct hal_tx_completion_status *ts,
				  struct dp_peer *peer, uint8_t ring_id)
{
	uint32_t length;
	qdf_ether_header_t *eh;
	struct dp_soc *soc = NULL;
	struct dp_vdev *vdev = tx_desc->vdev;
	qdf_nbuf_t nbuf = tx_desc->nbuf;

	if (!vdev || !nbuf) {
		dp_info_rl("invalid tx descriptor. vdev or nbuf NULL");
		goto out;
	}

	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);

	DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
				 QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(nbuf),
				 sizeof(qdf_nbuf_data(nbuf)),
				 tx_desc->id,
				 ts->status));

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "-------------------- \n"
		  "Tx Completion Stats: \n"
		  "-------------------- \n"
		  "ack_frame_rssi = %d \n"
		  "first_msdu = %d \n"
		  "last_msdu = %d \n"
		  "msdu_part_of_amsdu = %d \n"
		  "rate_stats valid = %d \n"
		  "bw = %d \n"
		  "pkt_type = %d \n"
		  "stbc = %d \n"
		  "ldpc = %d \n"
		  "sgi = %d \n"
		  "mcs = %d \n"
		  "ofdma = %d \n"
		  "tones_in_ru = %d \n"
		  "tsf = %d \n"
		  "ppdu_id = %d \n"
		  "transmit_cnt = %d \n"
		  "tid = %d \n"
		  "peer_id = %d\n",
		  ts->ack_frame_rssi, ts->first_msdu,
		  ts->last_msdu, ts->msdu_part_of_amsdu,
		  ts->valid, ts->bw, ts->pkt_type, ts->stbc,
		  ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
		  ts->tones_in_ru, ts->tsf, ts->ppdu_id,
		  ts->transmit_cnt, ts->tid, ts->peer_id);

	soc = vdev->pdev->soc;

	/* Update SoC level stats */
	DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
		      (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));

	/* Update per-packet stats for mesh mode */
	if (qdf_unlikely(vdev->mesh_vdev) &&
	    !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
		dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);

	length = qdf_nbuf_len(nbuf);
	/* Update peer level stats */
	if (!peer) {
		QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_DP,
				   "peer is null or deletion in progress");
		DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
		goto out;
	}

	if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
		if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
			DP_STATS_INC_PKT(peer, tx.mcast, 1, length);

			if ((peer->vdev->tx_encap_type ==
			     htt_cmn_pkt_type_ethernet) &&
			    QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
				DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
			}
		}
	} else {
		DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
		if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED)
			DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
	}

	dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);

#ifdef QCA_SUPPORT_RDK_STATS
	if (soc->wlanstats_enabled)
		dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
					    tx_desc->timestamp,
					    ts->ppdu_id);
#endif

out:
	return;
}
/**
 * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
 * @soc: core txrx main context
 * @comp_head: software descriptor head pointer
 * @ring_id: ring number
 *
 * This function will process batch of descriptors reaped by dp_tx_comp_handler
 * and release the software descriptors after processing is complete
 *
 * Return: none
 */
static void
dp_tx_comp_process_desc_list(struct dp_soc *soc,
			     struct dp_tx_desc_s *comp_head, uint8_t ring_id)
{
	struct dp_tx_desc_s *desc;
	struct dp_tx_desc_s *next;
	struct hal_tx_completion_status ts;
	struct dp_peer *peer;
	qdf_nbuf_t netbuf;

	desc = comp_head;

	while (desc) {
		if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
			struct dp_pdev *pdev = desc->pdev;

			peer = dp_peer_find_by_id(soc, desc->peer_id);
			if (qdf_likely(peer)) {
				/*
				 * Increment peer statistics
				 * Minimal statistics update done here
				 */
				DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
						 desc->length);

				if (desc->tx_status !=
				    HAL_TX_TQM_RR_FRAME_ACKED)
					peer->stats.tx.tx_failed++;

				dp_peer_unref_del_find_by_id(peer);
			}

			qdf_assert(pdev);
			dp_tx_outstanding_dec(pdev);

			/*
			 * Calling a QDF wrapper here was creating a
			 * significant performance impact, so the wrapper
			 * call is avoided here.
			 */
			next = desc->next;
			qdf_mem_unmap_nbytes_single(soc->osdev,
						    desc->dma_addr,
						    QDF_DMA_TO_DEVICE,
						    desc->length);
			qdf_nbuf_free(desc->nbuf);
			dp_tx_desc_free(soc, desc, desc->pool_id);
			desc = next;
			continue;
		}
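		/*
		 * Slow path for non-SIMPLE descriptors: parse the completion
		 * words previously synced into the SW descriptor and run the
		 * full per-peer stats and notification processing.
		 */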
		hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);

		peer = dp_peer_find_by_id(soc, ts.peer_id);
		dp_tx_comp_process_tx_status(desc, &ts, peer, ring_id);

		netbuf = desc->nbuf;
		/* check tx complete notification */
		if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_NOTIFY_COMP(netbuf))
			dp_tx_notify_completion(soc, desc, netbuf);

		dp_tx_comp_process_desc(soc, desc, &ts, peer);

		if (peer)
			dp_peer_unref_del_find_by_id(peer);

		next = desc->next;

		dp_tx_desc_release(desc, desc->pool_id);
		desc = next;
	}
}
/**
 * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
 * @tx_desc: software descriptor head pointer
 * @status: Tx completion status from HTT descriptor
 * @ring_id: ring number
 *
 * This function will process HTT Tx indication messages from Target
 *
 * Return: none
 */
static
void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status,
				  uint8_t ring_id)
{
	uint8_t tx_status;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_soc *soc;
	struct hal_tx_completion_status ts = {0};
	uint32_t *htt_desc = (uint32_t *)status;
	struct dp_peer *peer;
	struct cdp_tid_tx_stats *tid_stats = NULL;
	struct htt_soc *htt_handle;

	/*
	 * If the descriptor is already freed in vdev_detach,
	 * continue to next descriptor
	 */
	if (!tx_desc->vdev && !tx_desc->flags) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "Descriptor freed in vdev_detach %d",
			  tx_desc->id);
		return;
	}

	pdev = tx_desc->pdev;
	soc = pdev->soc;

	if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
		QDF_TRACE(QDF_MODULE_ID_DP,
			  QDF_TRACE_LEVEL_INFO,
			  "pdev in down state %d",
			  tx_desc->id);
		dp_tx_comp_free_buf(soc, tx_desc);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);
		return;
	}

	qdf_assert(tx_desc->pdev);

	vdev = tx_desc->vdev;
	if (!vdev)
		return;

	tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
	htt_handle = (struct htt_soc *)soc->htt_handle;
	htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);

	switch (tx_status) {
	case HTT_TX_FW2WBM_TX_STATUS_OK:
	case HTT_TX_FW2WBM_TX_STATUS_DROP:
	case HTT_TX_FW2WBM_TX_STATUS_TTL:
	{
		uint8_t tid;

		if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
			ts.peer_id =
				HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
						htt_desc[2]);
			ts.tid =
				HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
						htt_desc[2]);
		} else {
			ts.peer_id = HTT_INVALID_PEER;
			ts.tid = HTT_INVALID_TID;
		}
		ts.ppdu_id =
			HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
					htt_desc[1]);
		ts.ack_frame_rssi =
			HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
					htt_desc[1]);

		ts.first_msdu = 1;
		ts.last_msdu = 1;
		tid = ts.tid;
		if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
			tid = CDP_MAX_DATA_TIDS - 1;

		tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];

		if (qdf_unlikely(pdev->delay_stats_flag))
			dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
		if (tx_status < CDP_MAX_TX_HTT_STATUS)
			tid_stats->htt_status_cnt[tx_status]++;

		peer = dp_peer_find_by_id(soc, ts.peer_id);

		dp_tx_comp_process_tx_status(tx_desc, &ts, peer, ring_id);
		dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
		dp_tx_desc_release(tx_desc, tx_desc->pool_id);

		/*
		 * Drop the peer reference only after the completion has
		 * been fully processed; releasing it before the calls
		 * above would let the peer be deleted while still in use.
		 */
		if (qdf_likely(peer))
			dp_peer_unref_del_find_by_id(peer);

		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
	{
		dp_tx_reinject_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
	{
		dp_tx_inspect_handler(tx_desc, status);
		break;
	}
	case HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY:
	{
		dp_tx_mec_handler(vdev, status);
		break;
	}
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s Invalid HTT tx_status %d\n",
			  __func__, tx_status);
		break;
	}
}
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	bool limit_hit = false;
	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;

	limit_hit =
		(num_reaped >= cfg->tx_comp_loop_pkt_limit) ? true : false;

	if (limit_hit)
		DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);

	return limit_hit;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
}
#else
static inline
bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped)
{
	return false;
}

static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
{
	return false;
}
#endif
uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
			    hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
			    uint32_t quota)
{
	void *tx_comp_hal_desc;
	uint8_t buffer_src;
	uint8_t pool_id;
	uint32_t tx_desc_id;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_s *head_desc = NULL;
	struct dp_tx_desc_s *tail_desc = NULL;
	uint32_t num_processed = 0;
	uint32_t count = 0;
	uint32_t num_avail_for_reap = 0;
	bool force_break = false;

	DP_HIST_INIT();

more_data:
	/* Re-initialize local variables to be re-used */
	head_desc = NULL;
	tail_desc = NULL;

	if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
		dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
		return 0;
	}

	num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc,
						    hal_ring_hdl, 0);

	if (num_avail_for_reap >= quota)
		num_avail_for_reap = quota;

	dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);

	/* Find head descriptor from completion ring */
	while (qdf_likely(num_avail_for_reap--)) {

		tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
		if (qdf_unlikely(!tx_comp_hal_desc))
			break;

		buffer_src = hal_tx_comp_get_buffer_source(tx_comp_hal_desc);

		/* If this buffer was not released by TQM or FW, then it is
		 * not a Tx completion indication; log it and move on.
		 */
		if (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
		    (qdf_unlikely(buffer_src !=
					HAL_TX_COMP_RELEASE_SOURCE_FW))) {
			uint8_t wbm_internal_error;

			dp_err_rl(
				"Tx comp release_src != TQM | FW but from %d",
				buffer_src);
			hal_dump_comp_desc(tx_comp_hal_desc);
			DP_STATS_INC(soc, tx.invalid_release_source, 1);

			/* When WBM sees NULL buffer_addr_info in any of
			 * ingress rings it sends an error indication,
			 * with wbm_internal_error=1, to a specific ring.
			 * The WBM2SW ring used to indicate these errors is
			 * fixed in HW, and that ring is being used as Tx
			 * completion ring. These errors are not related to
			 * Tx completions, and should just be ignored
			 */
			wbm_internal_error = hal_get_wbm_internal_error(
							soc->hal_soc,
							tx_comp_hal_desc);

			if (wbm_internal_error) {
				dp_err_rl("Tx comp wbm_internal_error!!");
				DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);

				if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
								buffer_src)
					dp_handle_wbm_internal_error(
						soc,
						tx_comp_hal_desc,
						hal_tx_comp_get_buffer_type(
							tx_comp_hal_desc));

			} else {
				dp_err_rl("Tx comp wbm_internal_error false");
				DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
			}
			continue;
		}

		/* Get descriptor id */
		tx_desc_id = hal_tx_comp_get_desc_id(tx_comp_hal_desc);

		pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
			DP_TX_DESC_ID_POOL_OS;

		/* Find Tx descriptor */
		tx_desc = dp_tx_desc_find(soc, pool_id,
				(tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS,
				(tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS);
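		/*
		 * The 32-bit descriptor id just decoded is a cookie composed
		 * of pool id, page id and offset bit-fields (per the
		 * DP_TX_DESC_ID_* masks above), so the SW descriptor can be
		 * located by direct indexing without any table walk.
		 */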
		/*
		 * If the release source is FW, process the HTT status
		 */
		if (qdf_unlikely(buffer_src ==
					HAL_TX_COMP_RELEASE_SOURCE_FW)) {
			uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
			hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
					htt_tx_status);
			dp_tx_process_htt_completion(tx_desc,
					htt_tx_status, ring_id);
		} else {
			/*
			 * If the fast completion mode is enabled extended
			 * metadata from descriptor is not copied
			 */
			if (qdf_likely(tx_desc->flags &
						DP_TX_DESC_FLAG_SIMPLE)) {
				tx_desc->peer_id =
					hal_tx_comp_get_peer_id(tx_comp_hal_desc);
				tx_desc->tx_status =
					hal_tx_comp_get_tx_status(tx_comp_hal_desc);
				goto add_to_pool;
			}

			/*
			 * If the descriptor is already freed in vdev_detach,
			 * continue to next descriptor
			 */
			if (qdf_unlikely(!tx_desc->vdev) &&
			    qdf_unlikely(!tx_desc->flags)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "Descriptor freed in vdev_detach %d",
					  tx_desc_id);
				continue;
			}

			if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_INFO,
					  "pdev in down state %d",
					  tx_desc_id);
				dp_tx_comp_free_buf(soc, tx_desc);
				dp_tx_desc_release(tx_desc, tx_desc->pool_id);
				goto next_desc;
			}

			/* Pool id is not matching. Error */
			if (tx_desc->pool_id != pool_id) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Tx Comp pool id %d not matched %d",
					  pool_id, tx_desc->pool_id);
				qdf_assert_always(0);
			}

			if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
			    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_FATAL,
					  "Txdesc invalid, flgs = %x,id = %d",
					  tx_desc->flags, tx_desc_id);
				qdf_assert_always(0);
			}

			/* Collect hw completion contents */
			hal_tx_comp_desc_sync(tx_comp_hal_desc,
					      &tx_desc->comp, 1);

add_to_pool:
			DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);

			/* First ring descriptor on the cycle */
			if (!head_desc) {
				head_desc = tx_desc;
				tail_desc = tx_desc;
			}
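			/*
			 * On the first iteration head_desc == tail_desc ==
			 * tx_desc, so the tail_desc->next assignment below
			 * briefly self-links; tx_desc->next is then reset
			 * to NULL, which keeps the list well formed.
			 */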
			tail_desc->next = tx_desc;
			tx_desc->next = NULL;
			tail_desc = tx_desc;
		}
next_desc:
		num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);

		/*
		 * Stop processing once the count of reaped descriptors
		 * reaches the configured per-loop packet limit.
		 */
		count++;
		if (dp_tx_comp_loop_pkt_limit_hit(soc, count))
			break;
	}

	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);

	/* Process the reaped descriptors */
	if (head_desc)
		dp_tx_comp_process_desc_list(soc, head_desc, ring_id);

	if (dp_tx_comp_enable_eol_data_check(soc)) {
		if (num_processed >= quota)
			force_break = true;

		if (!force_break &&
		    hal_srng_dst_peek_sync_locked(soc->hal_soc,
						  hal_ring_hdl)) {
			DP_STATS_INC(soc, tx.hp_oos2, 1);
			if (!hif_exec_should_yield(soc->hif_handle,
						   int_ctx->dp_intr_id))
				goto more_data;
		}
	}
	DP_TX_HIST_STATS_PER_PDEV();

	return num_processed;
}
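/*
 * dp_tx_comp_handler() is typically invoked from the DP interrupt/NAPI
 * service path with a per-ring budget; the "goto more_data" above re-arms
 * one more reap pass when new completions arrived while the ring was being
 * drained and hif_exec_should_yield() reports that the context still has
 * budget to continue.
 */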
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;
	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
					vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	return QDF_STATUS_SUCCESS;
}

#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif

/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for TDLS link
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set search type only when peer map v2 messaging is enabled
	 * as we will have the search index (AST hash) only when v2 is
	 * enabled
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, match only descriptors belonging to that
	 * vdev; if vdev is NULL, match descriptors belonging to the pdev.
	 */
	return vdev ? (tx_desc->vdev == vdev) : (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 *			to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 *	  NULL: no specific Vdev is required and check all allocated TX desc
 *	  on this pdev.
 *	  Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 *	true: flush the TX desc.
 *	false: only reset the Vdev in each allocated TX desc
 *	that is associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in associated TX
 * Desc.
 */
static void dp_tx_desc_flush(struct dp_pdev *pdev,
			     struct dp_vdev *vdev,
			     bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock in case the pool is freed
		 * because all tx_desc get recycled while TX completions
		 * are being handled. This is not necessary for a force
		 * flush because:
		 * a. a double lock would occur if dp_tx_desc_release is
		 *    also trying to acquire it, and
		 * b. the dp interrupt has already been disabled before a
		 *    force TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free TX desc if force free is
				 * required, otherwise only reset vdev
				 * in this TX desc.
				 */
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev = NULL;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */

/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev = NULL;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

static void dp_tx_desc_flush(struct dp_pdev *pdev,
			     struct dp_vdev *vdev,
			     bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);
	dp_tx_vdev_multipass_deinit(vdev);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_init() - initialize dp tx for the pdev
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* Initialize Flow control counters */
	qdf_atomic_init(&pdev->num_tx_exception);
	qdf_atomic_init(&pdev->num_tx_outstanding);

	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
		/* Initialize descriptors in TCL Ring */
		hal_tx_init_data_ring(soc->hal_soc,
				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
	}

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_pdev_detach() - detach pdev from dp tx
 * @pdev: physical device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev)
{
	/* flush TX outstanding data per pdev */
	dp_tx_desc_flush(pdev, NULL, true);
	dp_tx_me_exit(pdev);

	return QDF_STATUS_SUCCESS;
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - TSO cmn desc pool allocator
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
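/*
 * Note: the *_alloc functions above only reserve memory; the matching
 * *_init functions below set up the runtime state of the pools. The split
 * appears to mirror the soc-level attach/init phases, with teardown
 * likewise split across the *_deinit and *_free counterparts.
 */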
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}