dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

qdf_export_symbol(sec_type_map);
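
/*
 * The table above is indexed by the cdp_sec_type enum value; for example,
 * assuming the upstream enum ordering matches this initializer,
 * sec_type_map[cdp_sec_type_tkip] yields HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC.
 */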
#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats - Update the outstanding tx desc count and its
 * high watermark on the pdev, and report both to the mem-stats layer
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);

	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;

	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}
#else /* CONFIG_WLAN_SYSFS_MEM_STATS */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
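
/*
 * Note: with TX packet capture enhancements enabled, the capture path may
 * unmap an nbuf ahead of completion; dp_tx_enh_unmap() records this in
 * DP_TX_DESC_FLAG_UNMAP_DONE so that dp_tx_unmap() does not unmap the same
 * buffer twice.
 */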
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
				     QDF_DMA_TO_DEVICE,
				     desc->nbuf->len);
	desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
		qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
					     QDF_DMA_TO_DEVICE,
					     desc->nbuf->len);
}
#else
static inline
void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
}

static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
	qdf_nbuf_unmap_nbytes_single(soc->osdev, desc->nbuf,
				     QDF_DMA_TO_DEVICE, desc->nbuf->len);
}
#endif
#ifdef QCA_TX_LIMIT_CHECK
/**
 * dp_tx_limit_check - Check if allocated tx descriptors reached
 * soc max limit and pdev max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
			soc->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return true;
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed) {
		dp_tx_info("queued packets are more than max tx, drop the frame");
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_outstand.num, 1);
		return true;
	}
	return false;
}

/**
 * dp_tx_exception_limit_check - Check if allocated tx exception descriptors
 * reached soc max limit
 * @vdev: DP vdev handle
 *
 * Return: true if allocated tx descriptors reached max configured value, else
 * false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exc packets are more than max, drop the exc pkt");
		DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
		return true;
	}
	return false;
}
/**
 * dp_tx_outstanding_inc - Increment outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_inc(&pdev->num_tx_outstanding);
	qdf_atomic_inc(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * dp_tx_outstanding_dec - Decrement outstanding tx desc values on pdev and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	qdf_atomic_dec(&pdev->num_tx_outstanding);
	qdf_atomic_dec(&soc->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#else //QCA_TX_LIMIT_CHECK
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
{
	return false;
}
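
/*
 * Without QCA_TX_LIMIT_CHECK the limit checks are no-ops and only the
 * pdev-level outstanding counter is maintained; soc->num_tx_outstanding
 * is not touched in this configuration.
 */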
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif //QCA_TX_LIMIT_CHECK
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_desc_event *entry;
	uint32_t idx;

	if (qdf_unlikely(!soc->tx_tcl_history || !soc->tx_comp_history))
		return;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		idx = dp_history_get_next_index(&soc->tx_comp_history->index,
						DP_TX_COMP_HISTORY_SIZE);
		entry = &soc->tx_comp_history->entry[idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		idx = dp_history_get_next_index(&soc->tx_tcl_history->index,
						DP_TX_TCL_HISTORY_SIZE);
		entry = &soc->tx_tcl_history->entry[idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;
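
	/*
	 * Frags 1..num_frags-1 are recorded per segment below; frag 0 (the
	 * header fragment) is recorded only once, against the last element
	 * of the segment list, with the 0xFFFFFFFF cookie as a marker.
	 */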
	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc - core txrx main context
 * @seg_desc - tso segment descriptor
 * @num_seg_desc - tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;

		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}
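
/*
 * Descriptor lifecycle note: each completed segment returns its seg
 * descriptor to the free list right away, while the shared num_seg
 * descriptor is released only once its tso_cmn_num_seg count drops to
 * zero, i.e. after the last segment of the jumbo frame completes.
 */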
/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            back to the freelist
 *
 * @soc - soc device handle
 * @tx_desc - Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->tso_desc)) {
		dp_tx_err("TSO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->tso_num_desc);
			tx_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->tso_desc);
		tx_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif
/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return:
 */
static void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->frm_type == dp_tx_frm_tso)
		dp_tx_tso_desc_release(soc, tx_desc);

	if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG)
		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);

	if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
		dp_tx_me_free_buf(tx_desc->pdev, tx_desc->me_buffer);

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}
/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
		(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0])) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does. so, no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}
/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;
	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);
	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);
	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif
#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return - void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return - void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;
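
	/*
	 * A monotonically increasing counter folded into the range
	 * [0, CDP_MAX_TSO_PACKETS) so that per-packet TSO stats reuse a
	 * fixed-size ring of slots.
	 */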
	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
		     % CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */
/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   free the tso segments descriptor and
 *                                   tso num segments descriptor
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return - void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
		tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso num segment descriptors (the list typically holds
	 * only one)
	 */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}
/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
		qdf_nbuf_t msdu, struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
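
/*
 * The compile-time assert above guarantees that the meta_data[] scratch
 * area in dp_tx_msdu_info_s (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit
 * words) is large enough to hold a struct htt_tx_msdu_desc_ext2_t.
 */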
/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return:
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;
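
	/*
	 * The descriptor is composed in the cached stack buffer above and
	 * then written out to the (potentially uncached) extension
	 * descriptor memory in one shot by hal_tx_ext_desc_sync() below.
	 */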
	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;
		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}
		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}
/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 *
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Returns: 1 if the packet is marked as exception,
 *          0, if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif
/**
 * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @desc_pool_id: Descriptor pool ID
 * @msdu_info: MSDU info (carries the metadata sent to the fw)
 * @tx_exc_metadata: Handle that holds exception path metadata
 *
 * Allocate and prepare Tx descriptor with msdu information.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
		qdf_nbuf_t nbuf, uint8_t desc_pool_id,
		struct dp_tx_msdu_info_s *msdu_info,
		struct cdp_tx_exception_metadata *tx_exc_metadata)
{
	uint8_t align_pad;
	uint8_t is_exception = 0;
	uint8_t htt_hdr_size;
	struct dp_tx_desc_s *tx_desc;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (dp_tx_limit_check(vdev))
		return NULL;

	/* Allocate software Tx descriptor */
	tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);

	if (qdf_unlikely(!tx_desc)) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
		return NULL;
	}

	dp_tx_outstanding_inc(pdev);

	/* Initialize the SW tx descriptor */
	tx_desc->nbuf = nbuf;
	tx_desc->frm_type = dp_tx_frm_std;
	tx_desc->tx_encap_type = ((tx_exc_metadata &&
		(tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
		tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
	tx_desc->vdev_id = vdev->vdev_id;
	tx_desc->pdev = pdev;
	tx_desc->msdu_ext_desc = NULL;
	tx_desc->pkt_offset = 0;
	tx_desc->length = qdf_nbuf_headlen(nbuf);

	dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);

	if (qdf_unlikely(vdev->multipass_en)) {
		if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
			goto failure;
	}

	/* Packets marked by upper layer (OS-IF) to be sent to FW */
	if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
		is_exception = 1;

	/*
	 * For special modes (vdev_type == ocb or mesh), data frames should be
	 * transmitted using varying transmit parameters (tx spec) which include
	 * transmit rate, power, priority, channel, channel bandwidth, nss etc.
	 * These are filled in HTT MSDU descriptor and sent in frame pre-header.
	 * These frames are sent as exception packets to firmware.
	 *
	 * HW requirement is that metadata should always point to a
	 * 8-byte aligned address. So we add alignment pad to start of buffer.
	 * HTT Metadata should be ensured to be multiple of 8-bytes,
	 * to get 8-byte aligned start address along with align_pad added
	 *
	 *  |-----------------------------|
	 *  |                             |
	 *  |-----------------------------| <-----Buffer Pointer Address given
	 *  |                             |  ^    in HW descriptor (aligned)
	 *  |       HTT Metadata          |  |
	 *  |                             |  |
	 *  |                             |  | Packet Offset given in descriptor
	 *  |                             |  |
	 *  |-----------------------------|  |
	 *  |       Alignment Pad         |  v
	 *  |-----------------------------| <----- Actual buffer start address
	 *  |        SKB Data             |           (Unaligned)
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |                             |
	 *  |-----------------------------|
	 */
	if (qdf_unlikely((msdu_info->exception_fw)) ||
	    (vdev->opmode == wlan_op_mode_ocb) ||
	    (tx_exc_metadata &&
	     tx_exc_metadata->is_tx_sniffer)) {
		align_pad = ((unsigned long)qdf_nbuf_data(nbuf)) & 0x7;

		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
			DP_STATS_INC(vdev,
				     tx_i.dropped.headroom_insufficient, 1);
			goto failure;
		}

		if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
			dp_tx_err("qdf_nbuf_push_head failed");
			goto failure;
		}

		htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
							  msdu_info);
		if (htt_hdr_size == 0)
			goto failure;

		tx_desc->length = qdf_nbuf_headlen(nbuf);
		tx_desc->pkt_offset = align_pad + htt_hdr_size;
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		is_exception = 1;
		tx_desc->length -= tx_desc->pkt_offset;
	}
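
	/*
	 * With TQM_BYPASS_WAR enabled the #if below drops the condition, so
	 * every frame (not just exceptions) is flagged TO_FW and counted as
	 * a tx exception while the workaround is in effect.
	 */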
#if !TQM_BYPASS_WAR
	if (is_exception || tx_exc_metadata)
#endif
	{
		/* Temporary WAR due to TQM VP issues */
		tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
		qdf_atomic_inc(&soc->num_tx_exception);
	}

	return tx_desc;

failure:
	dp_tx_desc_release(tx_desc, desc_pool_id);
	return NULL;
}
/**
 * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for a
 *                        multisegment frame
 * @vdev: DP vdev handle
 * @nbuf: skb
 * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Allocate and prepare Tx descriptor with msdu and fragment descriptor
 * information. For frames with fragments, allocate and prepare
 * an MSDU extension descriptor.
 *
 * Return: Pointer to Tx Descriptor on success,
 *         NULL on failure
 */
  1009. static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
  1010. qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
  1011. uint8_t desc_pool_id)
  1012. {
  1013. struct dp_tx_desc_s *tx_desc;
  1014. struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
  1015. struct dp_pdev *pdev = vdev->pdev;
  1016. struct dp_soc *soc = pdev->soc;
  1017. if (dp_tx_limit_check(vdev))
  1018. return NULL;
  1019. /* Allocate software Tx descriptor */
  1020. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1021. if (!tx_desc) {
  1022. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1023. return NULL;
  1024. }
  1025. dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
  1026. nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
  1027. dp_tx_outstanding_inc(pdev);
  1028. /* Initialize the SW tx descriptor */
  1029. tx_desc->nbuf = nbuf;
  1030. tx_desc->frm_type = msdu_info->frm_type;
  1031. tx_desc->tx_encap_type = vdev->tx_encap_type;
  1032. tx_desc->vdev_id = vdev->vdev_id;
  1033. tx_desc->pdev = pdev;
  1034. tx_desc->pkt_offset = 0;
  1035. tx_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
  1036. tx_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
  1037. dp_tx_trace_pkt(nbuf, tx_desc->id, vdev->vdev_id);
  1038. /* Handle scattered frames - TSO/SG/ME */
  1039. /* Allocate and prepare an extension descriptor for scattered frames */
  1040. msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
  1041. if (!msdu_ext_desc) {
  1042. dp_tx_info("Tx Extension Descriptor Alloc Fail");
  1043. goto failure;
  1044. }
  1045. #if TQM_BYPASS_WAR
  1046. /* Temporary WAR due to TQM VP issues */
  1047. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1048. qdf_atomic_inc(&soc->num_tx_exception);
  1049. #endif
  1050. if (qdf_unlikely(msdu_info->exception_fw))
  1051. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1052. tx_desc->msdu_ext_desc = msdu_ext_desc;
  1053. tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
  1054. tx_desc->dma_addr = msdu_ext_desc->paddr;
  1055. if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1056. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1057. else
  1058. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1059. return tx_desc;
  1060. failure:
  1061. dp_tx_desc_release(tx_desc, desc_pool_id);
  1062. return NULL;
  1063. }
  1064. /**
  1065. * dp_tx_prepare_raw() - Prepare RAW packet TX
  1066. * @vdev: DP vdev handle
  1067. * @nbuf: buffer pointer
  1068. * @seg_info: Pointer to Segment info Descriptor to be prepared
  1069. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
  1070. * descriptor
  1071. *
1072. * Return: nbuf on success, NULL on failure
  1073. */
  1074. static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1075. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  1076. {
  1077. qdf_nbuf_t curr_nbuf = NULL;
  1078. uint16_t total_len = 0;
  1079. qdf_dma_addr_t paddr;
  1080. int32_t i;
  1081. int32_t mapped_buf_num = 0;
  1082. struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
  1083. qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1084. DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
  1085. /* Continue only if frames are of DATA type */
  1086. if (!DP_FRAME_IS_DATA(qos_wh)) {
  1087. DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1088. dp_tx_debug("Pkt received is not of data type");
  1089. goto error;
  1090. }
  1091. /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
  1092. if (vdev->raw_mode_war &&
  1093. (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
  1094. (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
  1095. qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
  1096. for (curr_nbuf = nbuf, i = 0; curr_nbuf;
  1097. curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
  1098. /*
1099. * Number of nbufs must not exceed the size of the frags
  1100. * array in seg_info.
  1101. */
  1102. if (i >= DP_TX_MAX_NUM_FRAGS) {
  1103. dp_err_rl("nbuf cnt exceeds the max number of segs");
  1104. DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
  1105. goto error;
  1106. }
  1107. if (QDF_STATUS_SUCCESS !=
  1108. qdf_nbuf_map_nbytes_single(vdev->osdev,
  1109. curr_nbuf,
  1110. QDF_DMA_TO_DEVICE,
  1111. curr_nbuf->len)) {
1112. dp_tx_err("%s dma map error", __func__);
  1113. DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
  1114. goto error;
  1115. }
1116. /* Update the count of mapped nbufs */
  1117. mapped_buf_num++;
  1118. paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
  1119. seg_info->frags[i].paddr_lo = paddr;
  1120. seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
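/*
 * e.g. a DMA address of 0x123456780 splits into
 * paddr_lo = 0x23456780 and paddr_hi = 0x1.
 */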
  1121. seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
  1122. seg_info->frags[i].vaddr = (void *) curr_nbuf;
  1123. total_len += qdf_nbuf_len(curr_nbuf);
  1124. }
  1125. seg_info->frag_cnt = i;
  1126. seg_info->total_len = total_len;
  1127. seg_info->next = NULL;
  1128. sg_info->curr_seg = seg_info;
  1129. msdu_info->frm_type = dp_tx_frm_raw;
  1130. msdu_info->num_seg = 1;
  1131. return nbuf;
  1132. error:
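/*
 * Unwind: walk the whole chain, unmapping only the first
 * mapped_buf_num nbufs that were successfully DMA-mapped above,
 * and free every nbuf in the chain.
 */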
  1133. i = 0;
  1134. while (nbuf) {
  1135. curr_nbuf = nbuf;
  1136. if (i < mapped_buf_num) {
  1137. qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
  1138. QDF_DMA_TO_DEVICE,
  1139. curr_nbuf->len);
  1140. i++;
  1141. }
  1142. nbuf = qdf_nbuf_next(nbuf);
  1143. qdf_nbuf_free(curr_nbuf);
  1144. }
  1145. return NULL;
  1146. }
  1147. /**
  1148. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1149. * @soc: DP soc handle
  1150. * @nbuf: Buffer pointer
  1151. *
  1152. * unmap the chain of nbufs that belong to this RAW frame.
  1153. *
  1154. * Return: None
  1155. */
  1156. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1157. qdf_nbuf_t nbuf)
  1158. {
  1159. qdf_nbuf_t cur_nbuf = nbuf;
  1160. do {
  1161. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1162. QDF_DMA_TO_DEVICE,
  1163. cur_nbuf->len);
  1164. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1165. } while (cur_nbuf);
  1166. }
  1167. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1168. void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
  1169. qdf_nbuf_t nbuf)
  1170. {
  1171. qdf_nbuf_t nbuf_local;
  1172. struct dp_vdev *vdev_local = vdev_hdl;
  1173. do {
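/*
 * Protocol counting applies only to simple linear data frames;
 * the breaks below skip raw-encap, non-linear, and TSO nbufs.
 */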
  1174. if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
  1175. break;
  1176. nbuf_local = nbuf;
  1177. if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
  1178. htt_cmn_pkt_type_raw))
  1179. break;
  1180. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
  1181. break;
  1182. else if (qdf_nbuf_is_tso((nbuf_local)))
  1183. break;
  1184. dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
  1185. (nbuf_local),
  1186. NULL, 1, 0);
  1187. } while (0);
  1188. }
  1189. #endif
  1190. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1191. /**
  1192. * dp_tx_update_stats() - Update soc level tx stats
  1193. * @soc: DP soc handle
  1194. * @nbuf: packet being transmitted
  1195. *
  1196. * Returns: none
  1197. */
  1198. void dp_tx_update_stats(struct dp_soc *soc,
  1199. qdf_nbuf_t nbuf)
  1200. {
  1201. DP_STATS_INC_PKT(soc, tx.egress, 1, qdf_nbuf_len(nbuf));
  1202. }
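/**
 * dp_tx_attempt_coalescing() - Check if the TCL register write can be
 * coalesced with a later one
 * @soc: DP soc handle
 * @vdev: DP vdev handle
 * @tx_desc: software Tx descriptor
 * @tid: TID for the frame
 *
 * Query the SW latency manager policy to decide whether the TCL
 * head-pointer update for this descriptor can be deferred and coalesced
 * with a subsequent write.
 *
 * Return: non-zero to coalesce the ring write, 0 to write immediately
 */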
  1203. int
  1204. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1205. struct dp_tx_desc_s *tx_desc,
  1206. uint8_t tid)
  1207. {
  1208. struct dp_swlm *swlm = &soc->swlm;
  1209. union swlm_data swlm_query_data;
  1210. struct dp_swlm_tcl_data tcl_data;
  1211. QDF_STATUS status;
  1212. int ret;
  1213. if (qdf_unlikely(!swlm->is_enabled))
  1214. return 0;
  1215. tcl_data.nbuf = tx_desc->nbuf;
  1216. tcl_data.tid = tid;
  1217. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1218. swlm_query_data.tcl_data = &tcl_data;
  1219. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1220. if (QDF_IS_STATUS_ERROR(status)) {
  1221. dp_swlm_tcl_reset_session_data(soc);
  1222. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1223. return 0;
  1224. }
  1225. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1226. if (ret) {
  1227. DP_STATS_INC(swlm, tcl.coalesce_success, 1);
  1228. } else {
  1229. DP_STATS_INC(swlm, tcl.coalesce_fail, 1);
  1230. }
  1231. return ret;
  1232. }
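/**
 * dp_tx_ring_access_end() - End the TCL ring access for this transmit
 * @soc: DP soc handle
 * @hal_ring_hdl: HAL ring handle
 * @coalesce: non-zero to skip (coalesce) the head-pointer update
 *
 * Return: none
 */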
  1233. void
  1234. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1235. int coalesce)
  1236. {
  1237. if (coalesce)
  1238. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1239. else
  1240. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1241. }
  1242. #endif
  1243. #ifdef FEATURE_RUNTIME_PM
  1244. /**
  1245. * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
  1246. * @soc: Datapath soc handle
  1247. * @hal_ring_hdl: HAL ring handle
  1248. * @coalesce: Coalesce the current write or not
  1249. *
  1250. * Wrapper for HAL ring access end for data transmission for
  1251. * FEATURE_RUNTIME_PM
  1252. *
  1253. * Returns: none
  1254. */
  1255. void
  1256. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1257. hal_ring_handle_t hal_ring_hdl,
  1258. int coalesce)
  1259. {
  1260. int ret;
  1261. ret = hif_pm_runtime_get(soc->hif_handle,
  1262. RTPM_ID_DW_TX_HW_ENQUEUE, true);
  1263. switch (ret) {
  1264. case 0:
  1265. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1266. hif_pm_runtime_put(soc->hif_handle,
  1267. RTPM_ID_DW_TX_HW_ENQUEUE);
  1268. break;
  1269. /*
  1270. * If hif_pm_runtime_get returns -EBUSY or -EINPROGRESS,
  1271. * take the dp runtime refcount using dp_runtime_get,
1272. * check the link state; if up, write the TX ring HP, else just set the flush event.
  1273. * In dp_runtime_resume, wait until dp runtime refcount becomes
  1274. * zero or time out, then flush pending tx.
  1275. */
  1276. case -EBUSY:
  1277. case -EINPROGRESS:
  1278. dp_runtime_get(soc);
  1279. if (hif_pm_get_link_state(soc->hif_handle) ==
  1280. HIF_PM_LINK_STATE_UP) {
  1281. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1282. } else {
  1283. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1284. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1285. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1286. }
  1287. dp_runtime_put(soc);
  1288. break;
  1289. default:
  1290. dp_runtime_get(soc);
  1291. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1292. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1293. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1294. dp_runtime_put(soc);
  1295. }
  1296. }
  1297. #endif
  1298. /**
  1299. * dp_cce_classify() - Classify the frame based on CCE rules
  1300. * @vdev: DP vdev handle
  1301. * @nbuf: skb
  1302. *
  1303. * Classify frames based on CCE rules
1304. * Return: bool (true if classified, else false)
  1306. */
  1307. static bool dp_cce_classify(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  1308. {
  1309. qdf_ether_header_t *eh = NULL;
  1310. uint16_t ether_type;
  1311. qdf_llc_t *llcHdr;
  1312. qdf_nbuf_t nbuf_clone = NULL;
  1313. qdf_dot3_qosframe_t *qos_wh = NULL;
  1314. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1315. /*
1316. * For mesh packets, or when HLOS TID override is enabled,
  1317. * don't do any classification
  1318. */
  1319. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1320. & DP_TX_SKIP_CCE_CLASSIFY))
  1321. return false;
  1322. }
  1323. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1324. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  1325. ether_type = eh->ether_type;
  1326. llcHdr = (qdf_llc_t *)(nbuf->data +
  1327. sizeof(qdf_ether_header_t));
  1328. } else {
  1329. qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1330. /* For encrypted packets don't do any classification */
  1331. if (qdf_unlikely(qos_wh->i_fc[1] & IEEE80211_FC1_WEP))
  1332. return false;
  1333. if (qdf_unlikely(qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS)) {
  1334. if (qdf_unlikely(
  1335. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_TODS &&
  1336. qos_wh->i_fc[1] & QDF_IEEE80211_FC1_FROMDS)) {
  1337. ether_type = *(uint16_t *)(nbuf->data
  1338. + QDF_IEEE80211_4ADDR_HDR_LEN
  1339. + sizeof(qdf_llc_t)
  1340. - sizeof(ether_type));
  1341. llcHdr = (qdf_llc_t *)(nbuf->data +
  1342. QDF_IEEE80211_4ADDR_HDR_LEN);
  1343. } else {
  1344. ether_type = *(uint16_t *)(nbuf->data
  1345. + QDF_IEEE80211_3ADDR_HDR_LEN
  1346. + sizeof(qdf_llc_t)
  1347. - sizeof(ether_type));
  1348. llcHdr = (qdf_llc_t *)(nbuf->data +
  1349. QDF_IEEE80211_3ADDR_HDR_LEN);
  1350. }
  1351. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr)
  1352. && (ether_type ==
  1353. qdf_htons(QDF_NBUF_TRAC_EAPOL_ETH_TYPE)))) {
  1354. DP_STATS_INC(vdev, tx_i.cce_classified_raw, 1);
  1355. return true;
  1356. }
  1357. }
  1358. return false;
  1359. }
  1360. if (qdf_unlikely(DP_FRAME_IS_SNAP(llcHdr))) {
  1361. ether_type = *(uint16_t *)(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1362. sizeof(*llcHdr));
  1363. nbuf_clone = qdf_nbuf_clone(nbuf);
  1364. if (qdf_unlikely(nbuf_clone)) {
  1365. qdf_nbuf_pull_head(nbuf_clone, sizeof(*llcHdr));
  1366. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1367. qdf_nbuf_pull_head(nbuf_clone,
  1368. sizeof(qdf_net_vlanhdr_t));
  1369. }
  1370. }
  1371. } else {
  1372. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1373. nbuf_clone = qdf_nbuf_clone(nbuf);
  1374. if (qdf_unlikely(nbuf_clone)) {
  1375. qdf_nbuf_pull_head(nbuf_clone,
  1376. sizeof(qdf_net_vlanhdr_t));
  1377. }
  1378. }
  1379. }
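/*
 * If a clone was created above, classify the clone: its LLC/VLAN
 * headers have been pulled, so the qdf_nbuf_is_* parsers below see a
 * plain Ethernet-II frame.
 */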
  1380. if (qdf_unlikely(nbuf_clone))
  1381. nbuf = nbuf_clone;
  1382. if (qdf_unlikely(qdf_nbuf_is_ipv4_eapol_pkt(nbuf)
  1383. || qdf_nbuf_is_ipv4_arp_pkt(nbuf)
  1384. || qdf_nbuf_is_ipv4_wapi_pkt(nbuf)
  1385. || qdf_nbuf_is_ipv4_tdls_pkt(nbuf)
  1386. || (qdf_nbuf_is_ipv4_pkt(nbuf)
  1387. && qdf_nbuf_is_ipv4_dhcp_pkt(nbuf))
  1388. || (qdf_nbuf_is_ipv6_pkt(nbuf) &&
  1389. qdf_nbuf_is_ipv6_dhcp_pkt(nbuf)))) {
  1390. if (qdf_unlikely(nbuf_clone))
  1391. qdf_nbuf_free(nbuf_clone);
  1392. return true;
  1393. }
  1394. if (qdf_unlikely(nbuf_clone))
  1395. qdf_nbuf_free(nbuf_clone);
  1396. return false;
  1397. }
  1398. /**
  1399. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1400. * @vdev: DP vdev handle
1401. * @nbuf: skb
* @msdu_info: MSDU info in which the extracted TID is updated
1402. *
1403. * Extract the DSCP or PCP information from the frame and map it into a TID value.
  1404. *
  1405. * Return: void
  1406. */
  1407. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1408. struct dp_tx_msdu_info_s *msdu_info)
  1409. {
  1410. uint8_t tos = 0, dscp_tid_override = 0;
  1411. uint8_t *hdr_ptr, *L3datap;
  1412. uint8_t is_mcast = 0;
  1413. qdf_ether_header_t *eh = NULL;
  1414. qdf_ethervlan_header_t *evh = NULL;
  1415. uint16_t ether_type;
  1416. qdf_llc_t *llcHdr;
  1417. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1418. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1419. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1420. eh = (qdf_ether_header_t *)nbuf->data;
  1421. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1422. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1423. } else {
  1424. qdf_dot3_qosframe_t *qos_wh =
  1425. (qdf_dot3_qosframe_t *) nbuf->data;
  1426. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1427. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1428. return;
  1429. }
  1430. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1431. ether_type = eh->ether_type;
  1432. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1433. /*
  1434. * Check if packet is dot3 or eth2 type.
  1435. */
  1436. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
  1437. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1438. sizeof(*llcHdr));
  1439. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1440. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1441. sizeof(*llcHdr);
  1442. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
  1443. + sizeof(*llcHdr) +
  1444. sizeof(qdf_net_vlanhdr_t));
  1445. } else {
  1446. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1447. sizeof(*llcHdr);
  1448. }
  1449. } else {
  1450. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1451. evh = (qdf_ethervlan_header_t *) eh;
  1452. ether_type = evh->ether_type;
  1453. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1454. }
  1455. }
  1456. /*
  1457. * Find priority from IP TOS DSCP field
  1458. */
  1459. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1460. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1461. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1462. /* Only for unicast frames */
  1463. if (!is_mcast) {
  1464. /* send it on VO queue */
  1465. msdu_info->tid = DP_VO_TID;
  1466. }
  1467. } else {
  1468. /*
  1469. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1470. * from TOS byte.
  1471. */
  1472. tos = ip->ip_tos;
  1473. dscp_tid_override = 1;
  1474. }
  1475. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
1476. /* TODO:
1477. * use flow label;
1478. * IGMP/MLD cases to be handled in phase 2
1479. */
  1480. unsigned long ver_pri_flowlabel;
  1481. unsigned long pri;
  1482. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1483. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1484. DP_IPV6_PRIORITY_SHIFT;
  1485. tos = pri;
  1486. dscp_tid_override = 1;
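/*
 * Illustration: a version/TC/flow-label word of 0x6B800000 (after
 * ntohl) carries traffic class 0xB8, so tos = 0xB8 here; the DSCP
 * bits are extracted from it in the dscp_tid_override handling below.
 */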
  1487. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1488. msdu_info->tid = DP_VO_TID;
  1489. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1490. /* Only for unicast frames */
  1491. if (!is_mcast) {
  1492. /* send ucast arp on VO queue */
  1493. msdu_info->tid = DP_VO_TID;
  1494. }
  1495. }
  1496. /*
  1497. * Assign all MCAST packets to BE
  1498. */
  1499. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1500. if (is_mcast) {
  1501. tos = 0;
  1502. dscp_tid_override = 1;
  1503. }
  1504. }
  1505. if (dscp_tid_override == 1) {
  1506. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1507. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
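/*
 * e.g. tos = 0xB8 (EF marking) yields DSCP 0xB8 >> 2 = 46, so the TID
 * comes from dscp_tid_map[dscp_tid_map_id][46].
 */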
  1508. }
  1509. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1510. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1511. return;
  1512. }
  1513. /**
  1514. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1515. * @vdev: DP vdev handle
  1516. * @nbuf: skb
  1517. *
  1518. * Software based TID classification is required when more than 2 DSCP-TID
  1519. * mapping tables are needed.
  1520. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1521. *
  1522. * Return: void
  1523. */
  1524. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1525. struct dp_tx_msdu_info_s *msdu_info)
  1526. {
  1527. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1528. /*
1529. * skip_sw_tid_classification flag will be set in the below cases:
  1530. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1531. * 2. hlos_tid_override enabled for vdev
  1532. * 3. mesh mode enabled for vdev
  1533. */
  1534. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1535. /* Update tid in msdu_info from skb priority */
  1536. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1537. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1538. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  1539. return;
  1540. }
  1541. return;
  1542. }
  1543. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1544. }
  1545. #ifdef FEATURE_WLAN_TDLS
  1546. /**
  1547. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1548. * @soc: datapath SOC
  1549. * @vdev: datapath vdev
  1550. * @tx_desc: TX descriptor
  1551. *
  1552. * Return: None
  1553. */
  1554. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1555. struct dp_vdev *vdev,
  1556. struct dp_tx_desc_s *tx_desc)
  1557. {
  1558. if (vdev) {
  1559. if (vdev->is_tdls_frame) {
  1560. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1561. vdev->is_tdls_frame = false;
  1562. }
  1563. }
  1564. }
  1565. /**
  1566. * dp_non_std_tx_comp_free_buff() - Free the non std tx packet buffer
  1567. * @soc: dp_soc handle
  1568. * @tx_desc: TX descriptor
  1570. *
  1571. * Return: None
  1572. */
  1573. static void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1574. struct dp_tx_desc_s *tx_desc)
  1575. {
  1576. struct hal_tx_completion_status ts = {0};
  1577. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1578. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1579. DP_MOD_ID_TDLS);
  1580. if (qdf_unlikely(!vdev)) {
  1581. dp_err_rl("vdev is null!");
  1582. goto error;
  1583. }
  1584. hal_tx_comp_get_status(&tx_desc->comp, &ts, vdev->pdev->soc->hal_soc);
  1585. if (vdev->tx_non_std_data_callback.func) {
  1586. qdf_nbuf_set_next(nbuf, NULL);
  1587. vdev->tx_non_std_data_callback.func(
  1588. vdev->tx_non_std_data_callback.ctxt,
  1589. nbuf, ts.status);
  1590. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1591. return;
  1592. } else {
  1593. dp_err_rl("callback func is null");
  1594. }
  1595. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1596. error:
  1597. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1598. qdf_nbuf_free(nbuf);
  1599. }
  1600. /**
  1601. * dp_tx_msdu_single_map() - do nbuf map
  1602. * @vdev: DP vdev handle
  1603. * @tx_desc: DP TX descriptor pointer
  1604. * @nbuf: skb pointer
  1605. *
1606. * For TDLS frames, use qdf_nbuf_map_single() to align with the unmap
1607. * operation done in other components.
  1608. *
  1609. * Return: QDF_STATUS
  1610. */
  1611. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1612. struct dp_tx_desc_s *tx_desc,
  1613. qdf_nbuf_t nbuf)
  1614. {
  1615. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1616. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1617. nbuf,
  1618. QDF_DMA_TO_DEVICE,
  1619. nbuf->len);
  1620. else
  1621. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1622. QDF_DMA_TO_DEVICE);
  1623. }
  1624. #else
  1625. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1626. struct dp_vdev *vdev,
  1627. struct dp_tx_desc_s *tx_desc)
  1628. {
  1629. }
  1630. static inline void dp_non_std_tx_comp_free_buff(struct dp_soc *soc,
  1631. struct dp_tx_desc_s *tx_desc)
  1632. {
  1633. }
  1634. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1635. struct dp_tx_desc_s *tx_desc,
  1636. qdf_nbuf_t nbuf)
  1637. {
  1638. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1639. nbuf,
  1640. QDF_DMA_TO_DEVICE,
  1641. nbuf->len);
  1642. }
  1643. #endif
  1644. #ifdef MESH_MODE_SUPPORT
  1645. /**
  1646. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1647. * @soc: datapath SOC
  1648. * @vdev: datapath vdev
  1649. * @tx_desc: TX descriptor
  1650. *
  1651. * Return: None
  1652. */
  1653. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1654. struct dp_vdev *vdev,
  1655. struct dp_tx_desc_s *tx_desc)
  1656. {
  1657. if (qdf_unlikely(vdev->mesh_vdev))
  1658. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1659. }
  1660. /**
  1661. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1662. * @soc: dp_soc handle
  1663. * @tx_desc: TX descriptor
  1665. *
  1666. * Return: None
  1667. */
  1668. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1669. struct dp_tx_desc_s *tx_desc)
  1670. {
1671. qdf_nbuf_t nbuf = tx_desc->nbuf;
1672. struct dp_vdev *vdev;
/* Take the vdev reference up front; it is needed both for the
 * FW-completion stat and for the osif_tx_free_ext callback.
 */
1673. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
1674. DP_MOD_ID_MESH);
1675. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
1676. qdf_nbuf_free(nbuf);
1677. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
1678. } else {
1679. if (vdev && vdev->osif_tx_free_ext)
1680. vdev->osif_tx_free_ext((nbuf));
1681. else
1682. qdf_nbuf_free(nbuf);
1683. }
1684. if (vdev)
1685. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
1686. }
  1687. #else
  1688. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1689. struct dp_vdev *vdev,
  1690. struct dp_tx_desc_s *tx_desc)
  1691. {
  1692. }
  1693. static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1694. struct dp_tx_desc_s *tx_desc)
  1695. {
  1696. }
  1697. #endif
  1698. /**
  1699. * dp_tx_frame_is_drop() - checks if the packet is loopback
  1700. * @vdev: DP vdev handle
1701. * @srcmac: source MAC address
* @dstmac: destination MAC address
  1702. *
  1703. * Return: 1 if frame needs to be dropped else 0
  1704. */
  1705. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1706. {
  1707. struct dp_pdev *pdev = NULL;
  1708. struct dp_ast_entry *src_ast_entry = NULL;
  1709. struct dp_ast_entry *dst_ast_entry = NULL;
  1710. struct dp_soc *soc = NULL;
  1711. qdf_assert(vdev);
  1712. pdev = vdev->pdev;
  1713. qdf_assert(pdev);
  1714. soc = pdev->soc;
  1715. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1716. (soc, dstmac, vdev->pdev->pdev_id);
  1717. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1718. (soc, srcmac, vdev->pdev->pdev_id);
  1719. if (dst_ast_entry && src_ast_entry) {
  1720. if (dst_ast_entry->peer_id ==
  1721. src_ast_entry->peer_id)
  1722. return 1;
  1723. }
  1724. return 0;
  1725. }
  1726. /**
  1727. * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
  1728. * @vdev: DP vdev handle
  1729. * @nbuf: skb
1730. * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
  1733. * @peer_id: peer_id of the peer in case of NAWDS frames
  1734. * @tx_exc_metadata: Handle that holds exception path metadata
  1735. *
  1736. * Return: NULL on success,
  1737. * nbuf when it fails to send
  1738. */
  1739. qdf_nbuf_t
  1740. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1741. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  1742. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1743. {
  1744. struct dp_pdev *pdev = vdev->pdev;
  1745. struct dp_soc *soc = pdev->soc;
  1746. struct dp_tx_desc_s *tx_desc;
  1747. QDF_STATUS status;
  1748. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  1749. uint16_t htt_tcl_metadata = 0;
  1750. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  1751. uint8_t tid = msdu_info->tid;
  1752. struct cdp_tid_tx_stats *tid_stats = NULL;
  1753. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  1754. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  1755. msdu_info, tx_exc_metadata);
  1756. if (!tx_desc) {
  1757. dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
  1758. vdev, tx_q->desc_pool_id);
  1759. drop_code = TX_DESC_ERR;
  1760. goto fail_return;
  1761. }
  1762. if (qdf_unlikely(soc->cce_disable)) {
  1763. if (dp_cce_classify(vdev, nbuf) == true) {
  1764. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1765. tid = DP_VO_TID;
  1766. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1767. }
  1768. }
  1769. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
  1770. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  1771. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1772. HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  1773. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  1774. HTT_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  1775. HTT_TCL_METADATA_TYPE_PEER_BASED);
  1776. HTT_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  1777. peer_id);
  1778. } else
  1779. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1780. if (msdu_info->exception_fw)
  1781. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1782. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  1783. !pdev->enhanced_stats_en);
  1784. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  1785. if (qdf_unlikely(QDF_STATUS_SUCCESS !=
  1786. dp_tx_msdu_single_map(vdev, tx_desc, nbuf))) {
  1787. /* Handle failure */
  1788. dp_err("qdf_nbuf_map failed");
  1789. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  1790. drop_code = TX_DMA_MAP_ERR;
  1791. goto release_desc;
  1792. }
  1793. tx_desc->dma_addr = qdf_nbuf_mapped_paddr_get(tx_desc->nbuf);
  1794. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  1795. tx_desc->id, DP_TX_DESC_MAP);
  1796. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  1797. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  1798. htt_tcl_metadata,
  1799. tx_exc_metadata, msdu_info);
  1800. if (status != QDF_STATUS_SUCCESS) {
  1801. dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  1802. tx_desc, tx_q->ring_id);
  1803. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  1804. tx_desc->id, DP_TX_DESC_UNMAP);
  1805. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  1806. QDF_DMA_TO_DEVICE,
  1807. nbuf->len);
  1808. drop_code = TX_HW_ENQUEUE;
  1809. goto release_desc;
  1810. }
  1811. return NULL;
  1812. release_desc:
  1813. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  1814. fail_return:
  1815. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1816. tid_stats = &pdev->stats.tid_stats.
  1817. tid_tx_stats[tx_q->ring_id][tid];
  1818. tid_stats->swdrop_cnt[drop_code]++;
  1819. return nbuf;
  1820. }
  1821. /**
  1822. * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
  1823. * @soc: Soc handle
  1824. * @desc: software Tx descriptor to be processed
  1825. *
  1826. * Return: none
  1827. */
  1828. static inline void dp_tx_comp_free_buf(struct dp_soc *soc,
  1829. struct dp_tx_desc_s *desc)
  1830. {
  1831. qdf_nbuf_t nbuf = desc->nbuf;
  1832. enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
  1833. /* nbuf already freed in vdev detach path */
  1834. if (!nbuf)
  1835. return;
  1836. /* If it is TDLS mgmt, don't unmap or free the frame */
  1837. if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
  1838. return dp_non_std_tx_comp_free_buff(soc, desc);
  1839. /* 0 : MSDU buffer, 1 : MLE */
  1840. if (desc->msdu_ext_desc) {
  1841. /* TSO free */
  1842. if (hal_tx_ext_desc_get_tso_enable(
  1843. desc->msdu_ext_desc->vaddr)) {
  1844. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  1845. desc->id, DP_TX_COMP_MSDU_EXT);
  1846. dp_tx_tso_seg_history_add(soc, desc->tso_desc,
  1847. desc->nbuf, desc->id, type);
1848. /* unmap each TSO seg before freeing the nbuf */
  1849. dp_tx_tso_unmap_segment(soc, desc->tso_desc,
  1850. desc->tso_num_desc);
  1851. qdf_nbuf_free(nbuf);
  1852. return;
  1853. }
  1854. }
1855. /* If it's an ME frame, don't unmap the cloned nbufs */
  1856. if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
  1857. goto nbuf_free;
  1858. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
  1859. dp_tx_unmap(soc, desc);
  1860. if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
  1861. return dp_mesh_tx_comp_free_buff(soc, desc);
  1862. nbuf_free:
  1863. qdf_nbuf_free(nbuf);
  1864. }
  1865. /**
  1866. * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
  1867. * @vdev: DP vdev handle
  1868. * @nbuf: skb
  1869. * @msdu_info: MSDU info to be setup in MSDU extension descriptor
  1870. *
  1871. * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
  1872. *
  1873. * Return: NULL on success,
  1874. * nbuf when it fails to send
  1875. */
  1876. #if QDF_LOCK_STATS
  1877. noinline
1878. #endif
  1880. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1881. struct dp_tx_msdu_info_s *msdu_info)
  1882. {
  1883. uint32_t i;
  1884. struct dp_pdev *pdev = vdev->pdev;
  1885. struct dp_soc *soc = pdev->soc;
  1886. struct dp_tx_desc_s *tx_desc;
  1887. bool is_cce_classified = false;
  1888. QDF_STATUS status;
  1889. uint16_t htt_tcl_metadata = 0;
  1890. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  1891. struct cdp_tid_tx_stats *tid_stats = NULL;
  1892. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  1893. if (qdf_unlikely(soc->cce_disable)) {
  1894. is_cce_classified = dp_cce_classify(vdev, nbuf);
  1895. if (is_cce_classified) {
  1896. DP_STATS_INC(vdev, tx_i.cce_classified, 1);
  1897. msdu_info->tid = DP_VO_TID;
  1898. }
  1899. }
  1900. if (msdu_info->frm_type == dp_tx_frm_me)
  1901. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  1902. i = 0;
1903. /* i tracks the current segment index across all num_seg segments */
  1904. /*
  1905. * For each segment (maps to 1 MSDU) , prepare software and hardware
  1906. * descriptors using information in msdu_info
  1907. */
  1908. while (i < msdu_info->num_seg) {
  1909. /*
  1910. * Setup Tx descriptor for an MSDU, and MSDU extension
  1911. * descriptor
  1912. */
  1913. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  1914. tx_q->desc_pool_id);
  1915. if (!tx_desc) {
  1916. if (msdu_info->frm_type == dp_tx_frm_me) {
  1917. prep_desc_fail++;
  1918. dp_tx_me_free_buf(pdev,
  1919. (void *)(msdu_info->u.sg_info
  1920. .curr_seg->frags[0].vaddr));
  1921. if (prep_desc_fail == msdu_info->num_seg) {
  1922. /*
  1923. * Unmap is needed only if descriptor
  1924. * preparation failed for all segments.
  1925. */
  1926. qdf_nbuf_unmap(soc->osdev,
  1927. msdu_info->u.sg_info.
  1928. curr_seg->nbuf,
  1929. QDF_DMA_TO_DEVICE);
  1930. }
  1931. /*
  1932. * Free the nbuf for the current segment
  1933. * and make it point to the next in the list.
1934. * For ME, there are as many segments as there
1935. * are clients.
  1936. */
  1937. qdf_nbuf_free(msdu_info->u.sg_info
  1938. .curr_seg->nbuf);
  1939. if (msdu_info->u.sg_info.curr_seg->next) {
  1940. msdu_info->u.sg_info.curr_seg =
  1941. msdu_info->u.sg_info
  1942. .curr_seg->next;
  1943. nbuf = msdu_info->u.sg_info
  1944. .curr_seg->nbuf;
  1945. }
  1946. i++;
  1947. continue;
  1948. }
  1949. if (msdu_info->frm_type == dp_tx_frm_tso) {
  1950. dp_tx_tso_seg_history_add(
  1951. soc,
  1952. msdu_info->u.tso_info.curr_seg,
  1953. nbuf, 0, DP_TX_DESC_UNMAP);
  1954. dp_tx_tso_unmap_segment(soc,
  1955. msdu_info->u.tso_info.
  1956. curr_seg,
  1957. msdu_info->u.tso_info.
  1958. tso_num_seg_list);
  1959. if (msdu_info->u.tso_info.curr_seg->next) {
  1960. msdu_info->u.tso_info.curr_seg =
  1961. msdu_info->u.tso_info.curr_seg->next;
  1962. i++;
  1963. continue;
  1964. }
  1965. }
  1966. goto done;
  1967. }
  1968. if (msdu_info->frm_type == dp_tx_frm_me) {
  1969. tx_desc->me_buffer =
  1970. msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
  1971. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  1972. }
  1973. if (is_cce_classified)
  1974. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1975. htt_tcl_metadata = vdev->htt_tcl_metadata;
  1976. if (msdu_info->exception_fw) {
  1977. HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  1978. }
  1979. /*
  1980. * For frames with multiple segments (TSO, ME), jump to next
  1981. * segment.
  1982. */
  1983. if (msdu_info->frm_type == dp_tx_frm_tso) {
  1984. if (msdu_info->u.tso_info.curr_seg->next) {
  1985. msdu_info->u.tso_info.curr_seg =
  1986. msdu_info->u.tso_info.curr_seg->next;
  1987. /*
  1988. * If this is a jumbo nbuf, then increment the
  1989. * number of nbuf users for each additional
  1990. * segment of the msdu. This will ensure that
  1991. * the skb is freed only after receiving tx
  1992. * completion for all segments of an nbuf
  1993. */
  1994. qdf_nbuf_inc_users(nbuf);
  1995. /* Check with MCL if this is needed */
  1996. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  1997. */
  1998. }
  1999. }
  2000. /*
  2001. * Enqueue the Tx MSDU descriptor to HW for transmit
  2002. */
  2003. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2004. htt_tcl_metadata,
  2005. NULL, msdu_info);
  2006. if (status != QDF_STATUS_SUCCESS) {
  2007. dp_info("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2008. tx_desc, tx_q->ring_id);
  2009. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2010. tid_stats = &pdev->stats.tid_stats.
  2011. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  2012. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  2013. if (msdu_info->frm_type == dp_tx_frm_me) {
  2014. hw_enq_fail++;
  2015. if (hw_enq_fail == msdu_info->num_seg) {
  2016. /*
  2017. * Unmap is needed only if enqueue
  2018. * failed for all segments.
  2019. */
  2020. qdf_nbuf_unmap(soc->osdev,
  2021. msdu_info->u.sg_info.
  2022. curr_seg->nbuf,
  2023. QDF_DMA_TO_DEVICE);
  2024. }
  2025. /*
  2026. * Free the nbuf for the current segment
  2027. * and make it point to the next in the list.
2028. * For ME, there are as many segments as there
2029. * are clients.
  2030. */
  2031. qdf_nbuf_free(msdu_info->u.sg_info
  2032. .curr_seg->nbuf);
  2033. if (msdu_info->u.sg_info.curr_seg->next) {
  2034. msdu_info->u.sg_info.curr_seg =
  2035. msdu_info->u.sg_info
  2036. .curr_seg->next;
  2037. nbuf = msdu_info->u.sg_info
  2038. .curr_seg->nbuf;
  2039. } else
  2040. break;
  2041. i++;
  2042. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2043. continue;
  2044. }
  2045. /*
  2046. * For TSO frames, the nbuf users increment done for
  2047. * the current segment has to be reverted, since the
  2048. * hw enqueue for this segment failed
  2049. */
  2050. if (msdu_info->frm_type == dp_tx_frm_tso &&
  2051. msdu_info->u.tso_info.curr_seg) {
  2052. /*
  2053. * unmap and free current,
  2054. * retransmit remaining segments
  2055. */
  2056. dp_tx_comp_free_buf(soc, tx_desc);
  2057. i++;
  2058. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2059. continue;
  2060. }
  2061. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2062. goto done;
  2063. }
  2064. /*
  2065. * TODO
  2066. * if tso_info structure can be modified to have curr_seg
  2067. * as first element, following 2 blocks of code (for TSO and SG)
  2068. * can be combined into 1
  2069. */
  2070. /*
  2071. * For Multicast-Unicast converted packets,
  2072. * each converted frame (for a client) is represented as
  2073. * 1 segment
  2074. */
  2075. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  2076. (msdu_info->frm_type == dp_tx_frm_me)) {
  2077. if (msdu_info->u.sg_info.curr_seg->next) {
  2078. msdu_info->u.sg_info.curr_seg =
  2079. msdu_info->u.sg_info.curr_seg->next;
  2080. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2081. } else
  2082. break;
  2083. }
  2084. i++;
  2085. }
  2086. nbuf = NULL;
  2087. done:
  2088. return nbuf;
  2089. }
  2090. /**
2091. * dp_tx_prepare_sg() - Extract SG info from NBUF and prepare msdu_info
  2092. * for SG frames
  2093. * @vdev: DP vdev handle
  2094. * @nbuf: skb
  2095. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2096. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2097. *
2098. * Return: nbuf on success,
2099. * NULL on failure
  2100. */
  2101. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2102. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2103. {
  2104. uint32_t cur_frag, nr_frags, i;
  2105. qdf_dma_addr_t paddr;
  2106. struct dp_tx_sg_info_s *sg_info;
  2107. sg_info = &msdu_info->u.sg_info;
  2108. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2109. if (QDF_STATUS_SUCCESS !=
  2110. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2111. QDF_DMA_TO_DEVICE,
  2112. qdf_nbuf_headlen(nbuf))) {
  2113. dp_tx_err("dma map error");
  2114. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2115. qdf_nbuf_free(nbuf);
  2116. return NULL;
  2117. }
  2118. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2119. seg_info->frags[0].paddr_lo = paddr;
  2120. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2121. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2122. seg_info->frags[0].vaddr = (void *) nbuf;
  2123. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2124. if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
  2125. nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
  2126. dp_tx_err("frag dma map error");
  2127. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2128. goto map_err;
  2129. }
  2130. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2131. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2132. seg_info->frags[cur_frag + 1].paddr_hi =
  2133. ((uint64_t) paddr) >> 32;
  2134. seg_info->frags[cur_frag + 1].len =
  2135. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2136. }
  2137. seg_info->frag_cnt = (cur_frag + 1);
  2138. seg_info->total_len = qdf_nbuf_len(nbuf);
  2139. seg_info->next = NULL;
  2140. sg_info->curr_seg = seg_info;
  2141. msdu_info->frm_type = dp_tx_frm_sg;
  2142. msdu_info->num_seg = 1;
  2143. return nbuf;
  2144. map_err:
  2145. /* restore paddr into nbuf before calling unmap */
  2146. qdf_nbuf_mapped_paddr_set(nbuf,
  2147. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2148. ((uint64_t)
  2149. seg_info->frags[0].paddr_hi) << 32));
  2150. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2151. QDF_DMA_TO_DEVICE,
  2152. seg_info->frags[0].len);
  2153. for (i = 1; i <= cur_frag; i++) {
  2154. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2155. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2156. seg_info->frags[i].paddr_hi) << 32),
  2157. seg_info->frags[i].len,
  2158. QDF_DMA_TO_DEVICE);
  2159. }
  2160. qdf_nbuf_free(nbuf);
  2161. return NULL;
  2162. }
  2163. /**
2164. * dp_tx_add_tx_sniffer_meta_data() - Add tx_sniffer meta hdr info
  2165. * @vdev: DP vdev handle
  2166. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2167. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2168. *
2169. * Return: None
  2171. */
  2172. static
  2173. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2174. struct dp_tx_msdu_info_s *msdu_info,
  2175. uint16_t ppdu_cookie)
  2176. {
  2177. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2178. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2179. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2180. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2181. (msdu_info->meta_data[5], 1);
  2182. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2183. (msdu_info->meta_data[5], 1);
  2184. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2185. (msdu_info->meta_data[6], ppdu_cookie);
  2186. msdu_info->exception_fw = 1;
  2187. msdu_info->is_tx_sniffer = 1;
  2188. }
  2189. #ifdef MESH_MODE_SUPPORT
  2190. /**
2191. * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2192. * and prepare msdu_info for mesh frames
  2193. * @vdev: DP vdev handle
  2194. * @nbuf: skb
  2195. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2196. *
  2197. * Return: NULL on failure,
  2198. * nbuf when extracted successfully
  2199. */
  2200. static
  2201. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2202. struct dp_tx_msdu_info_s *msdu_info)
  2203. {
  2204. struct meta_hdr_s *mhdr;
  2205. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2206. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2207. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2208. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2209. msdu_info->exception_fw = 0;
  2210. goto remove_meta_hdr;
  2211. }
  2212. msdu_info->exception_fw = 1;
  2213. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2214. meta_data->host_tx_desc_pool = 1;
  2215. meta_data->update_peer_cache = 1;
  2216. meta_data->learning_frame = 1;
  2217. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2218. meta_data->power = mhdr->power;
  2219. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2220. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2221. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2222. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2223. meta_data->dyn_bw = 1;
  2224. meta_data->valid_pwr = 1;
  2225. meta_data->valid_mcs_mask = 1;
  2226. meta_data->valid_nss_mask = 1;
  2227. meta_data->valid_preamble_type = 1;
  2228. meta_data->valid_retries = 1;
  2229. meta_data->valid_bw_info = 1;
  2230. }
  2231. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2232. meta_data->encrypt_type = 0;
  2233. meta_data->valid_encrypt_type = 1;
  2234. meta_data->learning_frame = 0;
  2235. }
  2236. meta_data->valid_key_flags = 1;
  2237. meta_data->key_flags = (mhdr->keyix & 0x3);
  2238. remove_meta_hdr:
  2239. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2240. dp_tx_err("qdf_nbuf_pull_head failed");
  2241. qdf_nbuf_free(nbuf);
  2242. return NULL;
  2243. }
  2244. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2245. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2246. " tid %d to_fw %d",
  2247. msdu_info->meta_data[0],
  2248. msdu_info->meta_data[1],
  2249. msdu_info->meta_data[2],
  2250. msdu_info->meta_data[3],
  2251. msdu_info->meta_data[4],
  2252. msdu_info->meta_data[5],
  2253. msdu_info->tid, msdu_info->exception_fw);
  2254. return nbuf;
  2255. }
  2256. #else
  2257. static
  2258. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2259. struct dp_tx_msdu_info_s *msdu_info)
  2260. {
  2261. return nbuf;
  2262. }
  2263. #endif
  2264. /**
  2265. * dp_check_exc_metadata() - Checks if parameters are valid
2266. * @tx_exc: holds all exception path parameters
2267. *
2268. * Return: true when all the parameters are valid, else false
  2269. *
  2270. */
  2271. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2272. {
  2273. bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
  2274. HTT_INVALID_TID);
  2275. bool invalid_encap_type =
  2276. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2277. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2278. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2279. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2280. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2281. tx_exc->ppdu_cookie == 0);
  2282. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2283. invalid_cookie) {
  2284. return false;
  2285. }
  2286. return true;
  2287. }
  2288. #ifdef ATH_SUPPORT_IQUE
  2289. /**
  2290. * dp_tx_mcast_enhance() - Multicast enhancement on TX
  2291. * @vdev: vdev handle
  2292. * @nbuf: skb
  2293. *
  2294. * Return: true on success,
  2295. * false on failure
  2296. */
  2297. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2298. {
  2299. qdf_ether_header_t *eh;
2300. /* Mcast to Ucast Conversion */
  2301. if (qdf_likely(!vdev->mcast_enhancement_en))
  2302. return true;
  2303. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2304. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2305. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2306. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2307. qdf_nbuf_set_next(nbuf, NULL);
  2308. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2309. qdf_nbuf_len(nbuf));
  2310. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2311. QDF_STATUS_SUCCESS) {
  2312. return false;
  2313. }
  2314. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2315. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2316. QDF_STATUS_SUCCESS) {
  2317. return false;
  2318. }
  2319. }
  2320. }
  2321. return true;
  2322. }
  2323. #else
  2324. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2325. {
  2326. return true;
  2327. }
  2328. #endif
  2329. /**
  2330. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2331. * @nbuf: qdf_nbuf_t
  2332. * @vdev: struct dp_vdev *
  2333. *
2334. * Allow the packet for processing only if it is destined to a peer
2335. * client connected to the same VAP. Drop the packet if the client is
2336. * connected to a different VAP.
  2337. *
  2338. * Return: QDF_STATUS
  2339. */
  2340. static inline QDF_STATUS
  2341. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2342. {
  2343. struct dp_ast_entry *dst_ast_entry = NULL;
  2344. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2345. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2346. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2347. return QDF_STATUS_SUCCESS;
  2348. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2349. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2350. eh->ether_dhost,
  2351. vdev->vdev_id);
  2352. /* If there is no ast entry, return failure */
  2353. if (qdf_unlikely(!dst_ast_entry)) {
  2354. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2355. return QDF_STATUS_E_FAILURE;
  2356. }
  2357. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2358. return QDF_STATUS_SUCCESS;
  2359. }
  2360. /**
  2361. * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  2362. * @soc: DP soc handle
  2363. * @vdev_id: id of DP vdev handle
  2364. * @nbuf: skb
  2365. * @tx_exc_metadata: Handle that holds exception path meta data
  2366. *
  2367. * Entry point for Core Tx layer (DP_TX) invoked from
  2368. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2369. *
  2370. * Return: NULL on success,
  2371. * nbuf when it fails to send
  2372. */
  2373. qdf_nbuf_t
  2374. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2375. qdf_nbuf_t nbuf,
  2376. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2377. {
  2378. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2379. qdf_ether_header_t *eh = NULL;
  2380. struct dp_tx_msdu_info_s msdu_info;
  2381. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2382. DP_MOD_ID_TX_EXCEPTION);
  2383. if (qdf_unlikely(!vdev))
  2384. goto fail;
  2385. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2386. if (!tx_exc_metadata)
  2387. goto fail;
  2388. msdu_info.tid = tx_exc_metadata->tid;
  2389. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2390. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2391. QDF_MAC_ADDR_REF(nbuf->data));
  2392. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2393. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2394. dp_tx_err("Invalid parameters in exception path");
  2395. goto fail;
  2396. }
  2397. /* Basic sanity checks for unsupported packets */
  2398. /* MESH mode */
  2399. if (qdf_unlikely(vdev->mesh_vdev)) {
  2400. dp_tx_err("Mesh mode is not supported in exception path");
  2401. goto fail;
  2402. }
  2403. /*
  2404. * Classify the frame and call corresponding
  2405. * "prepare" function which extracts the segment (TSO)
2406. * and fragmentation information (for TSO, SG, ME, or Raw)
  2407. * into MSDU_INFO structure which is later used to fill
  2408. * SW and HW descriptors.
  2409. */
  2410. if (qdf_nbuf_is_tso(nbuf)) {
  2411. dp_verbose_debug("TSO frame %pK", vdev);
  2412. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2413. qdf_nbuf_len(nbuf));
  2414. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2415. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2416. qdf_nbuf_len(nbuf));
  2417. goto fail;
  2418. }
  2419. goto send_multiple;
  2420. }
  2421. /* SG */
  2422. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2423. struct dp_tx_seg_info_s seg_info = {0};
  2424. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2425. if (!nbuf)
  2426. goto fail;
  2427. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2428. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2429. qdf_nbuf_len(nbuf));
  2430. goto send_multiple;
  2431. }
2432. if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
  2433. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2434. qdf_nbuf_len(nbuf));
  2435. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2436. tx_exc_metadata->ppdu_cookie);
  2437. }
  2438. /*
  2439. * Get HW Queue to use for this frame.
2440. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2441. * dedicated for data and 1 for command.
  2442. * "queue_id" maps to one hardware ring.
  2443. * With each ring, we also associate a unique Tx descriptor pool
  2444. * to minimize lock contention for these resources.
  2445. */
  2446. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2447. /*
  2448. * Check exception descriptors
  2449. */
  2450. if (dp_tx_exception_limit_check(vdev))
  2451. goto fail;
  2452. /* Single linear frame */
  2453. /*
  2454. * If nbuf is a simple linear frame, use send_single function to
  2455. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2456. * SRNG. There is no need to setup a MSDU extension descriptor.
  2457. */
  2458. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2459. tx_exc_metadata->peer_id, tx_exc_metadata);
  2460. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2461. return nbuf;
  2462. send_multiple:
  2463. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2464. fail:
  2465. if (vdev)
  2466. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2467. dp_verbose_debug("pkt send failed");
  2468. return nbuf;
  2469. }
  2470. /**
  2471. * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2472. * in the exception path, as a special case, to avoid the regular exception path check.
  2473. * @soc: DP soc handle
  2474. * @vdev_id: id of DP vdev handle
  2475. * @nbuf: skb
  2476. * @tx_exc_metadata: Handle that holds exception path meta data
  2477. *
  2478. * Entry point for Core Tx layer (DP_TX) invoked from
  2479. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2480. *
  2481. * Return: NULL on success,
  2482. * nbuf when it fails to send
  2483. */
  2484. qdf_nbuf_t
  2485. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2486. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2487. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2488. {
  2489. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2490. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2491. DP_MOD_ID_TX_EXCEPTION);
  2492. if (qdf_unlikely(!vdev))
  2493. goto fail;
  2494. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2495. == QDF_STATUS_E_FAILURE)) {
  2496. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2497. goto fail;
  2498. }
2499. /* Drop the reference; it will again be taken inside dp_tx_send_exception() */
  2500. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2501. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  2502. fail:
  2503. if (vdev)
  2504. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2505. dp_verbose_debug("pkt send failed");
  2506. return nbuf;
  2507. }
  2508. /**
  2509. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  2510. * @soc: DP soc handle
  2511. * @vdev_id: DP vdev handle
  2512. * @nbuf: skb
  2513. *
  2514. * Entry point for Core Tx layer (DP_TX) invoked from
  2515. * hard_start_xmit in OSIF/HDD
  2516. *
  2517. * Return: NULL on success,
  2518. * nbuf when it fails to send
  2519. */
  2520. #ifdef MESH_MODE_SUPPORT
  2521. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2522. qdf_nbuf_t nbuf)
  2523. {
  2524. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2525. struct meta_hdr_s *mhdr;
  2526. qdf_nbuf_t nbuf_mesh = NULL;
  2527. qdf_nbuf_t nbuf_clone = NULL;
  2528. struct dp_vdev *vdev;
  2529. uint8_t no_enc_frame = 0;
  2530. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  2531. if (!nbuf_mesh) {
  2532. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2533. "qdf_nbuf_unshare failed");
  2534. return nbuf;
  2535. }
  2536. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  2537. if (!vdev) {
  2538. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2539. "vdev is NULL for vdev_id %d", vdev_id);
  2540. return nbuf;
  2541. }
  2542. nbuf = nbuf_mesh;
  2543. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2544. if ((vdev->sec_type != cdp_sec_type_none) &&
  2545. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  2546. no_enc_frame = 1;
  2547. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  2548. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  2549. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  2550. !no_enc_frame) {
  2551. nbuf_clone = qdf_nbuf_clone(nbuf);
  2552. if (!nbuf_clone) {
  2553. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2554. "qdf_nbuf_clone failed");
  2555. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2556. return nbuf;
  2557. }
  2558. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  2559. }
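/*
 * The clone, tagged CB_FTYPE_MESH_TX_INFO, carries the mesh Tx
 * parameters to the FW through the exception path; the original frame
 * continues below on the regular data path.
 */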
  2560. if (nbuf_clone) {
  2561. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  2562. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2563. } else {
  2564. qdf_nbuf_free(nbuf_clone);
  2565. }
  2566. }
  2567. if (no_enc_frame)
  2568. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  2569. else
  2570. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  2571. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  2572. if ((!nbuf) && no_enc_frame) {
  2573. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2574. }
  2575. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2576. return nbuf;
  2577. }
  2578. #else
  2579. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  2580. qdf_nbuf_t nbuf)
  2581. {
  2582. return dp_tx_send(soc, vdev_id, nbuf);
  2583. }
  2584. #endif
  2585. /**
  2586. * dp_tx_nawds_handler() - NAWDS handler
  2587. *
  2588. * @soc: DP soc handle
2589. * @vdev: DP vdev handle
  2590. * @msdu_info: msdu_info required to create HTT metadata
  2591. * @nbuf: skb
  2592. *
2593. * This API transmits multicast frames using the peer id of each
2594. * NAWDS-enabled peer.
2595. * Return: none
  2596. */
  2597. static inline
  2598. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2599. struct dp_tx_msdu_info_s *msdu_info, qdf_nbuf_t nbuf)
  2600. {
  2601. struct dp_peer *peer = NULL;
  2602. qdf_nbuf_t nbuf_clone = NULL;
  2603. uint16_t peer_id = DP_INVALID_PEER;
  2604. uint16_t sa_peer_id = DP_INVALID_PEER;
  2605. struct dp_ast_entry *ast_entry = NULL;
  2606. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2607. qdf_spin_lock_bh(&soc->ast_lock);
  2608. ast_entry = dp_peer_ast_hash_find_by_pdevid
  2609. (soc,
  2610. (uint8_t *)(eh->ether_shost),
  2611. vdev->pdev->pdev_id);
  2612. if (ast_entry)
  2613. sa_peer_id = ast_entry->peer_id;
  2614. qdf_spin_unlock_bh(&soc->ast_lock);
  2615. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2616. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2617. if (!peer->bss_peer && peer->nawds_enabled) {
  2618. peer_id = peer->peer_id;
2619. /* Multicast packets need to be
2620. * dropped in case of intra-bss forwarding
  2621. */
  2622. if (sa_peer_id == peer->peer_id) {
  2623. dp_tx_debug("multicast packet");
  2624. DP_STATS_INC(peer, tx.nawds_mcast_drop, 1);
  2625. continue;
  2626. }
  2627. nbuf_clone = qdf_nbuf_clone(nbuf);
  2628. if (!nbuf_clone) {
  2629. QDF_TRACE(QDF_MODULE_ID_DP,
  2630. QDF_TRACE_LEVEL_ERROR,
  2631. FL("nbuf clone failed"));
  2632. break;
  2633. }
  2634. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2635. msdu_info, peer_id,
  2636. NULL);
  2637. if (nbuf_clone) {
  2638. dp_tx_debug("pkt send failed");
  2639. qdf_nbuf_free(nbuf_clone);
  2640. } else {
  2641. if (peer_id != DP_INVALID_PEER)
  2642. DP_STATS_INC_PKT(peer, tx.nawds_mcast,
  2643. 1, qdf_nbuf_len(nbuf));
  2644. }
  2645. }
  2646. }
  2647. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2648. }
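/*
 * Illustrative flow (assumed values): if the source address of a NAWDS
 * mcast frame resolves through the AST to sa_peer_id == 5, the clone
 * for peer 5 is skipped (counted as nawds_mcast_drop) so the frame is
 * not echoed back to its origin; every other non-bss, NAWDS-enabled
 * peer receives its own clone, sent with that peer's peer_id.
 */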
  2649. /**
  2650. * dp_tx_send() - Transmit a frame on a given VAP
  2651. * @soc: DP soc handle
  2652. * @vdev_id: id of DP vdev handle
  2653. * @nbuf: skb
  2654. *
  2655. * Entry point for Core Tx layer (DP_TX) invoked from
  2656. * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
  2657. * cases
  2658. *
  2659. * Return: NULL on success,
  2660. * nbuf when it fails to send
  2661. */
  2662. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2663. qdf_nbuf_t nbuf)
  2664. {
  2665. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2666. uint16_t peer_id = HTT_INVALID_PEER;
  2667. /*
2668. * A memzero would cause additional function-call overhead,
2669. * so rely on static stack initialization instead.
  2670. */
  2671. struct dp_tx_msdu_info_s msdu_info = {0};
  2672. struct dp_vdev *vdev = NULL;
  2673. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2674. return nbuf;
  2675. /*
2676. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2677. * it in the per-packet path.
2678. *
2679. * In this path the vdev memory is already protected by the
2680. * netdev tx lock.
  2681. */
  2682. vdev = soc->vdev_id_map[vdev_id];
  2683. if (qdf_unlikely(!vdev))
  2684. return nbuf;
  2685. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2686. QDF_MAC_ADDR_REF(nbuf->data));
  2687. /*
  2688. * Set Default Host TID value to invalid TID
  2689. * (TID override disabled)
  2690. */
  2691. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  2692. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2693. if (qdf_unlikely(vdev->mesh_vdev)) {
  2694. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  2695. &msdu_info);
  2696. if (!nbuf_mesh) {
  2697. dp_verbose_debug("Extracting mesh metadata failed");
  2698. return nbuf;
  2699. }
  2700. nbuf = nbuf_mesh;
  2701. }
  2702. /*
  2703. * Get HW Queue to use for this frame.
2704. * TCL supports up to 4 DMA rings, of which 3 rings are
  2705. * dedicated for data and 1 for command.
  2706. * "queue_id" maps to one hardware ring.
  2707. * With each ring, we also associate a unique Tx descriptor pool
  2708. * to minimize lock contention for these resources.
  2709. */
  2710. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
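/*
 * Minimal sketch of the ring-selection idea described above; the names
 * and the hash key are hypothetical, not this driver's implementation.
 * Keeping the descriptor pool id equal to the ring id is what lets each
 * ring avoid cross-CPU lock contention on its pool.
 *
 * static inline void example_tx_get_queue(qdf_nbuf_t nbuf,
 *                                         struct dp_tx_queue *queue)
 * {
 *         // hash the skb queue mapping onto the 3 data rings
 *         uint8_t ring = qdf_nbuf_get_queue_mapping(nbuf) % 3;
 *
 *         queue->ring_id = ring;
 *         queue->desc_pool_id = ring;  // one descriptor pool per ring
 * }
 */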
  2711. /*
  2712. * TCL H/W supports 2 DSCP-TID mapping tables.
  2713. * Table 1 - Default DSCP-TID mapping table
2714. * Table 2 - DSCP-TID override table
  2715. *
  2716. * If we need a different DSCP-TID mapping for this vap,
  2717. * call tid_classify to extract DSCP/ToS from frame and
  2718. * map to a TID and store in msdu_info. This is later used
  2719. * to fill in TCL Input descriptor (per-packet TID override).
  2720. */
  2721. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
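/*
 * Minimal sketch of the DSCP-to-TID lookup described above, assuming an
 * IPv4 payload; names are hypothetical, and checks present in the real
 * classifier (EAPOL, VLAN, IPv6 traffic class, etc.) are omitted.
 *
 * static inline uint8_t example_classify_tid(uint8_t *ip_hdr,
 *                                            uint8_t *dscp_tid_map)
 * {
 *         uint8_t tos = ip_hdr[1];             // IPv4 ToS byte
 *         uint8_t dscp = (tos >> 2) & 0x3f;    // DSCP = upper 6 bits
 *
 *         return dscp_tid_map[dscp];           // per-vdev override map
 * }
 */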
  2722. /*
  2723. * Classify the frame and call corresponding
  2724. * "prepare" function which extracts the segment (TSO)
2725. * and fragmentation information (for TSO, SG, ME, or Raw)
  2726. * into MSDU_INFO structure which is later used to fill
  2727. * SW and HW descriptors.
  2728. */
  2729. if (qdf_nbuf_is_tso(nbuf)) {
  2730. dp_verbose_debug("TSO frame %pK", vdev);
  2731. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2732. qdf_nbuf_len(nbuf));
  2733. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2734. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2735. qdf_nbuf_len(nbuf));
  2736. return nbuf;
  2737. }
  2738. goto send_multiple;
  2739. }
  2740. /* SG */
  2741. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2742. struct dp_tx_seg_info_s seg_info = {0};
  2743. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2744. if (!nbuf)
  2745. return NULL;
  2746. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2747. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2748. qdf_nbuf_len(nbuf));
  2749. goto send_multiple;
  2750. }
  2751. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  2752. return NULL;
  2753. /* RAW */
  2754. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  2755. struct dp_tx_seg_info_s seg_info = {0};
  2756. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  2757. if (!nbuf)
  2758. return NULL;
  2759. dp_verbose_debug("Raw frame %pK", vdev);
  2760. goto send_multiple;
  2761. }
  2762. if (qdf_unlikely(vdev->nawds_enabled)) {
  2763. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  2764. qdf_nbuf_data(nbuf);
  2765. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost))
  2766. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf);
  2767. peer_id = DP_INVALID_PEER;
  2768. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2769. 1, qdf_nbuf_len(nbuf));
  2770. }
  2771. /* Single linear frame */
  2772. /*
  2773. * If nbuf is a simple linear frame, use send_single function to
  2774. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2775. * SRNG. There is no need to setup a MSDU extension descriptor.
  2776. */
  2777. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info, peer_id, NULL);
  2778. return nbuf;
  2779. send_multiple:
  2780. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2781. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  2782. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  2783. return nbuf;
  2784. }
  2785. /**
  2786. * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
2787. * case, to avoid the check in the per-packet path.
  2788. * @soc: DP soc handle
  2789. * @vdev_id: id of DP vdev handle
  2790. * @nbuf: skb
  2791. *
  2792. * Entry point for Core Tx layer (DP_TX) invoked from
  2793. * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
  2794. * with special condition to avoid per pkt check in dp_tx_send
  2795. *
  2796. * Return: NULL on success,
  2797. * nbuf when it fails to send
  2798. */
  2799. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2800. uint8_t vdev_id, qdf_nbuf_t nbuf)
  2801. {
  2802. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2803. struct dp_vdev *vdev = NULL;
  2804. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  2805. return nbuf;
  2806. /*
2807. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
2808. * it in the per-packet path.
2809. *
2810. * In this path the vdev memory is already protected by the
2811. * netdev tx lock.
  2812. */
  2813. vdev = soc->vdev_id_map[vdev_id];
  2814. if (qdf_unlikely(!vdev))
  2815. return nbuf;
  2816. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2817. == QDF_STATUS_E_FAILURE)) {
  2818. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2819. return nbuf;
  2820. }
  2821. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  2822. }
  2823. #ifdef UMAC_SUPPORT_PROXY_ARP
  2824. /**
  2825. * dp_tx_proxy_arp() - Tx proxy arp handler
  2826. * @vdev: datapath vdev handle
2827. * @nbuf: sk buffer
  2828. *
  2829. * Return: status
  2830. */
  2831. static inline
  2832. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2833. {
  2834. if (vdev->osif_proxy_arp)
  2835. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
  2836. /*
  2837. * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
  2838. * osif_proxy_arp has a valid function pointer assigned
  2839. * to it
  2840. */
  2841. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  2842. return QDF_STATUS_NOT_INITIALIZED;
  2843. }
  2844. #else
  2845. /**
  2846. * dp_tx_proxy_arp() - Tx proxy arp handler
  2847. * @vdev: datapath vdev handle
2848. * @nbuf: sk buffer
  2849. *
2850. * This function always returns QDF_STATUS_SUCCESS when
2851. * UMAC_SUPPORT_PROXY_ARP is not defined.
  2852. *
  2853. * Return: status
  2854. */
  2855. static inline
  2856. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2857. {
  2858. return QDF_STATUS_SUCCESS;
  2859. }
  2860. #endif
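/*
 * Illustrative effect of the two variants above: in the reinject path
 * below, a frame is copied to the bss peer only when dp_tx_proxy_arp()
 * returns QDF_STATUS_SUCCESS. With UMAC_SUPPORT_PROXY_ARP undefined,
 * the stub always succeeds, so proxy-ARP filtering is effectively a
 * no-op.
 */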
  2861. /**
  2862. * dp_tx_reinject_handler() - Tx Reinject Handler
  2863. * @soc: datapath soc handle
  2864. * @vdev: datapath vdev handle
  2865. * @tx_desc: software descriptor head pointer
  2866. * @status : Tx completion status from HTT descriptor
  2867. *
  2868. * This function reinjects frames back to Target.
  2869. * Todo - Host queue needs to be added
  2870. *
  2871. * Return: none
  2872. */
  2873. static
  2874. void dp_tx_reinject_handler(struct dp_soc *soc,
  2875. struct dp_vdev *vdev,
  2876. struct dp_tx_desc_s *tx_desc,
  2877. uint8_t *status)
  2878. {
  2879. struct dp_peer *peer = NULL;
  2880. uint32_t peer_id = HTT_INVALID_PEER;
  2881. qdf_nbuf_t nbuf = tx_desc->nbuf;
  2882. qdf_nbuf_t nbuf_copy = NULL;
  2883. struct dp_tx_msdu_info_s msdu_info;
  2884. #ifdef WDS_VENDOR_EXTENSION
  2885. int is_mcast = 0, is_ucast = 0;
  2886. int num_peers_3addr = 0;
  2887. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  2888. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  2889. #endif
  2890. qdf_assert(vdev);
  2891. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2892. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2893. dp_tx_debug("Tx reinject path");
  2894. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  2895. qdf_nbuf_len(tx_desc->nbuf));
  2896. #ifdef WDS_VENDOR_EXTENSION
  2897. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  2898. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  2899. } else {
  2900. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  2901. }
  2902. is_ucast = !is_mcast;
  2903. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2904. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2905. if (peer->bss_peer)
  2906. continue;
  2907. /* Detect wds peers that use 3-addr framing for mcast.
2908. * If there are any, the bss_peer is used to send the
2909. * mcast frame using 3-addr format. All wds-enabled
  2910. * peers that use 4-addr framing for mcast frames will
  2911. * be duplicated and sent as 4-addr frames below.
  2912. */
  2913. if (!peer->wds_enabled || !peer->wds_ecm.wds_tx_mcast_4addr) {
  2914. num_peers_3addr = 1;
  2915. break;
  2916. }
  2917. }
  2918. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2919. #endif
  2920. if (qdf_unlikely(vdev->mesh_vdev)) {
  2921. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  2922. } else {
  2923. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2924. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2925. if ((peer->peer_id != HTT_INVALID_PEER) &&
  2926. #ifdef WDS_VENDOR_EXTENSION
  2927. /*
  2928. * . if 3-addr STA, then send on BSS Peer
  2929. * . if Peer WDS enabled and accept 4-addr mcast,
  2930. * send mcast on that peer only
  2931. * . if Peer WDS enabled and accept 4-addr ucast,
  2932. * send ucast on that peer only
  2933. */
  2934. ((peer->bss_peer && num_peers_3addr && is_mcast) ||
  2935. (peer->wds_enabled &&
  2936. ((is_mcast && peer->wds_ecm.wds_tx_mcast_4addr) ||
  2937. (is_ucast && peer->wds_ecm.wds_tx_ucast_4addr))))) {
  2938. #else
  2939. (peer->bss_peer &&
  2940. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  2941. #endif
  2942. peer_id = DP_INVALID_PEER;
  2943. nbuf_copy = qdf_nbuf_copy(nbuf);
  2944. if (!nbuf_copy) {
  2945. dp_tx_debug("nbuf copy failed");
  2946. break;
  2947. }
  2948. nbuf_copy = dp_tx_send_msdu_single(vdev,
  2949. nbuf_copy,
  2950. &msdu_info,
  2951. peer_id,
  2952. NULL);
  2953. if (nbuf_copy) {
  2954. dp_tx_debug("pkt send failed");
  2955. qdf_nbuf_free(nbuf_copy);
  2956. }
  2957. }
  2958. }
  2959. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2960. }
  2961. qdf_nbuf_free(nbuf);
  2962. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  2963. }
  2964. /**
  2965. * dp_tx_inspect_handler() - Tx Inspect Handler
  2966. * @soc: datapath soc handle
  2967. * @vdev: datapath vdev handle
  2968. * @tx_desc: software descriptor head pointer
  2969. * @status : Tx completion status from HTT descriptor
  2970. *
  2971. * Handles Tx frames sent back to Host for inspection
  2972. * (ProxyARP)
  2973. *
  2974. * Return: none
  2975. */
  2976. static void dp_tx_inspect_handler(struct dp_soc *soc,
  2977. struct dp_vdev *vdev,
  2978. struct dp_tx_desc_s *tx_desc,
  2979. uint8_t *status)
  2980. {
  2981. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  2982. "%s Tx inspect path",
  2983. __func__);
  2984. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  2985. qdf_nbuf_len(tx_desc->nbuf));
  2986. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  2987. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  2988. }
  2989. #ifdef MESH_MODE_SUPPORT
  2990. /**
  2991. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  2992. * in mesh meta header
  2993. * @tx_desc: software descriptor head pointer
  2994. * @ts: pointer to tx completion stats
  2995. * Return: none
  2996. */
  2997. static
  2998. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  2999. struct hal_tx_completion_status *ts)
  3000. {
  3001. struct meta_hdr_s *mhdr;
  3002. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3003. if (!tx_desc->msdu_ext_desc) {
  3004. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3005. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3006. "netbuf %pK offset %d",
  3007. netbuf, tx_desc->pkt_offset);
  3008. return;
  3009. }
  3010. }
  3011. if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
  3012. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3013. "netbuf %pK offset %zu", netbuf,
  3014. sizeof(struct meta_hdr_s));
  3015. return;
  3016. }
  3017. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(netbuf);
  3018. mhdr->rssi = ts->ack_frame_rssi;
  3019. mhdr->band = tx_desc->pdev->operating_channel.band;
  3020. mhdr->channel = tx_desc->pdev->operating_channel.num;
  3021. }
  3022. #else
  3023. static
  3024. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3025. struct hal_tx_completion_status *ts)
  3026. {
  3027. }
  3028. #endif
  3029. #ifdef QCA_PEER_EXT_STATS
  3030. /*
  3031. * dp_tx_compute_tid_delay() - Compute per TID delay
  3032. * @stats: Per TID delay stats
  3033. * @tx_desc: Software Tx descriptor
  3034. *
  3035. * Compute the software enqueue and hw enqueue delays and
  3036. * update the respective histograms
  3037. *
  3038. * Return: void
  3039. */
  3040. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3041. struct dp_tx_desc_s *tx_desc)
  3042. {
  3043. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3044. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3045. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3046. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3047. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3048. timestamp_hw_enqueue = tx_desc->timestamp;
  3049. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3050. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3051. timestamp_hw_enqueue);
  3052. /*
  3053. * Update the Tx software enqueue delay and HW enque-Completion delay.
  3054. */
  3055. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3056. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3057. }
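/*
 * Worked example with illustrative values: for a frame timestamped at
 * ingress t0 = 100 ms, enqueued to HW at t1 = 103 ms and completed at
 * t2 = 110 ms, sw_enqueue_delay = t1 - t0 = 3 ms and
 * fwhw_transmit_delay = t2 - t1 = 7 ms; each value lands in the
 * corresponding histogram bucket via dp_hist_update_stats().
 */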
  3058. /*
  3059. * dp_tx_update_peer_ext_stats() - Update the peer extended stats
  3060. * @peer: DP peer context
  3061. * @tx_desc: Tx software descriptor
  3062. * @tid: Transmission ID
3063. * @ring_id: Tx completion ring id / CPU context ID
  3064. *
3065. * Update the peer extended stats. These are additional
3066. * delay stats maintained at the per-msdu level.
  3067. *
  3068. * Return: void
  3069. */
  3070. static void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3071. struct dp_tx_desc_s *tx_desc,
  3072. uint8_t tid, uint8_t ring_id)
  3073. {
  3074. struct dp_pdev *pdev = peer->vdev->pdev;
  3075. struct dp_soc *soc = NULL;
  3076. struct cdp_peer_ext_stats *pext_stats = NULL;
  3077. soc = pdev->soc;
  3078. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3079. return;
  3080. pext_stats = peer->pext_stats;
  3081. qdf_assert(pext_stats);
3082. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3083. /*
3084. * For out-of-range TIDs, clamp to the highest data TID
  3085. */
  3086. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3087. tid = CDP_MAX_DATA_TIDS - 1;
  3088. dp_tx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
  3089. tx_desc);
  3090. }
  3091. #else
  3092. static inline void dp_tx_update_peer_ext_stats(struct dp_peer *peer,
  3093. struct dp_tx_desc_s *tx_desc,
  3094. uint8_t tid, uint8_t ring_id)
  3095. {
  3096. }
  3097. #endif
  3098. /**
3099. * dp_tx_compute_delay() - Compute per-packet Tx delays and update
3100. * the corresponding delay stats
3101. *
3102. * @vdev: vdev handle
  3103. * @tx_desc: tx descriptor
  3104. * @tid: tid value
  3105. * @ring_id: TCL or WBM ring number for transmit path
  3106. * Return: none
  3107. */
  3108. static void dp_tx_compute_delay(struct dp_vdev *vdev,
  3109. struct dp_tx_desc_s *tx_desc,
  3110. uint8_t tid, uint8_t ring_id)
  3111. {
  3112. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3113. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3114. if (qdf_likely(!vdev->pdev->delay_stats_flag))
  3115. return;
  3116. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3117. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3118. timestamp_hw_enqueue = tx_desc->timestamp;
  3119. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3120. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3121. timestamp_hw_enqueue);
  3122. interframe_delay = (uint32_t)(timestamp_ingress -
  3123. vdev->prev_tx_enq_tstamp);
  3124. /*
  3125. * Delay in software enqueue
  3126. */
  3127. dp_update_delay_stats(vdev->pdev, sw_enqueue_delay, tid,
  3128. CDP_DELAY_STATS_SW_ENQ, ring_id);
  3129. /*
  3130. * Delay between packet enqueued to HW and Tx completion
  3131. */
  3132. dp_update_delay_stats(vdev->pdev, fwhw_transmit_delay, tid,
  3133. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id);
  3134. /*
  3135. * Update interframe delay stats calculated at hardstart receive point.
3136. * Value of vdev->prev_tx_enq_tstamp will be 0 for the 1st frame, so
3137. * the interframe delay will not be calculated correctly for it.
3138. * On the other hand, this avoids an extra per-packet check of
3139. * !vdev->prev_tx_enq_tstamp.
  3140. */
  3141. dp_update_delay_stats(vdev->pdev, interframe_delay, tid,
  3142. CDP_DELAY_STATS_TX_INTERFRAME, ring_id);
  3143. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3144. }
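/*
 * Worked example for the interframe delay above (illustrative values):
 * if the previous frame entered at 100 ms and the current one at
 * 112 ms, interframe_delay = 12 ms. For the very first frame
 * prev_tx_enq_tstamp is 0, so that one sample is knowingly bogus in
 * exchange for skipping a per-packet check.
 */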
  3145. #ifdef DISABLE_DP_STATS
  3146. static
  3147. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3148. {
  3149. }
  3150. #else
  3151. static
  3152. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_peer *peer)
  3153. {
  3154. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3155. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3156. if (subtype != QDF_PROTO_INVALID)
  3157. DP_STATS_INC(peer, tx.no_ack_count[subtype], 1);
  3158. }
  3159. #endif
  3160. /**
  3161. * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
  3162. * per wbm ring
  3163. *
  3164. * @tx_desc: software descriptor head pointer
  3165. * @ts: Tx completion status
  3166. * @peer: peer handle
  3167. * @ring_id: ring number
  3168. *
  3169. * Return: None
  3170. */
  3171. static inline void
  3172. dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
  3173. struct hal_tx_completion_status *ts,
  3174. struct dp_peer *peer, uint8_t ring_id)
  3175. {
  3176. struct dp_pdev *pdev = peer->vdev->pdev;
  3177. struct dp_soc *soc = NULL;
  3178. uint8_t mcs, pkt_type;
  3179. uint8_t tid = ts->tid;
  3180. uint32_t length;
  3181. struct cdp_tid_tx_stats *tid_stats;
  3182. if (!pdev)
  3183. return;
  3184. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3185. tid = CDP_MAX_DATA_TIDS - 1;
  3186. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3187. soc = pdev->soc;
  3188. mcs = ts->mcs;
  3189. pkt_type = ts->pkt_type;
  3190. if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
  3191. dp_err("Release source is not from TQM");
  3192. return;
  3193. }
  3194. length = qdf_nbuf_len(tx_desc->nbuf);
  3195. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1, length);
  3196. if (qdf_unlikely(pdev->delay_stats_flag))
  3197. dp_tx_compute_delay(peer->vdev, tx_desc, tid, ring_id);
  3198. DP_STATS_INCC(peer, tx.dropped.age_out, 1,
  3199. (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED));
  3200. DP_STATS_INCC_PKT(peer, tx.dropped.fw_rem, 1, length,
  3201. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3202. DP_STATS_INCC(peer, tx.dropped.fw_rem_notx, 1,
  3203. (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX));
  3204. DP_STATS_INCC(peer, tx.dropped.fw_rem_tx, 1,
  3205. (ts->status == HAL_TX_TQM_RR_REM_CMD_TX));
  3206. DP_STATS_INCC(peer, tx.dropped.fw_reason1, 1,
  3207. (ts->status == HAL_TX_TQM_RR_FW_REASON1));
  3208. DP_STATS_INCC(peer, tx.dropped.fw_reason2, 1,
  3209. (ts->status == HAL_TX_TQM_RR_FW_REASON2));
  3210. DP_STATS_INCC(peer, tx.dropped.fw_reason3, 1,
  3211. (ts->status == HAL_TX_TQM_RR_FW_REASON3));
  3212. /*
  3213. * tx_failed is ideally supposed to be updated from HTT ppdu completion
  3214. * stats. But in IPQ807X/IPQ6018 chipsets owing to hw limitation there
  3215. * are no completions for failed cases. Hence updating tx_failed from
  3216. * data path. Please note that if tx_failed is fixed to be from ppdu,
  3217. * then this has to be removed
  3218. */
  3219. peer->stats.tx.tx_failed = peer->stats.tx.dropped.fw_rem.num +
  3220. peer->stats.tx.dropped.fw_rem_notx +
  3221. peer->stats.tx.dropped.fw_rem_tx +
  3222. peer->stats.tx.dropped.age_out +
  3223. peer->stats.tx.dropped.fw_reason1 +
  3224. peer->stats.tx.dropped.fw_reason2 +
  3225. peer->stats.tx.dropped.fw_reason3;
  3226. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  3227. tid_stats->tqm_status_cnt[ts->status]++;
  3228. }
  3229. if (ts->status != HAL_TX_TQM_RR_FRAME_ACKED) {
  3230. dp_update_no_ack_stats(tx_desc->nbuf, peer);
  3231. return;
  3232. }
  3233. DP_STATS_INCC(peer, tx.ofdma, 1, ts->ofdma);
  3234. DP_STATS_INCC(peer, tx.amsdu_cnt, 1, ts->msdu_part_of_amsdu);
  3235. DP_STATS_INCC(peer, tx.non_amsdu_cnt, 1, !ts->msdu_part_of_amsdu);
  3236. /*
  3237. * Following Rate Statistics are updated from HTT PPDU events from FW.
  3238. * Return from here if HTT PPDU events are enabled.
  3239. */
  3240. if (!(soc->process_tx_status))
  3241. return;
  3242. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3243. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_A)));
  3244. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3245. ((mcs < (MAX_MCS_11A)) && (pkt_type == DOT11_A)));
  3246. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3247. ((mcs >= MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3248. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3249. ((mcs < MAX_MCS_11B) && (pkt_type == DOT11_B)));
  3250. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3251. ((mcs >= MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3252. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3253. ((mcs < MAX_MCS_11A) && (pkt_type == DOT11_N)));
  3254. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3255. ((mcs >= MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3256. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3257. ((mcs < MAX_MCS_11AC) && (pkt_type == DOT11_AC)));
  3258. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[MAX_MCS - 1], 1,
  3259. ((mcs >= (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
  3260. DP_STATS_INCC(peer, tx.pkt_type[pkt_type].mcs_count[mcs], 1,
  3261. ((mcs < (MAX_MCS - 1)) && (pkt_type == DOT11_AX)));
  3262. DP_STATS_INC(peer, tx.sgi_count[ts->sgi], 1);
  3263. DP_STATS_INC(peer, tx.bw[ts->bw], 1);
  3264. DP_STATS_UPD(peer, tx.last_ack_rssi, ts->ack_frame_rssi);
  3265. DP_STATS_INC(peer, tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
  3266. DP_STATS_INCC(peer, tx.stbc, 1, ts->stbc);
  3267. DP_STATS_INCC(peer, tx.ldpc, 1, ts->ldpc);
  3268. DP_STATS_INCC(peer, tx.retries, 1, ts->transmit_cnt > 1);
  3269. #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
  3270. dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
  3271. &peer->stats, ts->peer_id,
  3272. UPDATE_PEER_STATS, pdev->pdev_id);
  3273. #endif
  3274. }
  3275. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  3276. /**
  3277. * dp_tx_flow_pool_lock() - take flow pool lock
  3278. * @soc: core txrx main context
  3279. * @tx_desc: tx desc
  3280. *
  3281. * Return: None
  3282. */
  3283. static inline
  3284. void dp_tx_flow_pool_lock(struct dp_soc *soc,
  3285. struct dp_tx_desc_s *tx_desc)
  3286. {
  3287. struct dp_tx_desc_pool_s *pool;
  3288. uint8_t desc_pool_id;
  3289. desc_pool_id = tx_desc->pool_id;
  3290. pool = &soc->tx_desc[desc_pool_id];
  3291. qdf_spin_lock_bh(&pool->flow_pool_lock);
  3292. }
  3293. /**
  3294. * dp_tx_flow_pool_unlock() - release flow pool lock
  3295. * @soc: core txrx main context
  3296. * @tx_desc: tx desc
  3297. *
  3298. * Return: None
  3299. */
  3300. static inline
  3301. void dp_tx_flow_pool_unlock(struct dp_soc *soc,
  3302. struct dp_tx_desc_s *tx_desc)
  3303. {
  3304. struct dp_tx_desc_pool_s *pool;
  3305. uint8_t desc_pool_id;
  3306. desc_pool_id = tx_desc->pool_id;
  3307. pool = &soc->tx_desc[desc_pool_id];
  3308. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  3309. }
  3310. #else
  3311. static inline
  3312. void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3313. {
  3314. }
  3315. static inline
  3316. void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3317. {
  3318. }
  3319. #endif
  3320. /**
  3321. * dp_tx_notify_completion() - Notify tx completion for this desc
  3322. * @soc: core txrx main context
  3323. * @vdev: datapath vdev handle
  3324. * @tx_desc: tx desc
  3325. * @netbuf: buffer
  3326. * @status: tx status
  3327. *
  3328. * Return: none
  3329. */
  3330. static inline void dp_tx_notify_completion(struct dp_soc *soc,
  3331. struct dp_vdev *vdev,
  3332. struct dp_tx_desc_s *tx_desc,
  3333. qdf_nbuf_t netbuf,
  3334. uint8_t status)
  3335. {
  3336. void *osif_dev;
  3337. ol_txrx_completion_fp tx_compl_cbk = NULL;
  3338. uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
  3339. qdf_assert(tx_desc);
  3340. dp_tx_flow_pool_lock(soc, tx_desc);
  3341. if (!vdev ||
  3342. !vdev->osif_vdev) {
  3343. dp_tx_flow_pool_unlock(soc, tx_desc);
  3344. return;
  3345. }
  3346. osif_dev = vdev->osif_vdev;
  3347. tx_compl_cbk = vdev->tx_comp;
  3348. dp_tx_flow_pool_unlock(soc, tx_desc);
  3349. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3350. flag |= BIT(QDF_TX_RX_STATUS_OK);
  3351. if (tx_compl_cbk)
  3352. tx_compl_cbk(netbuf, osif_dev, flag);
  3353. }
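/*
 * Sketch of a consumer for the notification above; the callback name is
 * hypothetical. The flag word always carries DOWNLOAD_SUCC on this path
 * and additionally carries STATUS_OK when TQM reported the frame as
 * acked.
 *
 * static void example_tx_compl_cbk(qdf_nbuf_t netbuf, void *osif_dev,
 *                                  uint16_t flag)
 * {
 *         if (flag & BIT(QDF_TX_RX_STATUS_OK))
 *                 ;  // peer acked the frame
 * }
 */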
  3354. /** dp_tx_sojourn_stats_process() - Collect sojourn stats
  3355. * @pdev: pdev handle
  3356. * @tid: tid value
  3357. * @txdesc_ts: timestamp from txdesc
  3358. * @ppdu_id: ppdu id
  3359. *
  3360. * Return: none
  3361. */
  3362. #ifdef FEATURE_PERPKT_INFO
  3363. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3364. struct dp_peer *peer,
  3365. uint8_t tid,
  3366. uint64_t txdesc_ts,
  3367. uint32_t ppdu_id)
  3368. {
  3369. uint64_t delta_ms;
  3370. struct cdp_tx_sojourn_stats *sojourn_stats;
  3371. if (qdf_unlikely(!pdev->enhanced_stats_en))
  3372. return;
  3373. if (qdf_unlikely(tid == HTT_INVALID_TID ||
  3374. tid >= CDP_DATA_TID_MAX))
  3375. return;
  3376. if (qdf_unlikely(!pdev->sojourn_buf))
  3377. return;
  3378. sojourn_stats = (struct cdp_tx_sojourn_stats *)
  3379. qdf_nbuf_data(pdev->sojourn_buf);
  3380. sojourn_stats->cookie = (void *)peer->rdkstats_ctx;
  3381. delta_ms = qdf_ktime_to_ms(qdf_ktime_get()) -
  3382. txdesc_ts;
  3383. qdf_ewma_tx_lag_add(&peer->avg_sojourn_msdu[tid],
  3384. delta_ms);
  3385. sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
  3386. sojourn_stats->num_msdus[tid] = 1;
  3387. sojourn_stats->avg_sojourn_msdu[tid].internal =
  3388. peer->avg_sojourn_msdu[tid].internal;
  3389. dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
  3390. pdev->sojourn_buf, HTT_INVALID_PEER,
  3391. WDI_NO_VAL, pdev->pdev_id);
  3392. sojourn_stats->sum_sojourn_msdu[tid] = 0;
  3393. sojourn_stats->num_msdus[tid] = 0;
  3394. sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
  3395. }
  3396. #else
  3397. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3398. struct dp_peer *peer,
  3399. uint8_t tid,
  3400. uint64_t txdesc_ts,
  3401. uint32_t ppdu_id)
  3402. {
  3403. }
  3404. #endif
  3405. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  3406. /**
  3407. * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
  3408. * @soc: dp_soc handle
  3409. * @desc: Tx Descriptor
  3410. * @ts: HAL Tx completion descriptor contents
  3411. *
  3412. * This function is used to send tx completion to packet capture
  3413. */
  3414. void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  3415. struct dp_tx_desc_s *desc,
  3416. struct hal_tx_completion_status *ts)
  3417. {
  3418. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
  3419. desc, ts->peer_id,
  3420. WDI_NO_VAL, desc->pdev->pdev_id);
  3421. }
  3422. #endif
  3423. /**
  3424. * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
  3425. * @soc: DP Soc handle
  3426. * @tx_desc: software Tx descriptor
  3427. * @ts : Tx completion status from HAL/HTT descriptor
  3428. *
  3429. * Return: none
  3430. */
  3431. static inline void
  3432. dp_tx_comp_process_desc(struct dp_soc *soc,
  3433. struct dp_tx_desc_s *desc,
  3434. struct hal_tx_completion_status *ts,
  3435. struct dp_peer *peer)
  3436. {
  3437. uint64_t time_latency = 0;
  3438. /*
  3439. * m_copy/tx_capture modes are not supported for
  3440. * scatter gather packets
  3441. */
  3442. if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
  3443. time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
  3444. desc->timestamp);
  3445. }
  3446. dp_send_completion_to_pkt_capture(soc, desc, ts);
  3447. if (!(desc->msdu_ext_desc)) {
  3448. dp_tx_enh_unmap(soc, desc);
  3449. if (QDF_STATUS_SUCCESS ==
  3450. dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer)) {
  3451. return;
  3452. }
  3453. if (QDF_STATUS_SUCCESS ==
  3454. dp_get_completion_indication_for_stack(soc,
  3455. desc->pdev,
  3456. peer, ts,
  3457. desc->nbuf,
  3458. time_latency)) {
  3459. dp_send_completion_to_stack(soc,
  3460. desc->pdev,
  3461. ts->peer_id,
  3462. ts->ppdu_id,
  3463. desc->nbuf);
  3464. return;
  3465. }
  3466. }
  3467. desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
  3468. dp_tx_comp_free_buf(soc, desc);
  3469. }
  3470. #ifdef DISABLE_DP_STATS
  3471. /**
  3472. * dp_tx_update_connectivity_stats() - update tx connectivity stats
  3473. * @soc: core txrx main context
  3474. * @tx_desc: tx desc
  3475. * @status: tx status
  3476. *
  3477. * Return: none
  3478. */
  3479. static inline
  3480. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3481. struct dp_vdev *vdev,
  3482. struct dp_tx_desc_s *tx_desc,
  3483. uint8_t status)
  3484. {
  3485. }
  3486. #else
  3487. static inline
  3488. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  3489. struct dp_vdev *vdev,
  3490. struct dp_tx_desc_s *tx_desc,
  3491. uint8_t status)
  3492. {
  3493. void *osif_dev;
  3494. ol_txrx_stats_rx_fp stats_cbk;
  3495. uint8_t pkt_type;
  3496. qdf_assert(tx_desc);
  3497. if (!vdev ||
  3498. !vdev->osif_vdev ||
  3499. !vdev->stats_cb)
  3500. return;
  3501. osif_dev = vdev->osif_vdev;
  3502. stats_cbk = vdev->stats_cb;
  3503. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
  3504. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3505. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
  3506. &pkt_type);
  3507. }
  3508. #endif
  3509. #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
  3510. void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3511. uint32_t delta_tsf)
  3512. {
  3513. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3514. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3515. DP_MOD_ID_CDP);
  3516. if (!vdev) {
  3517. dp_err_rl("vdev %d does not exist", vdev_id);
  3518. return;
  3519. }
  3520. vdev->delta_tsf = delta_tsf;
  3521. dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
  3522. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3523. }
  3524. QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
  3525. uint8_t vdev_id, bool enable)
  3526. {
  3527. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3528. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3529. DP_MOD_ID_CDP);
  3530. if (!vdev) {
  3531. dp_err_rl("vdev %d does not exist", vdev_id);
  3532. return QDF_STATUS_E_FAILURE;
  3533. }
  3534. qdf_atomic_set(&vdev->ul_delay_report, enable);
  3535. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3536. return QDF_STATUS_SUCCESS;
  3537. }
  3538. QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3539. uint32_t *val)
  3540. {
  3541. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3542. struct dp_vdev *vdev;
  3543. uint32_t delay_accum;
  3544. uint32_t pkts_accum;
  3545. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  3546. if (!vdev) {
  3547. dp_err_rl("vdev %d does not exist", vdev_id);
  3548. return QDF_STATUS_E_FAILURE;
  3549. }
  3550. if (!qdf_atomic_read(&vdev->ul_delay_report)) {
  3551. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3552. return QDF_STATUS_E_FAILURE;
  3553. }
  3554. /* Average uplink delay based on current accumulated values */
  3555. delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
  3556. pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
3557. *val = pkts_accum ? (delay_accum / pkts_accum) : 0; /* avoid div-by-zero */
  3558. dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
  3559. delay_accum, pkts_accum);
  3560. /* Reset accumulated values to 0 */
  3561. qdf_atomic_set(&vdev->ul_delay_accum, 0);
  3562. qdf_atomic_set(&vdev->ul_pkts_accum, 0);
  3563. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  3564. return QDF_STATUS_SUCCESS;
  3565. }
  3566. static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  3567. struct hal_tx_completion_status *ts)
  3568. {
  3569. uint32_t buffer_ts;
  3570. uint32_t delta_tsf;
  3571. uint32_t ul_delay;
3572. /* When tx_rate_stats_info_valid is 0, the tsf is invalid; bail out */
  3573. if (!ts->valid)
  3574. return;
  3575. if (qdf_unlikely(!vdev)) {
  3576. dp_info_rl("vdev is null or delete in progrss");
  3577. return;
  3578. }
  3579. if (!qdf_atomic_read(&vdev->ul_delay_report))
  3580. return;
  3581. delta_tsf = vdev->delta_tsf;
  3582. /* buffer_timestamp is in units of 1024 us and is [31:13] of
  3583. * WBM_RELEASE_RING_4. After left shift 10 bits, it's
  3584. * valid up to 29 bits.
  3585. */
  3586. buffer_ts = ts->buffer_timestamp << 10;
  3587. ul_delay = ts->tsf - buffer_ts - delta_tsf;
  3588. ul_delay &= 0x1FFFFFFF; /* mask 29 BITS */
  3589. if (ul_delay > 0x1000000) {
  3590. dp_info_rl("----------------------\n"
  3591. "Tx completion status:\n"
  3592. "----------------------\n"
  3593. "release_src = %d\n"
  3594. "ppdu_id = 0x%x\n"
  3595. "release_reason = %d\n"
  3596. "tsf = %u (0x%x)\n"
  3597. "buffer_timestamp = %u (0x%x)\n"
  3598. "delta_tsf = %u (0x%x)\n",
  3599. ts->release_src, ts->ppdu_id, ts->status,
  3600. ts->tsf, ts->tsf, ts->buffer_timestamp,
  3601. ts->buffer_timestamp, delta_tsf, delta_tsf);
  3602. return;
  3603. }
  3604. ul_delay /= 1000; /* in unit of ms */
  3605. qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
  3606. qdf_atomic_inc(&vdev->ul_pkts_accum);
  3607. }
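/*
 * Worked example with illustrative numbers: buffer_timestamp = 5000
 * gives buffer_ts = 5000 << 10 = 5120000 us. With ts->tsf = 5123000 us
 * and delta_tsf = 1000 us, ul_delay = 5123000 - 5120000 - 1000 =
 * 2000 us, i.e. 2 ms after the divide by 1000.
 */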
  3608. #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
  3609. static inline
  3610. void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  3611. struct hal_tx_completion_status *ts)
  3612. {
  3613. }
  3614. #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
  3615. /**
  3616. * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
  3617. * @soc: DP soc handle
  3618. * @tx_desc: software descriptor head pointer
  3619. * @ts: Tx completion status
  3620. * @peer: peer handle
  3621. * @ring_id: ring number
  3622. *
  3623. * Return: none
  3624. */
  3625. static inline
  3626. void dp_tx_comp_process_tx_status(struct dp_soc *soc,
  3627. struct dp_tx_desc_s *tx_desc,
  3628. struct hal_tx_completion_status *ts,
  3629. struct dp_peer *peer, uint8_t ring_id)
  3630. {
  3631. uint32_t length;
  3632. qdf_ether_header_t *eh;
  3633. struct dp_vdev *vdev = NULL;
  3634. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3635. enum qdf_dp_tx_rx_status dp_status;
  3636. if (!nbuf) {
  3637. dp_info_rl("invalid tx descriptor. nbuf NULL");
  3638. goto out;
  3639. }
  3640. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  3641. length = qdf_nbuf_len(nbuf);
  3642. dp_status = dp_tx_hw_to_qdf(ts->status);
  3643. DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
  3644. QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
  3645. QDF_TRACE_DEFAULT_PDEV_ID,
  3646. qdf_nbuf_data_addr(nbuf),
  3647. sizeof(qdf_nbuf_data(nbuf)),
  3648. tx_desc->id, ts->status, dp_status));
  3649. dp_tx_comp_debug("-------------------- \n"
  3650. "Tx Completion Stats: \n"
  3651. "-------------------- \n"
  3652. "ack_frame_rssi = %d \n"
  3653. "first_msdu = %d \n"
  3654. "last_msdu = %d \n"
  3655. "msdu_part_of_amsdu = %d \n"
  3656. "rate_stats valid = %d \n"
  3657. "bw = %d \n"
  3658. "pkt_type = %d \n"
  3659. "stbc = %d \n"
  3660. "ldpc = %d \n"
  3661. "sgi = %d \n"
  3662. "mcs = %d \n"
  3663. "ofdma = %d \n"
  3664. "tones_in_ru = %d \n"
  3665. "tsf = %d \n"
  3666. "ppdu_id = %d \n"
  3667. "transmit_cnt = %d \n"
  3668. "tid = %d \n"
  3669. "peer_id = %d\n",
  3670. ts->ack_frame_rssi, ts->first_msdu,
  3671. ts->last_msdu, ts->msdu_part_of_amsdu,
  3672. ts->valid, ts->bw, ts->pkt_type, ts->stbc,
  3673. ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
  3674. ts->tones_in_ru, ts->tsf, ts->ppdu_id,
  3675. ts->transmit_cnt, ts->tid, ts->peer_id);
  3676. /* Update SoC level stats */
  3677. DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
  3678. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  3679. if (!peer) {
  3680. dp_info_rl("peer is null or deletion in progress");
  3681. DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
  3682. goto out;
  3683. }
  3684. vdev = peer->vdev;
  3685. dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
  3686. dp_tx_update_uplink_delay(soc, vdev, ts);
  3687. /* Update per-packet stats for mesh mode */
  3688. if (qdf_unlikely(vdev->mesh_vdev) &&
  3689. !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
  3690. dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
  3691. /* Update peer level stats */
  3692. if (qdf_unlikely(peer->bss_peer && vdev->opmode == wlan_op_mode_ap)) {
  3693. if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
  3694. DP_STATS_INC_PKT(peer, tx.mcast, 1, length);
  3695. if ((peer->vdev->tx_encap_type ==
  3696. htt_cmn_pkt_type_ethernet) &&
  3697. QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  3698. DP_STATS_INC_PKT(peer, tx.bcast, 1, length);
  3699. }
  3700. }
  3701. } else {
  3702. DP_STATS_INC_PKT(peer, tx.ucast, 1, length);
  3703. if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
  3704. DP_STATS_INC_PKT(peer, tx.tx_success, 1, length);
  3705. if (qdf_unlikely(peer->in_twt)) {
  3706. DP_STATS_INC_PKT(peer,
  3707. tx.tx_success_twt,
  3708. 1, length);
  3709. }
  3710. }
  3711. }
  3712. dp_tx_update_peer_stats(tx_desc, ts, peer, ring_id);
  3713. dp_tx_update_peer_ext_stats(peer, tx_desc, ts->tid, ring_id);
  3714. #ifdef QCA_SUPPORT_RDK_STATS
  3715. if (soc->rdkstats_enabled)
  3716. dp_tx_sojourn_stats_process(vdev->pdev, peer, ts->tid,
  3717. tx_desc->timestamp,
  3718. ts->ppdu_id);
  3719. #endif
  3720. out:
  3721. return;
  3722. }
  3723. /**
  3724. * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
  3725. * @soc: core txrx main context
  3726. * @comp_head: software descriptor head pointer
  3727. * @ring_id: ring number
  3728. *
  3729. * This function will process batch of descriptors reaped by dp_tx_comp_handler
  3730. * and release the software descriptors after processing is complete
  3731. *
  3732. * Return: none
  3733. */
  3734. static void
  3735. dp_tx_comp_process_desc_list(struct dp_soc *soc,
  3736. struct dp_tx_desc_s *comp_head, uint8_t ring_id)
  3737. {
  3738. struct dp_tx_desc_s *desc;
  3739. struct dp_tx_desc_s *next;
  3740. struct hal_tx_completion_status ts;
  3741. struct dp_peer *peer = NULL;
  3742. uint16_t peer_id = DP_INVALID_PEER;
  3743. qdf_nbuf_t netbuf;
  3744. desc = comp_head;
  3745. while (desc) {
  3746. if (peer_id != desc->peer_id) {
  3747. if (peer)
  3748. dp_peer_unref_delete(peer,
  3749. DP_MOD_ID_TX_COMP);
  3750. peer_id = desc->peer_id;
  3751. peer = dp_peer_get_ref_by_id(soc, peer_id,
  3752. DP_MOD_ID_TX_COMP);
  3753. }
  3754. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  3755. struct dp_pdev *pdev = desc->pdev;
  3756. if (qdf_likely(peer)) {
  3757. /*
3758. * Increment peer statistics; only a minimal
3759. * update is done in this path
  3760. */
  3761. DP_STATS_INC_PKT(peer, tx.comp_pkt, 1,
  3762. desc->length);
  3763. if (desc->tx_status !=
  3764. HAL_TX_TQM_RR_FRAME_ACKED)
  3765. DP_STATS_INC(peer, tx.tx_failed, 1);
  3766. }
  3767. qdf_assert(pdev);
  3768. dp_tx_outstanding_dec(pdev);
  3769. /*
3770. * Calling a QDF wrapper here creates a significant
3771. * performance impact, so the wrapper call is avoided here
  3772. */
  3773. next = desc->next;
  3774. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  3775. desc->id, DP_TX_COMP_UNMAP);
  3776. qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
  3777. desc->nbuf,
  3778. desc->dma_addr,
  3779. QDF_DMA_TO_DEVICE,
  3780. desc->length);
  3781. qdf_nbuf_free(desc->nbuf);
  3782. dp_tx_desc_free(soc, desc, desc->pool_id);
  3783. desc = next;
  3784. continue;
  3785. }
  3786. hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
  3787. dp_tx_comp_process_tx_status(soc, desc, &ts, peer, ring_id);
  3788. netbuf = desc->nbuf;
  3789. /* check tx complete notification */
  3790. if (peer && qdf_nbuf_tx_notify_comp_get(netbuf))
  3791. dp_tx_notify_completion(soc, peer->vdev, desc,
  3792. netbuf, ts.status);
  3793. dp_tx_comp_process_desc(soc, desc, &ts, peer);
  3794. next = desc->next;
  3795. dp_tx_desc_release(desc, desc->pool_id);
  3796. desc = next;
  3797. }
  3798. if (peer)
  3799. dp_peer_unref_delete(peer, DP_MOD_ID_TX_COMP);
  3800. }
  3801. /**
  3802. * dp_tx_process_htt_completion() - Tx HTT Completion Indication Handler
  3803. * @soc: Handle to DP soc structure
  3804. * @tx_desc: software descriptor head pointer
  3805. * @status : Tx completion status from HTT descriptor
  3806. * @ring_id: ring number
  3807. *
  3808. * This function will process HTT Tx indication messages from Target
  3809. *
  3810. * Return: none
  3811. */
  3812. static
  3813. void dp_tx_process_htt_completion(struct dp_soc *soc,
  3814. struct dp_tx_desc_s *tx_desc, uint8_t *status,
  3815. uint8_t ring_id)
  3816. {
  3817. uint8_t tx_status;
  3818. struct dp_pdev *pdev;
  3819. struct dp_vdev *vdev;
  3820. struct hal_tx_completion_status ts = {0};
  3821. uint32_t *htt_desc = (uint32_t *)status;
  3822. struct dp_peer *peer;
  3823. struct cdp_tid_tx_stats *tid_stats = NULL;
  3824. struct htt_soc *htt_handle;
  3825. uint8_t vdev_id;
  3826. tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
  3827. htt_handle = (struct htt_soc *)soc->htt_handle;
  3828. htt_wbm_event_record(htt_handle->htt_logger_handle, tx_status, status);
  3829. /*
3830. * There can be a scenario where WBM consumes a descriptor enqueued
3831. * from TQM2WBM first, so the TQM completion can happen before the MEC
3832. * notification comes from FW2WBM. Avoid accessing any field of the tx
3833. * descriptor in case of a MEC notify.
  3834. */
  3835. if (tx_status == HTT_TX_FW2WBM_TX_STATUS_MEC_NOTIFY) {
  3836. /*
  3837. * Get vdev id from HTT status word in case of MEC
  3838. * notification
  3839. */
  3840. vdev_id = HTT_TX_WBM_COMPLETION_V2_VDEV_ID_GET(htt_desc[3]);
  3841. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3842. return;
  3843. vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3844. DP_MOD_ID_HTT_COMP);
  3845. if (!vdev)
  3846. return;
  3847. dp_tx_mec_handler(vdev, status);
  3848. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
  3849. return;
  3850. }
  3851. /*
  3852. * If the descriptor is already freed in vdev_detach,
  3853. * continue to next descriptor
  3854. */
  3855. if ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) && !tx_desc->flags) {
  3856. dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d", tx_desc->id);
  3857. return;
  3858. }
  3859. pdev = tx_desc->pdev;
  3860. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  3861. dp_tx_comp_info_rl("pdev in down state %d", tx_desc->id);
  3862. tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
  3863. dp_tx_comp_free_buf(soc, tx_desc);
  3864. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3865. return;
  3866. }
  3867. qdf_assert(tx_desc->pdev);
  3868. vdev_id = tx_desc->vdev_id;
  3869. vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  3870. DP_MOD_ID_HTT_COMP);
  3871. if (!vdev)
  3872. return;
  3873. switch (tx_status) {
  3874. case HTT_TX_FW2WBM_TX_STATUS_OK:
  3875. case HTT_TX_FW2WBM_TX_STATUS_DROP:
  3876. case HTT_TX_FW2WBM_TX_STATUS_TTL:
  3877. {
  3878. uint8_t tid;
  3879. if (HTT_TX_WBM_COMPLETION_V2_VALID_GET(htt_desc[2])) {
  3880. ts.peer_id =
  3881. HTT_TX_WBM_COMPLETION_V2_SW_PEER_ID_GET(
  3882. htt_desc[2]);
  3883. ts.tid =
  3884. HTT_TX_WBM_COMPLETION_V2_TID_NUM_GET(
  3885. htt_desc[2]);
  3886. } else {
  3887. ts.peer_id = HTT_INVALID_PEER;
  3888. ts.tid = HTT_INVALID_TID;
  3889. }
  3890. ts.ppdu_id =
  3891. HTT_TX_WBM_COMPLETION_V2_SCH_CMD_ID_GET(
  3892. htt_desc[1]);
  3893. ts.ack_frame_rssi =
  3894. HTT_TX_WBM_COMPLETION_V2_ACK_FRAME_RSSI_GET(
  3895. htt_desc[1]);
  3896. ts.tsf = htt_desc[3];
  3897. ts.first_msdu = 1;
  3898. ts.last_msdu = 1;
  3899. tid = ts.tid;
  3900. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3901. tid = CDP_MAX_DATA_TIDS - 1;
  3902. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3903. if (qdf_unlikely(pdev->delay_stats_flag))
  3904. dp_tx_compute_delay(vdev, tx_desc, tid, ring_id);
  3905. if (tx_status < CDP_MAX_TX_HTT_STATUS) {
  3906. tid_stats->htt_status_cnt[tx_status]++;
  3907. }
  3908. peer = dp_peer_get_ref_by_id(soc, ts.peer_id,
  3909. DP_MOD_ID_HTT_COMP);
  3910. dp_tx_comp_process_tx_status(soc, tx_desc, &ts, peer, ring_id);
  3911. dp_tx_comp_process_desc(soc, tx_desc, &ts, peer);
  3912. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3913. if (qdf_likely(peer))
  3914. dp_peer_unref_delete(peer, DP_MOD_ID_HTT_COMP);
  3915. break;
  3916. }
  3917. case HTT_TX_FW2WBM_TX_STATUS_REINJECT:
  3918. {
  3919. dp_tx_reinject_handler(soc, vdev, tx_desc, status);
  3920. break;
  3921. }
  3922. case HTT_TX_FW2WBM_TX_STATUS_INSPECT:
  3923. {
  3924. dp_tx_inspect_handler(soc, vdev, tx_desc, status);
  3925. break;
  3926. }
  3927. default:
  3928. dp_tx_comp_debug("Invalid HTT tx_status %d\n",
  3929. tx_status);
  3930. break;
  3931. }
  3932. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
  3933. }
  3934. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  3935. static inline
  3936. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  3937. int max_reap_limit)
  3938. {
  3939. bool limit_hit = false;
3940. limit_hit = (num_reaped >= max_reap_limit);
  3942. if (limit_hit)
  3943. DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
  3944. return limit_hit;
  3945. }
  3946. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3947. {
  3948. return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
  3949. }
  3950. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  3951. {
  3952. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  3953. return cfg->tx_comp_loop_pkt_limit;
  3954. }
  3955. #else
  3956. static inline
  3957. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  3958. int max_reap_limit)
  3959. {
  3960. return false;
  3961. }
  3962. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  3963. {
  3964. return false;
  3965. }
  3966. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  3967. {
  3968. return 0;
  3969. }
  3970. #endif
  3971. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  3972. static inline int
  3973. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  3974. int *max_reap_limit)
  3975. {
  3976. return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
  3977. max_reap_limit);
  3978. }
  3979. #else
  3980. static inline int
  3981. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  3982. int *max_reap_limit)
  3983. {
  3984. return 0;
  3985. }
  3986. #endif
  3987. uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
  3988. hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
  3989. uint32_t quota)
  3990. {
  3991. void *tx_comp_hal_desc;
  3992. uint8_t buffer_src;
  3993. struct dp_tx_desc_s *tx_desc = NULL;
  3994. struct dp_tx_desc_s *head_desc = NULL;
  3995. struct dp_tx_desc_s *tail_desc = NULL;
  3996. uint32_t num_processed = 0;
  3997. uint32_t count;
  3998. uint32_t num_avail_for_reap = 0;
  3999. bool force_break = false;
  4000. struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
  4001. int max_reap_limit, ring_near_full;
  4002. DP_HIST_INIT();
  4003. more_data:
  4004. /* Re-initialize local variables to be re-used */
  4005. head_desc = NULL;
  4006. tail_desc = NULL;
  4007. count = 0;
  4008. max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
  4009. ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
  4010. &max_reap_limit);
  4011. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  4012. dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
  4013. return 0;
  4014. }
  4015. num_avail_for_reap = hal_srng_dst_num_valid(soc->hal_soc, hal_ring_hdl, 0);
  4016. if (num_avail_for_reap >= quota)
  4017. num_avail_for_reap = quota;
  4018. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
  4019. /* Find head descriptor from completion ring */
  4020. while (qdf_likely(num_avail_for_reap--)) {
  4021. tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  4022. if (qdf_unlikely(!tx_comp_hal_desc))
  4023. break;
  4024. buffer_src = hal_tx_comp_get_buffer_source(soc->hal_soc,
  4025. tx_comp_hal_desc);
4026. /* If this buffer was not released by TQM or FW, then it is not
4027. * a Tx completion indication; assert */
  4028. if (qdf_unlikely(buffer_src !=
  4029. HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
  4030. (qdf_unlikely(buffer_src !=
  4031. HAL_TX_COMP_RELEASE_SOURCE_FW))) {
  4032. uint8_t wbm_internal_error;
  4033. dp_err_rl(
  4034. "Tx comp release_src != TQM | FW but from %d",
  4035. buffer_src);
  4036. hal_dump_comp_desc(tx_comp_hal_desc);
  4037. DP_STATS_INC(soc, tx.invalid_release_source, 1);
  4038. /* When WBM sees NULL buffer_addr_info in any of
  4039. * ingress rings it sends an error indication,
  4040. * with wbm_internal_error=1, to a specific ring.
  4041. * The WBM2SW ring used to indicate these errors is
  4042. * fixed in HW, and that ring is being used as Tx
  4043. * completion ring. These errors are not related to
  4044. * Tx completions, and should just be ignored
  4045. */
  4046. wbm_internal_error = hal_get_wbm_internal_error(
  4047. soc->hal_soc,
  4048. tx_comp_hal_desc);
  4049. if (wbm_internal_error) {
  4050. dp_err_rl("Tx comp wbm_internal_error!!");
  4051. DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
  4052. if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
  4053. buffer_src)
  4054. dp_handle_wbm_internal_error(
  4055. soc,
  4056. tx_comp_hal_desc,
  4057. hal_tx_comp_get_buffer_type(
  4058. tx_comp_hal_desc));
  4059. } else {
  4060. dp_err_rl("Tx comp wbm_internal_error false");
  4061. DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
  4062. }
  4063. continue;
  4064. }
  4065. soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
  4066. tx_comp_hal_desc,
  4067. &tx_desc);
  4068. if (!tx_desc) {
  4069. dp_err("unable to retrieve tx_desc!");
  4070. QDF_BUG(0);
  4071. continue;
  4072. }
  4073. tx_desc->buffer_src = buffer_src;
  4074. /*
  4075. * If the release source is FW, process the HTT status
  4076. */
  4077. if (qdf_unlikely(buffer_src ==
  4078. HAL_TX_COMP_RELEASE_SOURCE_FW)) {
  4079. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  4080. hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
  4081. htt_tx_status);
  4082. dp_tx_process_htt_completion(soc, tx_desc,
  4083. htt_tx_status, ring_id);
  4084. } else {
  4085. tx_desc->peer_id =
  4086. hal_tx_comp_get_peer_id(tx_comp_hal_desc);
  4087. tx_desc->tx_status =
  4088. hal_tx_comp_get_tx_status(tx_comp_hal_desc);
  4089. tx_desc->buffer_src = buffer_src;
  4090. /*
4091. * If fast completion mode is enabled, the extended
4092. * metadata is not copied from the descriptor
  4093. */
  4094. if (qdf_likely(tx_desc->flags &
  4095. DP_TX_DESC_FLAG_SIMPLE))
  4096. goto add_to_pool;
  4097. /*
  4098. * If the descriptor is already freed in vdev_detach,
  4099. * continue to next descriptor
  4100. */
  4101. if (qdf_unlikely
  4102. ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
  4103. !tx_desc->flags)) {
  4104. dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
  4105. tx_desc->id);
  4106. continue;
  4107. }
  4108. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  4109. dp_tx_comp_info_rl("pdev in down state %d",
  4110. tx_desc->id);
  4111. tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
  4112. dp_tx_comp_free_buf(soc, tx_desc);
  4113. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  4114. goto next_desc;
  4115. }
  4116. if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
  4117. !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
  4118. dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
  4119. tx_desc->flags, tx_desc->id);
  4120. qdf_assert_always(0);
  4121. }
  4122. /* Collect hw completion contents */
  4123. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  4124. &tx_desc->comp, 1);
  4125. add_to_pool:
  4126. DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
  4127. /* First ring descriptor on the cycle */
  4128. if (!head_desc) {
  4129. head_desc = tx_desc;
  4130. tail_desc = tx_desc;
  4131. }
  4132. tail_desc->next = tx_desc;
  4133. tx_desc->next = NULL;
  4134. tail_desc = tx_desc;
  4135. }
  4136. next_desc:
  4137. num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
  4138. /*
  4139. * Processed packet count is more than given quota
  4140. * stop to processing
  4141. */
  4142. count++;
  4143. if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
  4144. break;
  4145. }
  4146. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  4147. /* Process the reaped descriptors */
  4148. if (head_desc)
  4149. dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
  4150. /*
  4151. * If we are processing in near-full condition, there are 3 scenario
  4152. * 1) Ring entries has reached critical state
  4153. * 2) Ring entries are still near high threshold
  4154. * 3) Ring entries are below the safe level
  4155. *
  4156. * One more loop will move te state to normal processing and yield
  4157. */
  4158. if (ring_near_full)
  4159. goto more_data;
  4160. if (dp_tx_comp_enable_eol_data_check(soc)) {
  4161. if (num_processed >= quota)
  4162. force_break = true;
  4163. if (!force_break &&
  4164. hal_srng_dst_peek_sync_locked(soc->hal_soc,
  4165. hal_ring_hdl)) {
  4166. DP_STATS_INC(soc, tx.hp_oos2, 1);
  4167. if (!hif_exec_should_yield(soc->hif_handle,
  4168. int_ctx->dp_intr_id))
  4169. goto more_data;
  4170. }
  4171. }
  4172. DP_TX_HIST_STATS_PER_PDEV();
  4173. return num_processed;
  4174. }
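/*
 * The reap loop above batches completed descriptors onto a singly linked
 * list (head_desc/tail_desc) so the SRNG access window can be closed
 * before the heavier per-descriptor work runs in
 * dp_tx_comp_process_desc_list(). A minimal sketch of the same pattern,
 * using hypothetical names that are not part of this driver:
 *
 *	struct node { struct node *next; };
 *	struct node *head = NULL, *tail = NULL, *n;
 *
 *	while ((n = reap_next_entry()) != NULL) {
 *		if (!head)
 *			head = n;
 *		else
 *			tail->next = n;
 *		n->next = NULL;
 *		tail = n;
 *	}
 *	release_ring_access();
 *	if (head)
 *		process_list(head);
 */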
#ifdef FEATURE_WLAN_TDLS
qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_TDLS);

	if (!vdev) {
		dp_err("vdev handle for id %d is NULL", vdev_id);
		return NULL;
	}

	if (tx_spec & OL_TX_SPEC_NO_FREE)
		vdev->is_tdls_frame = true;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);

	return dp_tx_send(soc_hdl, vdev_id, msdu_list);
}
#endif
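/*
 * Usage sketch (hypothetical caller; the exact CDP wiring is an
 * assumption): a TDLS frame whose buffer the caller wants to retain
 * ownership of is sent with OL_TX_SPEC_NO_FREE, and frames the Tx path
 * did not accept come back to the caller:
 *
 *	rejected = dp_tx_non_std(soc_hdl, vdev_id,
 *				 OL_TX_SPEC_NO_FREE, msdu);
 *	if (rejected)
 *		handle_unsent_frames(rejected);
 */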
static void dp_tx_vdev_update_feature_flags(struct dp_vdev *vdev)
{
	struct wlan_cfg_dp_soc_ctxt *cfg;
	struct dp_soc *soc;

	soc = vdev->pdev->soc;
	if (!soc)
		return;

	cfg = soc->wlan_cfg_ctx;
	if (!cfg)
		return;

	if (vdev->opmode == wlan_op_mode_ndi)
		vdev->csum_enabled = wlan_cfg_get_nan_checksum_offload(cfg);
	else if ((vdev->subtype == wlan_op_subtype_p2p_device) ||
		 (vdev->subtype == wlan_op_subtype_p2p_cli) ||
		 (vdev->subtype == wlan_op_subtype_p2p_go))
		vdev->csum_enabled = wlan_cfg_get_p2p_checksum_offload(cfg);
	else
		vdev->csum_enabled = wlan_cfg_get_checksum_offload(cfg);
}

/**
 * dp_tx_vdev_attach() - attach vdev to dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
{
	int pdev_id;

	/*
	 * Fill HTT TCL Metadata with Vdev ID and MAC ID
	 */
	HTT_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
				     HTT_TCL_METADATA_TYPE_VDEV_BASED);

	HTT_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
					vdev->vdev_id);

	pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
						       vdev->pdev->pdev_id);
	HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);

	/*
	 * Set HTT Extension Valid bit to 0 by default
	 */
	HTT_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);

	dp_tx_vdev_update_search_flags(vdev);

	dp_tx_vdev_update_feature_flags(vdev);

	return QDF_STATUS_SUCCESS;
}
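/*
 * The HTT_TX_TCL_METADATA_*_SET() macros above pack their fields into the
 * single 32-bit vdev->htt_tcl_metadata word that accompanies every frame
 * queued to TCL. Conceptual sketch of such a SET macro with made-up
 * shift/mask values (the real definitions live in the HTT headers):
 *
 *	#define EXAMPLE_TYPE_S	0
 *	#define EXAMPLE_TYPE_M	0x3
 *	#define EXAMPLE_TYPE_SET(word, val)				   \
 *		((word) = ((word) & ~(EXAMPLE_TYPE_M << EXAMPLE_TYPE_S)) | \
 *			  (((val) & EXAMPLE_TYPE_M) << EXAMPLE_TYPE_S))
 */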
#ifndef FEATURE_WDS
static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
{
	return false;
}
#endif
/**
 * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
 * @vdev: virtual device instance
 *
 * Return: void
 *
 */
void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
{
	struct dp_soc *soc = vdev->pdev->soc;

	/*
	 * Enable both AddrY (SA based search) and AddrX (DA based search)
	 * for a TDLS link.
	 *
	 * Enable AddrY (SA based search) only for non-WDS STA and
	 * ProxySTA VAP (in HKv1) modes.
	 *
	 * In all other VAP modes, only DA based search should be
	 * enabled.
	 */
	if (vdev->opmode == wlan_op_mode_sta &&
	    vdev->tdls_link_connected)
		vdev->hal_desc_addr_search_flags =
			(HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
	else if ((vdev->opmode == wlan_op_mode_sta) &&
		 !dp_tx_da_search_override(vdev))
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
	else
		vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;

	/* Set search type only when peer map v2 messaging is enabled,
	 * as we will have the search index (AST hash) only when v2 is
	 * enabled.
	 */
	if (soc->is_peer_map_unmap_v2 && vdev->opmode == wlan_op_mode_sta)
		vdev->search_type = HAL_TX_ADDR_INDEX_SEARCH;
	else
		vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
}
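/*
 * Summary of the policy implemented above (derived directly from the
 * code, kept here for quick reference):
 *
 *	STA with TDLS link up    -> ADDRX_EN | ADDRY_EN
 *	STA without DA override  -> ADDRY_EN
 *	all other VAP modes      -> ADDRX_EN
 *
 * search_type becomes HAL_TX_ADDR_INDEX_SEARCH only for STA mode with
 * peer map v2 enabled; every other case uses HAL_TX_ADDR_SEARCH_DEFAULT.
 */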
static inline bool
dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
			  struct dp_vdev *vdev,
			  struct dp_tx_desc_s *tx_desc)
{
	if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
		return false;

	/*
	 * If a vdev is given, match only on the descriptor's vdev.
	 * If vdev is NULL, match on the descriptor's pdev instead.
	 */
	return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
		      (tx_desc->pdev == pdev);
}
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_tx_desc_flush() - release resources associated
 * to TX Desc
 *
 * @pdev: Handle to DP pdev structure
 * @vdev: virtual device instance
 * NULL: no specific Vdev is required and check all allocated TX desc
 * on this pdev.
 * Non-NULL: only check the allocated TX Desc associated to this Vdev.
 *
 * @force_free:
 * true: flush the TX desc.
 * false: only reset the Vdev in each allocated TX desc
 * associated to the current Vdev.
 *
 * This function will go through the TX desc pool to flush
 * the outstanding TX data or reset Vdev to NULL in the associated TX
 * Desc.
 */
void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		/*
		 * Take the flow pool lock in case the pool is freed
		 * because all tx_desc are recycled while TX completions
		 * are being handled. The lock is not needed for a force
		 * flush because:
		 * a. a double lock would occur if dp_tx_desc_release()
		 *    is also trying to acquire it, and
		 * b. the dp interrupt has already been disabled before
		 *    the force TX desc flush in dp_pdev_deinit().
		 */
		if (!force_free)
			qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				/*
				 * Free the TX desc if a force free is
				 * required; otherwise only reset the vdev
				 * in this TX desc.
				 */
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					tx_desc->vdev_id = DP_INVALID_VDEV_ID;
				}
			}
		}
		if (!force_free)
			qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
	}
}
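/*
 * The flat descriptor index j above is translated into a (page, offset)
 * pair because a pool's descriptors are carved out of multiple cacheable
 * pages rather than one contiguous allocation:
 *
 *	page_id = j / num_desc_per_page;
 *	offset  = j % num_desc_per_page;
 *	tx_desc = dp_tx_desc_find(soc, pool_id, page_id, offset);
 *
 * For example, with 64 descriptors per page, j = 130 lands on page 2 at
 * offset 2.
 */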
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
/**
 * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
 *
 * @soc: Handle to DP soc structure
 * @tx_desc: pointer of one TX desc
 * @desc_pool_id: TX Desc pool id
 */
static inline void
dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
		      uint8_t desc_pool_id)
{
	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);

	tx_desc->vdev_id = DP_INVALID_VDEV_ID;

	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
}

void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
		      bool force_free)
{
	uint8_t i, num_pool;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_soc *soc = pdev->soc;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	if (!vdev && !force_free) {
		dp_err("Reset TX desc vdev, Vdev param is required!");
		return;
	}

	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	for (i = 0; i < num_pool; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		if (!tx_desc_pool->desc_pages.cacheable_pages)
			continue;

		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;
			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);

			if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
				if (force_free) {
					tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
					dp_tx_comp_free_buf(soc, tx_desc);
					dp_tx_desc_release(tx_desc, i);
				} else {
					dp_tx_desc_reset_vdev(soc, tx_desc,
							      i);
				}
			}
		}
	}
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
/**
 * dp_tx_vdev_detach() - detach vdev from dp tx
 * @vdev: virtual device instance
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
{
	struct dp_pdev *pdev = vdev->pdev;

	/* Reset TX desc associated to this Vdev as NULL */
	dp_tx_desc_flush(pdev, vdev, false);

	return QDF_STATUS_SUCCESS;
}
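/*
 * Note the interaction with the completion path earlier in this file:
 * flushing with force_free == false stamps DP_INVALID_VDEV_ID into each
 * matching descriptor, which is exactly the condition the Tx completion
 * reap loop checks before skipping descriptors freed in vdev_detach.
 */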
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Pools will be allocated dynamically */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
		soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
}
#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
					   int num_desc)
{
	uint8_t i, count;

	/* Allocate software Tx descriptor pools */
	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool alloc %d failed %pK"),
				  i, soc);
			goto fail;
		}
	}
	return QDF_STATUS_SUCCESS;

fail:
	for (count = 0; count < i; count++)
		dp_tx_desc_pool_free(soc, count);

	return QDF_STATUS_E_NOMEM;
}

static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
					  int num_desc)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++) {
		if (dp_tx_desc_pool_init(soc, i, num_desc)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Tx Desc Pool init %d failed %pK"),
				  i, soc);
			return QDF_STATUS_E_NOMEM;
		}
	}
	return QDF_STATUS_SUCCESS;
}

static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_deinit(soc, i);
}

static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
{
	uint8_t i;

	for (i = 0; i < num_pool; i++)
		dp_tx_desc_pool_free(soc, i);
}
#endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
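/*
 * Lifecycle note: in both build variants the static-pool helpers pair up
 * as alloc -> init -> ... -> deinit -> delete, mirroring the way
 * dp_soc_tx_desc_sw_pools_alloc()/_init() below consume them. A minimal
 * caller sketch (hypothetical error handling):
 *
 *	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_tx_init_static_pools(soc, num_pool, num_desc)) {
 *		dp_tx_delete_static_pools(soc, num_pool);
 *		return QDF_STATUS_E_NOMEM;
 *	}
 */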
/**
 * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
}

/**
 * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
 * @soc: core txrx main context
 * @num_pool: number of pools
 *
 */
void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	dp_tx_tso_desc_pool_free(soc, num_pool);
	dp_tx_tso_num_seg_pool_free(soc, num_pool);
}
/**
 * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
 * @soc: core txrx main context
 *
 * This function frees all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
	dp_tx_ext_desc_pool_free(soc, num_pool);
	dp_tx_delete_static_pools(soc, num_pool);
}

/**
 * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
 * @soc: core txrx main context
 *
 * This function de-initializes all tx related descriptors as below
 * 1. Regular TX descriptors (static pools)
 * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
 * 3. TSO descriptors
 *
 */
void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
{
	uint8_t num_pool;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_flow_control_deinit(soc);
	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
	dp_tx_deinit_static_pools(soc, num_pool);
}
/**
 * dp_tx_tso_cmn_desc_pool_alloc() - allocate TSO descriptor pools
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Reserve TSO descriptor buffers
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
					 uint8_t num_pool,
					 uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool alloc %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
 * @soc: DP soc handle
 * @num_pool: Number of pools
 * @num_desc: Number of descriptors
 *
 * Initialize TSO descriptor pools
 *
 * Return: QDF_STATUS_E_FAILURE on failure or
 *         QDF_STATUS_SUCCESS on success
 */
QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
					uint8_t num_pool,
					uint16_t num_desc)
{
	if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Desc Pool init %d failed %pK", num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
		dp_err("TSO Num of seg Pool init %d failed %pK",
		       num_pool, soc);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
 * @soc: core txrx main context
 *
 * This function allocates memory for following descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s Tx Desc Alloc num_pool = %d, descs = %d",
		  __func__, num_pool, num_desc);

	if ((num_pool > MAX_TXDESC_POOLS) ||
	    (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
		goto fail1;

	if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		goto fail3;

	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_free(soc, num_pool);
fail2:
	dp_tx_delete_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
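/*
 * The fail3/fail2/fail1 labels above unwind in the reverse order of the
 * allocations, so a failure at any step frees exactly what was already
 * allocated and nothing more; dp_soc_tx_desc_sw_pools_init() below
 * follows the same ladder for its deinit path.
 */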
/**
 * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
 * @soc: core txrx main context
 *
 * This function initializes the following TX descriptor pools
 * 1. regular sw tx descriptor pools (static pools)
 * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
 * 3. TSO descriptor pools
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
{
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_init_static_pools(soc, num_pool, num_desc))
		goto fail1;

	if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail2;

	if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		goto fail3;

	dp_tx_flow_control_init(soc);
	soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
	return QDF_STATUS_SUCCESS;

fail3:
	dp_tx_ext_desc_pool_deinit(soc, num_pool);
fail2:
	dp_tx_deinit_static_pools(soc, num_pool);
fail1:
	return QDF_STATUS_E_RESOURCES;
}
/**
 * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 *         QDF_STATUS_E_FAILURE
 */
QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool;
	uint32_t num_desc;
	uint32_t num_ext_desc;

	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);

	if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
 * @txrx_soc: dp soc handle
 *
 * Return: QDF_STATUS - QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);

	dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
	dp_tx_tso_cmn_desc_pool_free(soc, num_pool);

	return QDF_STATUS_SUCCESS;
}
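/*
 * Pairing sketch: when TSO descriptor attach is deferred (see the
 * wlan_cfg_is_tso_desc_attach_defer() checks above), a hypothetical
 * caller brings the TSO pools up and down with:
 *
 *	if (dp_tso_soc_attach(txrx_soc) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_tso_soc_detach(txrx_soc);
 */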