dp_tx.c

/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "htt.h"
#include "dp_htt.h"
#include "hal_hw_headers.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_peer.h"
#include "dp_types.h"
#include "hal_tx.h"
#include "qdf_mem.h"
#include "qdf_nbuf.h"
#include "qdf_net_types.h"
#include "qdf_module.h"
#include <wlan_cfg.h>
#include "dp_ipa.h"
#if defined(MESH_MODE_SUPPORT) || defined(FEATURE_PERPKT_INFO)
#include "if_meta_hdr.h"
#endif
#include "enet.h"
#include "dp_internal.h"
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#include "dp_hist.h"
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include "cdp_txrx_cmn_reg.h"
#ifdef CONFIG_SAWF
#include <dp_sawf.h>
#endif

/* Flag to skip CCE classify when mesh or tid override enabled */
#define DP_TX_SKIP_CCE_CLASSIFY \
	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED | DP_TX_MESH_ENABLED)

/* TODO Add support in TSO */
#define DP_DESC_NUM_FRAG(x) 0

/* disable TQM_BYPASS */
#define TQM_BYPASS_WAR 0

/* invalid peer id for reinject */
#define DP_INVALID_PEER 0XFFFE

#define DP_RETRY_COUNT 7

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_V2_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_V2_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_V2_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_V2_TYPE_VDEV_BASED
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VALID_HTT_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VALID_HTT_SET(_var, _val)
#define DP_TX_TCL_METADATA_TYPE_SET(_var, _val) \
	HTT_TX_TCL_METADATA_TYPE_SET(_var, _val)
#define DP_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val) \
	HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(_var, _val)
#define DP_TX_TCL_METADATA_PEER_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_PEER_ID_SET(_var, _val)
#define DP_TX_TCL_METADATA_VDEV_ID_SET(_var, _val) \
	HTT_TX_TCL_METADATA_VDEV_ID_SET(_var, _val)
#define DP_TCL_METADATA_TYPE_PEER_BASED \
	HTT_TCL_METADATA_TYPE_PEER_BASED
#define DP_TCL_METADATA_TYPE_VDEV_BASED \
	HTT_TCL_METADATA_TYPE_VDEV_BASED
#endif
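
/*
 * The DP_TX_TCL_METADATA_* setters and DP_TCL_METADATA_TYPE_* values above
 * map to either the V2 or the legacy HTT TCL-metadata definitions, selected
 * at compile time by QCA_DP_TX_FW_METADATA_V2, so the rest of this file can
 * program pdev/vdev/peer fields without knowing which firmware metadata
 * layout is in use.
 */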

/* mapping between hal encrypt type and cdp_sec_type */
uint8_t sec_type_map[MAX_CDP_SEC_TYPE] = {HAL_TX_ENCRYPT_TYPE_NO_CIPHER,
					  HAL_TX_ENCRYPT_TYPE_WEP_128,
					  HAL_TX_ENCRYPT_TYPE_WEP_104,
					  HAL_TX_ENCRYPT_TYPE_WEP_40,
					  HAL_TX_ENCRYPT_TYPE_TKIP_WITH_MIC,
					  HAL_TX_ENCRYPT_TYPE_TKIP_NO_MIC,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_128,
					  HAL_TX_ENCRYPT_TYPE_WAPI,
					  HAL_TX_ENCRYPT_TYPE_AES_CCMP_256,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_128,
					  HAL_TX_ENCRYPT_TYPE_AES_GCMP_256,
					  HAL_TX_ENCRYPT_TYPE_WAPI_GCM_SM4};

qdf_export_symbol(sec_type_map);
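
/*
 * sec_type_map is indexed by enum cdp_sec_type and yields the corresponding
 * HAL_TX_ENCRYPT_TYPE_* value; it is used later in this file when the
 * encrypt type is programmed into the hardware Tx descriptor.
 */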

#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	enum dp_tx_event_type type;

	if (flags & DP_TX_DESC_FLAG_FLUSH)
		type = DP_TX_DESC_FLUSH;
	else if (flags & DP_TX_DESC_FLAG_TX_COMP_ERR)
		type = DP_TX_COMP_UNMAP_ERR;
	else if (flags & DP_TX_DESC_FLAG_COMPLETED_TX)
		type = DP_TX_COMP_UNMAP;
	else
		type = DP_TX_DESC_UNMAP;

	return type;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
	struct dp_tx_tcl_history *tx_tcl_history = &soc->tx_tcl_history;
	struct dp_tx_comp_history *tx_comp_history = &soc->tx_comp_history;
	struct dp_tx_desc_event *entry;
	uint32_t idx;
	uint16_t slot;

	switch (type) {
	case DP_TX_COMP_UNMAP:
	case DP_TX_COMP_UNMAP_ERR:
	case DP_TX_COMP_MSDU_EXT:
		if (qdf_unlikely(!tx_comp_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_comp_history->index, &idx,
						 &slot,
						 DP_TX_COMP_HIST_SLOT_SHIFT,
						 DP_TX_COMP_HIST_PER_SLOT_MAX,
						 DP_TX_COMP_HISTORY_SIZE);
		entry = &tx_comp_history->entry[slot][idx];
		break;
	case DP_TX_DESC_MAP:
	case DP_TX_DESC_UNMAP:
	case DP_TX_DESC_COOKIE:
	case DP_TX_DESC_FLUSH:
		if (qdf_unlikely(!tx_tcl_history->allocated))
			return;

		dp_get_frag_hist_next_atomic_idx(&tx_tcl_history->index, &idx,
						 &slot,
						 DP_TX_TCL_HIST_SLOT_SHIFT,
						 DP_TX_TCL_HIST_PER_SLOT_MAX,
						 DP_TX_TCL_HISTORY_SIZE);
		entry = &tx_tcl_history->entry[slot][idx];
		break;
	default:
		dp_info_rl("Invalid dp_tx_event_type: %d", type);
		return;
	}

	entry->skb = skb;
	entry->paddr = paddr;
	entry->sw_cookie = sw_cookie;
	entry->type = type;
	entry->ts = qdf_get_log_timestamp();
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
	int i;

	for (i = 1; i < tso_seg->seg.num_frags; i++) {
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[i].paddr,
				       skb, sw_cookie, type);
	}

	if (!tso_seg->next)
		dp_tx_desc_history_add(soc, tso_seg->seg.tso_frags[0].paddr,
				       skb, 0xFFFFFFFF, type);
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
	struct qdf_tso_seg_elem_t *curr_seg = tso_info.tso_seg_list;
	uint32_t num_segs = tso_info.num_segs;

	while (num_segs) {
		dp_tx_tso_seg_history_add(soc, curr_seg, skb, sw_cookie, type);
		curr_seg = curr_seg->next;
		num_segs--;
	}
}
#else
static inline enum dp_tx_event_type dp_tx_get_event_type(uint32_t flags)
{
	return DP_TX_DESC_INVAL_EVT;
}

static inline void
dp_tx_desc_history_add(struct dp_soc *soc, dma_addr_t paddr,
		       qdf_nbuf_t skb, uint32_t sw_cookie,
		       enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_seg_history_add(struct dp_soc *soc,
			  struct qdf_tso_seg_elem_t *tso_seg,
			  qdf_nbuf_t skb, uint32_t sw_cookie,
			  enum dp_tx_event_type type)
{
}

static inline void
dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
		      qdf_nbuf_t skb, uint32_t sw_cookie,
		      enum dp_tx_event_type type)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
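
/*
 * With WLAN_FEATURE_DP_TX_DESC_HISTORY disabled, the helpers above compile
 * to empty inline stubs, so callers in the Tx enqueue and completion paths
 * can record descriptor events unconditionally without #ifdefs.
 */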

static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);

/**
 * dp_is_tput_high() - Check if throughput is high
 *
 * @soc: core txrx main context
 *
 * The check is based on the RTPM throughput-policy variable, which is set
 * when runtime PM is to be avoided because throughput is high.
 */
static inline int dp_is_tput_high(struct dp_soc *soc)
{
	return dp_get_rtpm_tput_policy_requirement(soc);
}

#if defined(FEATURE_TSO)
/**
 * dp_tx_tso_unmap_segment() - Unmap TSO segment
 *
 * @soc: core txrx main context
 * @seg_desc: tso segment descriptor
 * @num_seg_desc: tso number segment descriptor
 */
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	TSO_DEBUG("%s: Unmap the tso segment", __func__);
	if (qdf_unlikely(!seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "%s %d TSO num desc is NULL!",
			 __func__, __LINE__);
		qdf_assert(0);
	} else {
		bool is_last_seg;

		/* no tso segment left to do dma unmap */
		if (num_seg_desc->num_seg.tso_cmn_num_seg < 1)
			return;

		is_last_seg = (num_seg_desc->num_seg.tso_cmn_num_seg == 1) ?
			      true : false;
		qdf_nbuf_unmap_tso_segment(soc->osdev,
					   seg_desc, is_last_seg);
		num_seg_desc->num_seg.tso_cmn_num_seg--;
	}
}

/**
 * dp_tx_tso_desc_release() - Release the tso segment and tso_cmn_num_seg
 *                            descriptors back to the freelist
 *
 * @soc: soc device handle
 * @tx_desc: Tx software descriptor
 */
static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
	TSO_DEBUG("%s: Free the tso descriptor", __func__);
	if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_desc)) {
		dp_tx_err("TSO desc is NULL!");
		qdf_assert(0);
	} else if (qdf_unlikely(!tx_desc->msdu_ext_desc->tso_num_desc)) {
		dp_tx_err("TSO num desc is NULL!");
		qdf_assert(0);
	} else {
		struct qdf_tso_num_seg_elem_t *tso_num_desc =
			(struct qdf_tso_num_seg_elem_t *)tx_desc->
						msdu_ext_desc->tso_num_desc;

		/* Add the tso num segment into the free list */
		if (tso_num_desc->num_seg.tso_cmn_num_seg == 0) {
			dp_tso_num_seg_free(soc, tx_desc->pool_id,
					    tx_desc->msdu_ext_desc->
					    tso_num_desc);
			tx_desc->msdu_ext_desc->tso_num_desc = NULL;
			DP_STATS_INC(tx_desc->pdev, tso_stats.tso_comp, 1);
		}

		/* Add the tso segment into the free list */
		dp_tx_tso_desc_free(soc,
				    tx_desc->pool_id, tx_desc->msdu_ext_desc->
				    tso_desc);
		tx_desc->msdu_ext_desc->tso_desc = NULL;
	}
}
#else
static void dp_tx_tso_unmap_segment(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *seg_desc,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
}

static void dp_tx_tso_desc_release(struct dp_soc *soc,
				   struct dp_tx_desc_s *tx_desc)
{
}
#endif

/**
 * dp_tx_desc_release() - Release Tx Descriptor
 * @tx_desc: Tx Descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Deallocate all resources attached to Tx descriptor and free the Tx
 * descriptor.
 *
 * Return: None
 */
void
dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
{
	struct dp_pdev *pdev = tx_desc->pdev;
	struct dp_soc *soc;
	uint8_t comp_status = 0;

	qdf_assert(pdev);

	soc = pdev->soc;

	dp_tx_outstanding_dec(pdev);

	if (tx_desc->msdu_ext_desc) {
		if (tx_desc->frm_type == dp_tx_frm_tso)
			dp_tx_tso_desc_release(soc, tx_desc);

		if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
			dp_tx_me_free_buf(tx_desc->pdev,
					  tx_desc->msdu_ext_desc->me_buffer);

		dp_tx_ext_desc_free(soc, tx_desc->msdu_ext_desc, desc_pool_id);
	}

	if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
		qdf_atomic_dec(&soc->num_tx_exception);

	if (HAL_TX_COMP_RELEASE_SOURCE_TQM ==
	    tx_desc->buffer_src)
		comp_status = hal_tx_comp_get_release_reason(&tx_desc->comp,
							     soc->hal_soc);
	else
		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;

	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
		    tx_desc->id, comp_status,
		    qdf_atomic_read(&pdev->num_tx_outstanding));

	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
	return;
}

/**
 * dp_tx_prepare_htt_metadata() - Prepare HTT metadata for special frames
 * @vdev: DP vdev Handle
 * @nbuf: skb
 * @msdu_info: msdu_info required to create HTT metadata
 *
 * Prepares and fills HTT metadata in the frame pre-header for special frames
 * that should be transmitted using varying transmit parameters.
 * There are 2 VDEV modes that currently need this special metadata -
 *  1) Mesh Mode
 *  2) DSRC Mode
 *
 * Return: HTT metadata size
 */
static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
					  struct dp_tx_msdu_info_s *msdu_info)
{
	uint32_t *meta_data = msdu_info->meta_data;
	struct htt_tx_msdu_desc_ext2_t *desc_ext =
				(struct htt_tx_msdu_desc_ext2_t *)meta_data;

	uint8_t htt_desc_size;

	/* Size rounded up to a multiple of 8 bytes */
	uint8_t htt_desc_size_aligned;

	uint8_t *hdr = NULL;

	/*
	 * Metadata - HTT MSDU Extension header
	 */
	htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
	htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;

	if (vdev->mesh_vdev || msdu_info->is_tx_sniffer ||
	    HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_GET(msdu_info->
							   meta_data[0]) ||
	    msdu_info->exception_fw) {
		if (qdf_unlikely(qdf_nbuf_headroom(nbuf) <
				 htt_desc_size_aligned)) {
			nbuf = qdf_nbuf_realloc_headroom(nbuf,
							 htt_desc_size_aligned);
			if (!nbuf) {
				/*
				 * qdf_nbuf_realloc_headroom won't do skb_clone
				 * as skb_realloc_headroom does. so, no free is
				 * needed here.
				 */
				DP_STATS_INC(vdev,
					     tx_i.dropped.headroom_insufficient,
					     1);
				qdf_print(" %s[%d] skb_realloc_headroom failed",
					  __func__, __LINE__);
				return 0;
			}
		}
		/* Fill and add HTT metaheader */
		hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
		if (!hdr) {
			dp_tx_err("Error in filling HTT metadata");
			return 0;
		}
		qdf_mem_copy(hdr, desc_ext, htt_desc_size);
	} else if (vdev->opmode == wlan_op_mode_ocb) {
		/* Todo - Add support for DSRC */
	}

	return htt_desc_size_aligned;
}

/**
 * dp_tx_prepare_tso_ext_desc() - Prepare MSDU extension descriptor for TSO
 * @tso_seg: TSO segment to process
 * @ext_desc: Pointer to MSDU extension descriptor
 *
 * Return: void
 */
#if defined(FEATURE_TSO)
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	uint8_t num_frag;
	uint32_t tso_flags;

	/*
	 * Set tso_en, tcp_flags(NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN),
	 * tcp_flag_mask
	 *
	 * Checksum enable flags are set in TCL descriptor and not in Extension
	 * Descriptor (H/W ignores checksum_en flags in MSDU ext descriptor)
	 */
	tso_flags = *(uint32_t *)&tso_seg->tso_flags;

	hal_tx_ext_desc_set_tso_flags(ext_desc, tso_flags);

	hal_tx_ext_desc_set_msdu_length(ext_desc, tso_seg->tso_flags.l2_len,
					tso_seg->tso_flags.ip_len);

	hal_tx_ext_desc_set_tcp_seq(ext_desc, tso_seg->tso_flags.tcp_seq_num);
	hal_tx_ext_desc_set_ip_id(ext_desc, tso_seg->tso_flags.ip_id);

	for (num_frag = 0; num_frag < tso_seg->num_frags; num_frag++) {
		uint32_t lo = 0;
		uint32_t hi = 0;

		qdf_assert_always((tso_seg->tso_frags[num_frag].paddr) &&
				  (tso_seg->tso_frags[num_frag].length));

		qdf_dmaaddr_to_32s(
			tso_seg->tso_frags[num_frag].paddr, &lo, &hi);
		hal_tx_ext_desc_set_buffer(ext_desc, num_frag, lo, hi,
					   tso_seg->tso_frags[num_frag].length);
	}

	return;
}
#else
static void dp_tx_prepare_tso_ext_desc(struct qdf_tso_seg_t *tso_seg,
				       void *ext_desc)
{
	return;
}
#endif

#if defined(FEATURE_TSO)
/**
 * dp_tx_free_tso_seg_list() - Loop through the tso segments
 *                             allocated and free them
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *next_seg;

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_desc_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_seg);
		free_seg = next_seg;
	}
}

/**
 * dp_tx_free_tso_num_seg_list() - Loop through the tso num segments
 *                                 allocated and free them
 *
 * @soc: soc handle
 * @free_num_seg: list of tso number segments
 * @msdu_info: msdu descriptor
 *
 * Return: void
 */
static void dp_tx_free_tso_num_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_num_seg_elem_t *free_num_seg,
		struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_num_seg_elem_t *next_num_seg;

	while (free_num_seg) {
		next_num_seg = free_num_seg->next;
		dp_tso_num_seg_free(soc,
				    msdu_info->tx_queue.desc_pool_id,
				    free_num_seg);
		free_num_seg = next_num_seg;
	}
}

/**
 * dp_tx_unmap_tso_seg_list() - Loop through the tso segments
 *                              and do dma unmap for each segment
 *
 * @soc: soc handle
 * @free_seg: list of tso segments
 * @num_seg_desc: tso number segment descriptor
 *
 * Return: void
 */
static void dp_tx_unmap_tso_seg_list(
		struct dp_soc *soc,
		struct qdf_tso_seg_elem_t *free_seg,
		struct qdf_tso_num_seg_elem_t *num_seg_desc)
{
	struct qdf_tso_seg_elem_t *next_seg;

	if (qdf_unlikely(!num_seg_desc)) {
		DP_TRACE(ERROR, "TSO number seg desc is NULL!");
		return;
	}

	while (free_seg) {
		next_seg = free_seg->next;
		dp_tx_tso_unmap_segment(soc, free_seg, num_seg_desc);
		free_seg = next_seg;
	}
}

#ifdef FEATURE_TSO_STATS
/**
 * dp_tso_get_stats_idx() - Retrieve the tso packet id
 * @pdev: pdev handle
 *
 * Return: id
 */
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	uint32_t stats_idx;

	stats_idx = (((uint32_t)qdf_atomic_inc_return(&pdev->tso_idx))
						% CDP_MAX_TSO_PACKETS);
	return stats_idx;
}
#else
static uint32_t dp_tso_get_stats_idx(struct dp_pdev *pdev)
{
	return 0;
}
#endif /* FEATURE_TSO_STATS */

/**
 * dp_tx_free_remaining_tso_desc() - do dma unmap for tso segments if any,
 *                                   then free the tso segment and tso num
 *                                   segment descriptors
 *
 * @soc: soc handle
 * @msdu_info: msdu descriptor
 * @tso_seg_unmap: flag to show if dma unmap is necessary
 *
 * Return: void
 */
static void dp_tx_free_remaining_tso_desc(struct dp_soc *soc,
					  struct dp_tx_msdu_info_s *msdu_info,
					  bool tso_seg_unmap)
{
	struct qdf_tso_info_t *tso_info = &msdu_info->u.tso_info;
	struct qdf_tso_seg_elem_t *free_seg = tso_info->tso_seg_list;
	struct qdf_tso_num_seg_elem_t *tso_num_desc =
					tso_info->tso_num_seg_list;

	/* do dma unmap for each segment */
	if (tso_seg_unmap)
		dp_tx_unmap_tso_seg_list(soc, free_seg, tso_num_desc);

	/* free all tso number segment descriptors (normally there is one) */
	dp_tx_free_tso_num_seg_list(soc, tso_num_desc, msdu_info);

	/* free all tso segment descriptors */
	dp_tx_free_tso_seg_list(soc, free_seg, msdu_info);
}

/**
 * dp_tx_prepare_tso() - Given a jumbo msdu, prepare the TSO info
 * @vdev: virtual device handle
 * @msdu: network buffer
 * @msdu_info: meta data associated with the msdu
 *
 * Return: QDF_STATUS_SUCCESS on success
 */
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	struct qdf_tso_seg_elem_t *tso_seg;
	int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
	struct dp_soc *soc = vdev->pdev->soc;
	struct dp_pdev *pdev = vdev->pdev;
	struct qdf_tso_info_t *tso_info;
	struct qdf_tso_num_seg_elem_t *tso_num_seg;

	tso_info = &msdu_info->u.tso_info;
	tso_info->curr_seg = NULL;
	tso_info->tso_seg_list = NULL;
	tso_info->num_segs = num_seg;
	msdu_info->frm_type = dp_tx_frm_tso;
	tso_info->tso_num_seg_list = NULL;

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	while (num_seg) {
		tso_seg = dp_tx_tso_desc_alloc(
				soc, msdu_info->tx_queue.desc_pool_id);
		if (tso_seg) {
			tso_seg->next = tso_info->tso_seg_list;
			tso_info->tso_seg_list = tso_seg;
			num_seg--;
		} else {
			dp_err_rl("Failed to alloc tso seg desc");
			DP_STATS_INC_PKT(vdev->pdev,
					 tso_stats.tso_no_mem_dropped, 1,
					 qdf_nbuf_len(msdu));
			dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
			return QDF_STATUS_E_NOMEM;
		}
	}

	TSO_DEBUG(" %s: num_seg: %d", __func__, num_seg);

	tso_num_seg = dp_tso_num_seg_alloc(soc,
					   msdu_info->tx_queue.desc_pool_id);

	if (tso_num_seg) {
		tso_num_seg->next = tso_info->tso_num_seg_list;
		tso_info->tso_num_seg_list = tso_num_seg;
	} else {
		DP_TRACE(ERROR, "%s: Failed to alloc - Number of segs desc",
			 __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, false);
		return QDF_STATUS_E_NOMEM;
	}

	msdu_info->num_seg =
		qdf_nbuf_get_tso_info(soc->osdev, msdu, tso_info);

	TSO_DEBUG(" %s: msdu_info->num_seg: %d", __func__,
		  msdu_info->num_seg);

	if (!(msdu_info->num_seg)) {
		/*
		 * Free allocated TSO seg desc and number seg desc,
		 * do unmap for segments if dma map has been done.
		 */
		DP_TRACE(ERROR, "%s: Failed to get tso info", __func__);
		dp_tx_free_remaining_tso_desc(soc, msdu_info, true);
		return QDF_STATUS_E_INVAL;
	}

	dp_tx_tso_history_add(soc, msdu_info->u.tso_info,
			      msdu, 0, DP_TX_DESC_MAP);

	tso_info->curr_seg = tso_info->tso_seg_list;

	tso_info->msdu_stats_idx = dp_tso_get_stats_idx(pdev);
	dp_tso_packet_update(pdev, tso_info->msdu_stats_idx,
			     msdu, msdu_info->num_seg);
	dp_tso_segment_stats_update(pdev, tso_info->tso_seg_list,
				    tso_info->msdu_stats_idx);
	dp_stats_tso_segment_histogram_update(pdev, msdu_info->num_seg);
	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS dp_tx_prepare_tso(struct dp_vdev *vdev,
				    qdf_nbuf_t msdu,
				    struct dp_tx_msdu_info_s *msdu_info)
{
	return QDF_STATUS_E_NOMEM;
}
#endif
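
/*
 * dp_tx_prepare_tso() allocates one qdf_tso_seg_elem_t per TSO segment and
 * one qdf_tso_num_seg_elem_t for the whole MSDU; qdf_nbuf_get_tso_info()
 * then fills the segment list and DMA-maps the fragments. On failure the
 * partially built lists are released via dp_tx_free_remaining_tso_desc(),
 * with unmap requested only once mapping has actually been attempted.
 */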

QDF_COMPILE_TIME_ASSERT(dp_tx_htt_metadata_len_check,
			(DP_TX_MSDU_INFO_META_DATA_DWORDS * 4 >=
			 sizeof(struct htt_tx_msdu_desc_ext2_t)));
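
/*
 * The compile-time assert above guarantees that the meta_data[] array in
 * struct dp_tx_msdu_info_s (DP_TX_MSDU_INFO_META_DATA_DWORDS 32-bit words)
 * is large enough to hold a struct htt_tx_msdu_desc_ext2_t, which
 * dp_tx_prepare_htt_metadata() copies into the frame headroom.
 */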

/**
 * dp_tx_prepare_ext_desc() - Allocate and prepare MSDU extension descriptor
 * @vdev: DP Vdev handle
 * @msdu_info: MSDU info to be setup in MSDU extension descriptor
 * @desc_pool_id: Descriptor Pool ID
 *
 * Return: Pointer to MSDU extension descriptor on success, NULL on failure
 */
static
struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
		struct dp_tx_msdu_info_s *msdu_info, uint8_t desc_pool_id)
{
	uint8_t i;
	uint8_t cached_ext_desc[HAL_TX_EXT_DESC_WITH_META_DATA];
	struct dp_tx_seg_info_s *seg_info;
	struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
	struct dp_soc *soc = vdev->pdev->soc;

	/* Allocate an extension descriptor */
	msdu_ext_desc = dp_tx_ext_desc_alloc(soc, desc_pool_id);
	qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);

	if (!msdu_ext_desc) {
		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
		return NULL;
	}

	if (msdu_info->exception_fw &&
	    qdf_unlikely(vdev->mesh_vdev)) {
		qdf_mem_copy(&cached_ext_desc[HAL_TX_EXTENSION_DESC_LEN_BYTES],
			     &msdu_info->meta_data[0],
			     sizeof(struct htt_tx_msdu_desc_ext2_t));
		qdf_atomic_inc(&soc->num_tx_exception);
		msdu_ext_desc->flags |= DP_TX_EXT_DESC_FLAG_METADATA_VALID;
	}

	switch (msdu_info->frm_type) {
	case dp_tx_frm_sg:
	case dp_tx_frm_me:
	case dp_tx_frm_raw:
		seg_info = msdu_info->u.sg_info.curr_seg;

		/* Update the buffer pointers in MSDU Extension Descriptor */
		for (i = 0; i < seg_info->frag_cnt; i++) {
			hal_tx_ext_desc_set_buffer(&cached_ext_desc[0], i,
						   seg_info->frags[i].paddr_lo,
						   seg_info->frags[i].paddr_hi,
						   seg_info->frags[i].len);
		}

		break;

	case dp_tx_frm_tso:
		dp_tx_prepare_tso_ext_desc(&msdu_info->u.tso_info.curr_seg->seg,
					   &cached_ext_desc[0]);
		break;

	default:
		break;
	}

	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			   cached_ext_desc, HAL_TX_EXT_DESC_WITH_META_DATA);

	hal_tx_ext_desc_sync(&cached_ext_desc[0],
			     msdu_ext_desc->vaddr);

	return msdu_ext_desc;
}

/**
 * dp_tx_trace_pkt() - Trace TX packet at DP layer
 * @soc: DP soc handle
 * @skb: skb to be traced
 * @msdu_id: msdu_id of the packet
 * @vdev_id: vdev_id of the packet
 *
 * Return: None
 */
#ifdef DP_DISABLE_TX_PKT_TRACE
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
}
#else
static void dp_tx_trace_pkt(struct dp_soc *soc,
			    qdf_nbuf_t skb, uint16_t msdu_id,
			    uint8_t vdev_id)
{
	if (dp_is_tput_high(soc))
		return;

	QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
	QDF_NBUF_CB_TX_DP_TRACE(skb) = 1;
	DPTRACE(qdf_dp_trace_ptr(skb,
				 QDF_DP_TRACE_LI_DP_TX_PACKET_PTR_RECORD,
				 QDF_TRACE_DEFAULT_PDEV_ID,
				 qdf_nbuf_data_addr(skb),
				 sizeof(qdf_nbuf_data(skb)),
				 msdu_id, vdev_id, 0));

	qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX, QDF_TRACE_DEFAULT_PDEV_ID);

	DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_TRACE_DEFAULT_PDEV_ID,
				      QDF_DP_TRACE_LI_DP_TX_PACKET_RECORD,
				      msdu_id, QDF_TX));
}
#endif

#ifdef WLAN_DP_FEATURE_MARK_ICMP_REQ_TO_FW
/**
 * dp_tx_is_nbuf_marked_exception() - Check if the packet has been marked as
 *                                    exception by the upper layer (OS_IF)
 * @soc: DP soc handle
 * @nbuf: packet to be transmitted
 *
 * Return: 1 if the packet is marked as exception,
 *         0 if the packet is not marked as exception.
 */
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_TX_PACKET_TO_FW(nbuf);
}
#else
static inline int dp_tx_is_nbuf_marked_exception(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	return 0;
}
#endif
  795. #ifdef DP_TRAFFIC_END_INDICATION
  796. /**
  797. * dp_tx_get_traffic_end_indication_pkt() - Allocate and prepare packet to send
  798. * as indication to fw to inform that
  799. * data stream has ended
  800. * @vdev: DP vdev handle
  801. * @nbuf: original buffer from network stack
  802. *
  803. * Return: NULL on failure,
  804. * nbuf on success
  805. */
  806. static inline qdf_nbuf_t
  807. dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
  808. qdf_nbuf_t nbuf)
  809. {
  810. /* Packet length should be enough to copy upto L3 header */
  811. uint8_t end_nbuf_len = 64;
  812. uint8_t htt_desc_size_aligned;
  813. uint8_t htt_desc_size;
  814. qdf_nbuf_t end_nbuf;
  815. if (qdf_unlikely(QDF_NBUF_CB_GET_PACKET_TYPE(nbuf) ==
  816. QDF_NBUF_CB_PACKET_TYPE_END_INDICATION)) {
  817. htt_desc_size = sizeof(struct htt_tx_msdu_desc_ext2_t);
  818. htt_desc_size_aligned = (htt_desc_size + 7) & ~0x7;
  819. end_nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
  820. if (!end_nbuf) {
  821. end_nbuf = qdf_nbuf_alloc(NULL,
  822. (htt_desc_size_aligned +
  823. end_nbuf_len),
  824. htt_desc_size_aligned,
  825. 8, false);
  826. if (!end_nbuf) {
  827. dp_err("Packet allocation failed");
  828. goto out;
  829. }
  830. } else {
  831. qdf_nbuf_reset(end_nbuf, htt_desc_size_aligned, 8);
  832. }
  833. qdf_mem_copy(qdf_nbuf_data(end_nbuf), qdf_nbuf_data(nbuf),
  834. end_nbuf_len);
  835. qdf_nbuf_set_pktlen(end_nbuf, end_nbuf_len);
  836. return end_nbuf;
  837. }
  838. out:
  839. return NULL;
  840. }
  841. /**
  842. * dp_tx_send_traffic_end_indication_pkt() - Send indication packet to FW
  843. * via exception path.
  844. * @vdev: DP vdev handle
  845. * @end_nbuf: skb to send as indication
  846. * @msdu_info: msdu_info of original nbuf
  847. * @peer_id: peer id
  848. *
  849. * Return: None
  850. */
  851. static inline void
  852. dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
  853. qdf_nbuf_t end_nbuf,
  854. struct dp_tx_msdu_info_s *msdu_info,
  855. uint16_t peer_id)
  856. {
  857. struct dp_tx_msdu_info_s e_msdu_info = {0};
  858. qdf_nbuf_t nbuf;
  859. struct htt_tx_msdu_desc_ext2_t *desc_ext =
  860. (struct htt_tx_msdu_desc_ext2_t *)(e_msdu_info.meta_data);
  861. e_msdu_info.tx_queue = msdu_info->tx_queue;
  862. e_msdu_info.tid = msdu_info->tid;
  863. e_msdu_info.exception_fw = 1;
  864. desc_ext->host_tx_desc_pool = 1;
  865. desc_ext->traffic_end_indication = 1;
  866. nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
  867. peer_id, NULL);
  868. if (nbuf) {
  869. dp_err("Traffic end indication packet tx failed");
  870. qdf_nbuf_free(nbuf);
  871. }
  872. }
  873. /**
  874. * dp_tx_traffic_end_indication_set_desc_flag() - Set tx descriptor flag to
  875. * mark it trafic end indication
  876. * packet.
  877. * @tx_desc: Tx descriptor pointer
  878. * @msdu_info: msdu_info structure pointer
  879. *
  880. * Return: None
  881. */
  882. static inline void
  883. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  884. struct dp_tx_msdu_info_s *msdu_info)
  885. {
  886. struct htt_tx_msdu_desc_ext2_t *desc_ext =
  887. (struct htt_tx_msdu_desc_ext2_t *)(msdu_info->meta_data);
  888. if (qdf_unlikely(desc_ext->traffic_end_indication))
  889. tx_desc->flags |= DP_TX_DESC_FLAG_TRAFFIC_END_IND;
  890. }
  891. /**
  892. * dp_tx_traffic_end_indication_enq_ind_pkt() - Enqueue the packet instead of
  893. * freeing which are associated
  894. * with traffic end indication
  895. * flagged descriptor.
  896. * @soc: dp soc handle
  897. * @desc: Tx descriptor pointer
  898. * @nbuf: buffer pointer
  899. *
  900. * Return: True if packet gets enqueued else false
  901. */
  902. static bool
  903. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  904. struct dp_tx_desc_s *desc,
  905. qdf_nbuf_t nbuf)
  906. {
  907. struct dp_vdev *vdev = NULL;
  908. if (qdf_unlikely((desc->flags &
  909. DP_TX_DESC_FLAG_TRAFFIC_END_IND) != 0)) {
  910. vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
  911. DP_MOD_ID_TX_COMP);
  912. if (vdev) {
  913. qdf_nbuf_queue_add(&vdev->end_ind_pkt_q, nbuf);
  914. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_COMP);
  915. return true;
  916. }
  917. }
  918. return false;
  919. }
  920. /**
  921. * dp_tx_traffic_end_indication_is_enabled() - get the feature
  922. * enable/disable status
  923. * @vdev: dp vdev handle
  924. *
925. * Return: True if feature is enabled else false
  926. */
  927. static inline bool
  928. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  929. {
  930. return qdf_unlikely(vdev->traffic_end_ind_en);
  931. }
  932. static inline qdf_nbuf_t
  933. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  934. struct dp_tx_msdu_info_s *msdu_info,
  935. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  936. {
  937. if (dp_tx_traffic_end_indication_is_enabled(vdev))
  938. end_nbuf = dp_tx_get_traffic_end_indication_pkt(vdev, nbuf);
  939. nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  940. if (qdf_unlikely(end_nbuf))
  941. dp_tx_send_traffic_end_indication_pkt(vdev, end_nbuf,
  942. msdu_info, peer_id);
  943. return nbuf;
  944. }
  945. #else
  946. static inline qdf_nbuf_t
  947. dp_tx_get_traffic_end_indication_pkt(struct dp_vdev *vdev,
  948. qdf_nbuf_t nbuf)
  949. {
  950. return NULL;
  951. }
  952. static inline void
  953. dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
  954. qdf_nbuf_t end_nbuf,
  955. struct dp_tx_msdu_info_s *msdu_info,
  956. uint16_t peer_id)
  957. {}
  958. static inline void
  959. dp_tx_traffic_end_indication_set_desc_flag(struct dp_tx_desc_s *tx_desc,
  960. struct dp_tx_msdu_info_s *msdu_info)
  961. {}
  962. static inline bool
  963. dp_tx_traffic_end_indication_enq_ind_pkt(struct dp_soc *soc,
  964. struct dp_tx_desc_s *desc,
  965. qdf_nbuf_t nbuf)
  966. {
  967. return false;
  968. }
  969. static inline bool
  970. dp_tx_traffic_end_indication_is_enabled(struct dp_vdev *vdev)
  971. {
  972. return false;
  973. }
  974. static inline qdf_nbuf_t
  975. dp_tx_send_msdu_single_wrapper(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  976. struct dp_tx_msdu_info_s *msdu_info,
  977. uint16_t peer_id, qdf_nbuf_t end_nbuf)
  978. {
  979. return dp_tx_send_msdu_single(vdev, nbuf, msdu_info, peer_id, NULL);
  980. }
  981. #endif
  982. /**
983. * dp_tx_prepare_desc_single() - Allocate and prepare Tx descriptor
984. * @vdev: DP vdev handle
985. * @nbuf: skb
986. * @desc_pool_id: Descriptor pool ID
987. * @msdu_info: MSDU information to be set up in the Tx descriptor
988. * @tx_exc_metadata: Handle that holds exception path metadata
989. *
990. * Allocate and prepare Tx descriptor with msdu information.
  991. * Return: Pointer to Tx Descriptor on success,
  992. * NULL on failure
  993. */
  994. static
  995. struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
  996. qdf_nbuf_t nbuf, uint8_t desc_pool_id,
  997. struct dp_tx_msdu_info_s *msdu_info,
  998. struct cdp_tx_exception_metadata *tx_exc_metadata)
  999. {
  1000. uint8_t align_pad;
  1001. uint8_t is_exception = 0;
  1002. uint8_t htt_hdr_size;
  1003. struct dp_tx_desc_s *tx_desc;
  1004. struct dp_pdev *pdev = vdev->pdev;
  1005. struct dp_soc *soc = pdev->soc;
  1006. if (dp_tx_limit_check(vdev))
  1007. return NULL;
  1008. /* Allocate software Tx descriptor */
  1009. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1010. if (qdf_unlikely(!tx_desc)) {
  1011. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1012. DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
  1013. return NULL;
  1014. }
  1015. dp_tx_outstanding_inc(pdev);
  1016. /* Initialize the SW tx descriptor */
  1017. tx_desc->nbuf = nbuf;
  1018. tx_desc->frm_type = dp_tx_frm_std;
  1019. tx_desc->tx_encap_type = ((tx_exc_metadata &&
  1020. (tx_exc_metadata->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE)) ?
  1021. tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
  1022. tx_desc->vdev_id = vdev->vdev_id;
  1023. tx_desc->pdev = pdev;
  1024. tx_desc->msdu_ext_desc = NULL;
  1025. tx_desc->pkt_offset = 0;
  1026. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1027. tx_desc->shinfo_addr = skb_end_pointer(nbuf);
  1028. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1029. if (qdf_unlikely(vdev->multipass_en)) {
  1030. if (!dp_tx_multipass_process(soc, vdev, nbuf, msdu_info))
  1031. goto failure;
  1032. }
  1033. /* Packets marked by upper layer (OS-IF) to be sent to FW */
  1034. if (dp_tx_is_nbuf_marked_exception(soc, nbuf))
  1035. is_exception = 1;
  1036. /*
  1037. * For special modes (vdev_type == ocb or mesh), data frames should be
  1038. * transmitted using varying transmit parameters (tx spec) which include
1039. * transmit rate, power, priority, channel, channel bandwidth, nss, etc.
  1040. * These are filled in HTT MSDU descriptor and sent in frame pre-header.
  1041. * These frames are sent as exception packets to firmware.
  1042. *
  1043. * HW requirement is that metadata should always point to a
  1044. * 8-byte aligned address. So we add alignment pad to start of buffer.
  1045. * HTT Metadata should be ensured to be multiple of 8-bytes,
  1046. * to get 8-byte aligned start address along with align_pad added
  1047. *
  1048. * |-----------------------------|
  1049. * | |
  1050. * |-----------------------------| <-----Buffer Pointer Address given
  1051. * | | ^ in HW descriptor (aligned)
  1052. * | HTT Metadata | |
  1053. * | | |
  1054. * | | | Packet Offset given in descriptor
  1055. * | | |
  1056. * |-----------------------------| |
  1057. * | Alignment Pad | v
  1058. * |-----------------------------| <----- Actual buffer start address
  1059. * | SKB Data | (Unaligned)
  1060. * | |
  1061. * | |
  1062. * | |
  1063. * | |
  1064. * | |
  1065. * |-----------------------------|
  1066. */
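/*
 * Worked example (illustrative): if qdf_nbuf_data(nbuf) is ...0x1005,
 * then align_pad = 0x1005 & 0x7 = 5. Pushing 5 bytes of headroom moves
 * the start to ...0x1000, which is 8-byte aligned. If
 * dp_tx_prepare_htt_metadata() then prepends, say, 8 bytes of HTT
 * metadata, the buffer pointer programmed in the HW descriptor stays
 * 8-byte aligned and
 *     pkt_offset = align_pad + htt_hdr_size = 5 + 8 = 13
 * i.e. the original frame starts 13 bytes after the address handed to
 * HW, which is why tx_desc->length is reduced by pkt_offset below.
 */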
  1067. if (qdf_unlikely((msdu_info->exception_fw)) ||
  1068. (vdev->opmode == wlan_op_mode_ocb) ||
  1069. (tx_exc_metadata &&
  1070. tx_exc_metadata->is_tx_sniffer)) {
  1071. align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
  1072. if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
  1073. DP_STATS_INC(vdev,
  1074. tx_i.dropped.headroom_insufficient, 1);
  1075. goto failure;
  1076. }
  1077. if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
  1078. dp_tx_err("qdf_nbuf_push_head failed");
  1079. goto failure;
  1080. }
  1081. htt_hdr_size = dp_tx_prepare_htt_metadata(vdev, nbuf,
  1082. msdu_info);
  1083. if (htt_hdr_size == 0)
  1084. goto failure;
  1085. tx_desc->length = qdf_nbuf_headlen(nbuf);
  1086. tx_desc->pkt_offset = align_pad + htt_hdr_size;
  1087. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1088. dp_tx_traffic_end_indication_set_desc_flag(tx_desc,
  1089. msdu_info);
  1090. is_exception = 1;
  1091. tx_desc->length -= tx_desc->pkt_offset;
  1092. }
  1093. #if !TQM_BYPASS_WAR
  1094. if (is_exception || tx_exc_metadata)
  1095. #endif
  1096. {
  1097. /* Temporary WAR due to TQM VP issues */
  1098. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1099. qdf_atomic_inc(&soc->num_tx_exception);
  1100. }
  1101. return tx_desc;
  1102. failure:
  1103. dp_tx_desc_release(tx_desc, desc_pool_id);
  1104. return NULL;
  1105. }
  1106. /**
  1107. * dp_tx_prepare_desc() - Allocate and prepare Tx descriptor for multisegment frame
  1108. * @vdev: DP vdev handle
  1109. * @nbuf: skb
  1110. * @msdu_info: Info to be setup in MSDU descriptor and MSDU extension descriptor
  1111. * @desc_pool_id : Descriptor Pool ID
  1112. *
1113. * Allocate and prepare Tx descriptor with msdu and fragment descriptor
1114. * information. For frames with fragments, allocate and prepare
  1115. * an MSDU extension descriptor
  1116. *
  1117. * Return: Pointer to Tx Descriptor on success,
  1118. * NULL on failure
  1119. */
  1120. static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
  1121. qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info,
  1122. uint8_t desc_pool_id)
  1123. {
  1124. struct dp_tx_desc_s *tx_desc;
  1125. struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
  1126. struct dp_pdev *pdev = vdev->pdev;
  1127. struct dp_soc *soc = pdev->soc;
  1128. if (dp_tx_limit_check(vdev))
  1129. return NULL;
  1130. /* Allocate software Tx descriptor */
  1131. tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
  1132. if (!tx_desc) {
  1133. DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
  1134. return NULL;
  1135. }
  1136. dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
  1137. nbuf, tx_desc->id, DP_TX_DESC_COOKIE);
  1138. dp_tx_outstanding_inc(pdev);
  1139. /* Initialize the SW tx descriptor */
  1140. tx_desc->nbuf = nbuf;
  1141. tx_desc->frm_type = msdu_info->frm_type;
  1142. tx_desc->tx_encap_type = vdev->tx_encap_type;
  1143. tx_desc->vdev_id = vdev->vdev_id;
  1144. tx_desc->pdev = pdev;
  1145. tx_desc->pkt_offset = 0;
  1146. dp_tx_trace_pkt(soc, nbuf, tx_desc->id, vdev->vdev_id);
  1147. /* Handle scattered frames - TSO/SG/ME */
  1148. /* Allocate and prepare an extension descriptor for scattered frames */
  1149. msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
  1150. if (!msdu_ext_desc) {
  1151. dp_tx_info("Tx Extension Descriptor Alloc Fail");
  1152. goto failure;
  1153. }
  1154. #if TQM_BYPASS_WAR
  1155. /* Temporary WAR due to TQM VP issues */
  1156. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1157. qdf_atomic_inc(&soc->num_tx_exception);
  1158. #endif
  1159. if (qdf_unlikely(msdu_info->exception_fw))
  1160. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1161. tx_desc->msdu_ext_desc = msdu_ext_desc;
  1162. tx_desc->flags |= DP_TX_DESC_FLAG_FRAG;
  1163. msdu_ext_desc->tso_desc = msdu_info->u.tso_info.curr_seg;
  1164. msdu_ext_desc->tso_num_desc = msdu_info->u.tso_info.tso_num_seg_list;
  1165. tx_desc->dma_addr = msdu_ext_desc->paddr;
  1166. if (msdu_ext_desc->flags & DP_TX_EXT_DESC_FLAG_METADATA_VALID)
  1167. tx_desc->length = HAL_TX_EXT_DESC_WITH_META_DATA;
  1168. else
  1169. tx_desc->length = HAL_TX_EXTENSION_DESC_LEN_BYTES;
  1170. return tx_desc;
  1171. failure:
  1172. dp_tx_desc_release(tx_desc, desc_pool_id);
  1173. return NULL;
  1174. }
  1175. /**
  1176. * dp_tx_prepare_raw() - Prepare RAW packet TX
  1177. * @vdev: DP vdev handle
  1178. * @nbuf: buffer pointer
  1179. * @seg_info: Pointer to Segment info Descriptor to be prepared
  1180. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension
  1181. * descriptor
  1182. *
1183. * Return: nbuf on success, NULL on failure
  1184. */
  1185. static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1186. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  1187. {
  1188. qdf_nbuf_t curr_nbuf = NULL;
  1189. uint16_t total_len = 0;
  1190. qdf_dma_addr_t paddr;
  1191. int32_t i;
  1192. int32_t mapped_buf_num = 0;
  1193. struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
  1194. qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
  1195. DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
  1196. /* Continue only if frames are of DATA type */
  1197. if (!DP_FRAME_IS_DATA(qos_wh)) {
  1198. DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
1199. dp_tx_debug("Pkt. recd is not of data type");
  1200. goto error;
  1201. }
  1202. /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
  1203. if (vdev->raw_mode_war &&
  1204. (qos_wh->i_fc[0] & QDF_IEEE80211_FC0_SUBTYPE_QOS) &&
  1205. (qos_wh->i_qos[0] & IEEE80211_QOS_AMSDU))
  1206. qos_wh->i_fc[1] |= IEEE80211_FC1_WEP;
  1207. for (curr_nbuf = nbuf, i = 0; curr_nbuf;
  1208. curr_nbuf = qdf_nbuf_next(curr_nbuf), i++) {
  1209. /*
  1210. * Number of nbuf's must not exceed the size of the frags
  1211. * array in seg_info.
  1212. */
  1213. if (i >= DP_TX_MAX_NUM_FRAGS) {
  1214. dp_err_rl("nbuf cnt exceeds the max number of segs");
  1215. DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
  1216. goto error;
  1217. }
  1218. if (QDF_STATUS_SUCCESS !=
  1219. qdf_nbuf_map_nbytes_single(vdev->osdev,
  1220. curr_nbuf,
  1221. QDF_DMA_TO_DEVICE,
  1222. curr_nbuf->len)) {
  1223. dp_tx_err("%s dma map error ", __func__);
  1224. DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
  1225. goto error;
  1226. }
  1227. /* Update the count of mapped nbuf's */
  1228. mapped_buf_num++;
  1229. paddr = qdf_nbuf_get_frag_paddr(curr_nbuf, 0);
  1230. seg_info->frags[i].paddr_lo = paddr;
  1231. seg_info->frags[i].paddr_hi = ((uint64_t)paddr >> 32);
  1232. seg_info->frags[i].len = qdf_nbuf_len(curr_nbuf);
  1233. seg_info->frags[i].vaddr = (void *) curr_nbuf;
  1234. total_len += qdf_nbuf_len(curr_nbuf);
  1235. }
  1236. seg_info->frag_cnt = i;
  1237. seg_info->total_len = total_len;
  1238. seg_info->next = NULL;
  1239. sg_info->curr_seg = seg_info;
  1240. msdu_info->frm_type = dp_tx_frm_raw;
  1241. msdu_info->num_seg = 1;
  1242. return nbuf;
  1243. error:
  1244. i = 0;
  1245. while (nbuf) {
  1246. curr_nbuf = nbuf;
  1247. if (i < mapped_buf_num) {
  1248. qdf_nbuf_unmap_nbytes_single(vdev->osdev, curr_nbuf,
  1249. QDF_DMA_TO_DEVICE,
  1250. curr_nbuf->len);
  1251. i++;
  1252. }
  1253. nbuf = qdf_nbuf_next(nbuf);
  1254. qdf_nbuf_free(curr_nbuf);
  1255. }
  1256. return NULL;
  1257. }
  1258. /**
  1259. * dp_tx_raw_prepare_unset() - unmap the chain of nbufs belonging to RAW frame.
  1260. * @soc: DP soc handle
  1261. * @nbuf: Buffer pointer
  1262. *
  1263. * unmap the chain of nbufs that belong to this RAW frame.
  1264. *
  1265. * Return: None
  1266. */
  1267. static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
  1268. qdf_nbuf_t nbuf)
  1269. {
  1270. qdf_nbuf_t cur_nbuf = nbuf;
  1271. do {
  1272. qdf_nbuf_unmap_nbytes_single(soc->osdev, cur_nbuf,
  1273. QDF_DMA_TO_DEVICE,
  1274. cur_nbuf->len);
  1275. cur_nbuf = qdf_nbuf_next(cur_nbuf);
  1276. } while (cur_nbuf);
  1277. }
  1278. #ifdef VDEV_PEER_PROTOCOL_COUNT
  1279. void dp_vdev_peer_stats_update_protocol_cnt_tx(struct dp_vdev *vdev_hdl,
  1280. qdf_nbuf_t nbuf)
  1281. {
  1282. qdf_nbuf_t nbuf_local;
  1283. struct dp_vdev *vdev_local = vdev_hdl;
  1284. do {
  1285. if (qdf_likely(!((vdev_local)->peer_protocol_count_track)))
  1286. break;
  1287. nbuf_local = nbuf;
  1288. if (qdf_unlikely(((vdev_local)->tx_encap_type) ==
  1289. htt_cmn_pkt_type_raw))
  1290. break;
  1291. else if (qdf_unlikely(qdf_nbuf_is_nonlinear((nbuf_local))))
  1292. break;
  1293. else if (qdf_nbuf_is_tso((nbuf_local)))
  1294. break;
  1295. dp_vdev_peer_stats_update_protocol_cnt((vdev_local),
  1296. (nbuf_local),
  1297. NULL, 1, 0);
  1298. } while (0);
  1299. }
  1300. #endif
  1301. #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
  1302. /**
  1303. * dp_tx_update_stats() - Update soc level tx stats
  1304. * @soc: DP soc handle
  1305. * @tx_desc: TX descriptor reference
  1306. * @ring_id: TCL ring id
  1307. *
  1308. * Returns: none
  1309. */
  1310. void dp_tx_update_stats(struct dp_soc *soc,
  1311. struct dp_tx_desc_s *tx_desc,
  1312. uint8_t ring_id)
  1313. {
  1314. uint32_t stats_len = 0;
  1315. if (tx_desc->frm_type == dp_tx_frm_tso)
  1316. stats_len = tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
  1317. else
  1318. stats_len = qdf_nbuf_len(tx_desc->nbuf);
  1319. DP_STATS_INC_PKT(soc, tx.egress[ring_id], 1, stats_len);
  1320. }
  1321. int
  1322. dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
  1323. struct dp_tx_desc_s *tx_desc,
  1324. uint8_t tid,
  1325. struct dp_tx_msdu_info_s *msdu_info,
  1326. uint8_t ring_id)
  1327. {
  1328. struct dp_swlm *swlm = &soc->swlm;
  1329. union swlm_data swlm_query_data;
  1330. struct dp_swlm_tcl_data tcl_data;
  1331. QDF_STATUS status;
  1332. int ret;
  1333. if (!swlm->is_enabled)
  1334. return msdu_info->skip_hp_update;
  1335. tcl_data.nbuf = tx_desc->nbuf;
  1336. tcl_data.tid = tid;
  1337. tcl_data.ring_id = ring_id;
  1338. if (tx_desc->frm_type == dp_tx_frm_tso) {
  1339. tcl_data.pkt_len =
  1340. tx_desc->msdu_ext_desc->tso_desc->seg.total_len;
  1341. } else {
  1342. tcl_data.pkt_len = qdf_nbuf_len(tx_desc->nbuf);
  1343. }
  1344. tcl_data.num_ll_connections = vdev->num_latency_critical_conn;
  1345. swlm_query_data.tcl_data = &tcl_data;
  1346. status = dp_swlm_tcl_pre_check(soc, &tcl_data);
  1347. if (QDF_IS_STATUS_ERROR(status)) {
  1348. dp_swlm_tcl_reset_session_data(soc, ring_id);
  1349. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1350. return 0;
  1351. }
  1352. ret = dp_swlm_query_policy(soc, TCL_DATA, swlm_query_data);
  1353. if (ret) {
  1354. DP_STATS_INC(swlm, tcl[ring_id].coalesce_success, 1);
  1355. } else {
  1356. DP_STATS_INC(swlm, tcl[ring_id].coalesce_fail, 1);
  1357. }
  1358. return ret;
  1359. }
  1360. void
  1361. dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
  1362. int coalesce)
  1363. {
  1364. if (coalesce)
  1365. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1366. else
  1367. dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
  1368. }
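/*
 * Hedged usage sketch (not code from this file): callers are expected
 * to feed the coalescing verdict straight into the ring-access-end
 * helper, roughly:
 *
 *     coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
 *                                         msdu_info, ring_id);
 *     ...program the TCL descriptor...
 *     dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, coalesce);
 *
 * A non-zero coalesce value ends SRNG access with a reap-only call, so
 * the head-pointer update is deferred and batched with a later send.
 */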
  1369. static inline void
  1370. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1371. {
  1372. if (((i + 1) < msdu_info->num_seg))
  1373. msdu_info->skip_hp_update = 1;
  1374. else
  1375. msdu_info->skip_hp_update = 0;
  1376. }
  1377. static inline void
  1378. dp_flush_tcp_hp(struct dp_soc *soc, uint8_t ring_id)
  1379. {
  1380. hal_ring_handle_t hal_ring_hdl =
  1381. dp_tx_get_hal_ring_hdl(soc, ring_id);
  1382. if (dp_tx_hal_ring_access_start(soc, hal_ring_hdl)) {
  1383. dp_err("Fillmore: SRNG access start failed");
  1384. return;
  1385. }
  1386. dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
  1387. }
  1388. static inline void
  1389. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1390. QDF_STATUS status,
  1391. struct dp_tx_msdu_info_s *msdu_info)
  1392. {
  1393. if (QDF_IS_STATUS_ERROR(status) && !msdu_info->skip_hp_update) {
  1394. dp_flush_tcp_hp(soc,
  1395. (msdu_info->tx_queue.ring_id & DP_TX_QUEUE_MASK));
  1396. }
  1397. }
  1398. #else
  1399. static inline void
  1400. dp_tx_is_hp_update_required(uint32_t i, struct dp_tx_msdu_info_s *msdu_info)
  1401. {
  1402. }
  1403. static inline void
  1404. dp_tx_check_and_flush_hp(struct dp_soc *soc,
  1405. QDF_STATUS status,
  1406. struct dp_tx_msdu_info_s *msdu_info)
  1407. {
  1408. }
  1409. #endif
  1410. #ifdef FEATURE_RUNTIME_PM
  1411. static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
  1412. {
  1413. int ret;
  1414. ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
  1415. (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
  1416. return ret;
  1417. }
  1418. /**
  1419. * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
  1420. * @soc: Datapath soc handle
  1421. * @hal_ring_hdl: HAL ring handle
  1422. * @coalesce: Coalesce the current write or not
  1423. *
  1424. * Wrapper for HAL ring access end for data transmission for
  1425. * FEATURE_RUNTIME_PM
  1426. *
  1427. * Returns: none
  1428. */
  1429. void
  1430. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1431. hal_ring_handle_t hal_ring_hdl,
  1432. int coalesce)
  1433. {
  1434. int ret;
  1435. /*
  1436. * Avoid runtime get and put APIs under high throughput scenarios.
  1437. */
  1438. if (dp_get_rtpm_tput_policy_requirement(soc)) {
  1439. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1440. return;
  1441. }
  1442. ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
  1443. if (QDF_IS_STATUS_SUCCESS(ret)) {
  1444. if (hif_system_pm_state_check(soc->hif_handle)) {
  1445. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1446. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1447. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1448. } else {
  1449. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1450. }
  1451. hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
  1452. } else {
  1453. dp_runtime_get(soc);
  1454. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1455. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1456. qdf_atomic_inc(&soc->tx_pending_rtpm);
  1457. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1458. dp_runtime_put(soc);
  1459. }
  1460. }
  1461. #else
  1462. #ifdef DP_POWER_SAVE
  1463. void
  1464. dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
  1465. hal_ring_handle_t hal_ring_hdl,
  1466. int coalesce)
  1467. {
  1468. if (hif_system_pm_state_check(soc->hif_handle)) {
  1469. dp_tx_hal_ring_access_end_reap(soc, hal_ring_hdl);
  1470. hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
  1471. hal_srng_inc_flush_cnt(hal_ring_hdl);
  1472. } else {
  1473. dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
  1474. }
  1475. }
  1476. #endif
  1477. static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
  1478. {
  1479. return 0;
  1480. }
  1481. #endif
  1482. /**
  1483. * dp_tx_get_tid() - Obtain TID to be used for this frame
  1484. * @vdev: DP vdev handle
  1485. * @nbuf: skb
1486. * @msdu_info: msdu_info structure to be updated with the TID
1487. * Extract the DSCP or PCP information from the frame and map it into a TID value.
  1488. *
  1489. * Return: void
  1490. */
  1491. static void dp_tx_get_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1492. struct dp_tx_msdu_info_s *msdu_info)
  1493. {
  1494. uint8_t tos = 0, dscp_tid_override = 0;
  1495. uint8_t *hdr_ptr, *L3datap;
  1496. uint8_t is_mcast = 0;
  1497. qdf_ether_header_t *eh = NULL;
  1498. qdf_ethervlan_header_t *evh = NULL;
  1499. uint16_t ether_type;
  1500. qdf_llc_t *llcHdr;
  1501. struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
  1502. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1503. if (qdf_likely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1504. eh = (qdf_ether_header_t *)nbuf->data;
  1505. hdr_ptr = (uint8_t *)(eh->ether_dhost);
  1506. L3datap = hdr_ptr + sizeof(qdf_ether_header_t);
  1507. } else {
  1508. qdf_dot3_qosframe_t *qos_wh =
  1509. (qdf_dot3_qosframe_t *) nbuf->data;
  1510. msdu_info->tid = qos_wh->i_fc[0] & DP_FC0_SUBTYPE_QOS ?
  1511. qos_wh->i_qos[0] & DP_QOS_TID : 0;
  1512. return;
  1513. }
  1514. is_mcast = DP_FRAME_IS_MULTICAST(hdr_ptr);
  1515. ether_type = eh->ether_type;
  1516. llcHdr = (qdf_llc_t *)(nbuf->data + sizeof(qdf_ether_header_t));
  1517. /*
  1518. * Check if packet is dot3 or eth2 type.
  1519. */
  1520. if (DP_FRAME_IS_LLC(ether_type) && DP_FRAME_IS_SNAP(llcHdr)) {
  1521. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE +
  1522. sizeof(*llcHdr));
  1523. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1524. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t) +
  1525. sizeof(*llcHdr);
  1526. ether_type = (uint16_t)*(nbuf->data + 2*QDF_MAC_ADDR_SIZE
  1527. + sizeof(*llcHdr) +
  1528. sizeof(qdf_net_vlanhdr_t));
  1529. } else {
  1530. L3datap = hdr_ptr + sizeof(qdf_ether_header_t) +
  1531. sizeof(*llcHdr);
  1532. }
  1533. } else {
  1534. if (ether_type == htons(ETHERTYPE_VLAN)) {
  1535. evh = (qdf_ethervlan_header_t *) eh;
  1536. ether_type = evh->ether_type;
  1537. L3datap = hdr_ptr + sizeof(qdf_ethervlan_header_t);
  1538. }
  1539. }
  1540. /*
  1541. * Find priority from IP TOS DSCP field
  1542. */
  1543. if (qdf_nbuf_is_ipv4_pkt(nbuf)) {
  1544. qdf_net_iphdr_t *ip = (qdf_net_iphdr_t *) L3datap;
  1545. if (qdf_nbuf_is_ipv4_dhcp_pkt(nbuf)) {
  1546. /* Only for unicast frames */
  1547. if (!is_mcast) {
  1548. /* send it on VO queue */
  1549. msdu_info->tid = DP_VO_TID;
  1550. }
  1551. } else {
  1552. /*
  1553. * IP frame: exclude ECN bits 0-1 and map DSCP bits 2-7
  1554. * from TOS byte.
  1555. */
  1556. tos = ip->ip_tos;
  1557. dscp_tid_override = 1;
  1558. }
  1559. } else if (qdf_nbuf_is_ipv6_pkt(nbuf)) {
  1560. /* TODO
  1561. * use flowlabel
1562. * igmpmld cases to be handled in phase 2
  1563. */
  1564. unsigned long ver_pri_flowlabel;
  1565. unsigned long pri;
  1566. ver_pri_flowlabel = *(unsigned long *) L3datap;
  1567. pri = (ntohl(ver_pri_flowlabel) & IPV6_FLOWINFO_PRIORITY) >>
  1568. DP_IPV6_PRIORITY_SHIFT;
  1569. tos = pri;
  1570. dscp_tid_override = 1;
  1571. } else if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
  1572. msdu_info->tid = DP_VO_TID;
  1573. else if (qdf_nbuf_is_ipv4_arp_pkt(nbuf)) {
  1574. /* Only for unicast frames */
  1575. if (!is_mcast) {
  1576. /* send ucast arp on VO queue */
  1577. msdu_info->tid = DP_VO_TID;
  1578. }
  1579. }
  1580. /*
  1581. * Assign all MCAST packets to BE
  1582. */
  1583. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  1584. if (is_mcast) {
  1585. tos = 0;
  1586. dscp_tid_override = 1;
  1587. }
  1588. }
  1589. if (dscp_tid_override == 1) {
  1590. tos = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
  1591. msdu_info->tid = pdev->dscp_tid_map[vdev->dscp_tid_map_id][tos];
  1592. }
  1593. if (msdu_info->tid >= CDP_MAX_DATA_TIDS)
  1594. msdu_info->tid = CDP_MAX_DATA_TIDS - 1;
  1595. return;
  1596. }
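/*
 * Worked example (illustrative; assumes DP_IP_DSCP_SHIFT == 2 and a
 * 6-bit DP_IP_DSCP_MASK): an IPv4 frame with TOS 0xB8 carries DSCP 46
 * (Expedited Forwarding). The code above computes
 *     tos = (0xB8 >> 2) & 0x3f = 46
 * and then resolves the TID from
 *     pdev->dscp_tid_map[vdev->dscp_tid_map_id][46]
 * so the final TID depends entirely on the DSCP-TID map configured for
 * this vdev, not on a fixed DSCP-to-TID rule.
 */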
  1597. /**
  1598. * dp_tx_classify_tid() - Obtain TID to be used for this frame
  1599. * @vdev: DP vdev handle
  1600. * @nbuf: skb
1601. * @msdu_info: msdu_info structure to be updated with the TID
  1602. * Software based TID classification is required when more than 2 DSCP-TID
  1603. * mapping tables are needed.
  1604. * Hardware supports 2 DSCP-TID mapping tables for HKv1 and 48 for HKv2.
  1605. *
  1606. * Return: void
  1607. */
  1608. static inline void dp_tx_classify_tid(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1609. struct dp_tx_msdu_info_s *msdu_info)
  1610. {
  1611. DP_TX_TID_OVERRIDE(msdu_info, nbuf);
  1612. /*
1613. * skip_sw_tid_classification flag will be set in the below cases:
  1614. * 1. vdev->dscp_tid_map_id < pdev->soc->num_hw_dscp_tid_map
  1615. * 2. hlos_tid_override enabled for vdev
  1616. * 3. mesh mode enabled for vdev
  1617. */
  1618. if (qdf_likely(vdev->skip_sw_tid_classification)) {
  1619. /* Update tid in msdu_info from skb priority */
  1620. if (qdf_unlikely(vdev->skip_sw_tid_classification
  1621. & DP_TXRX_HLOS_TID_OVERRIDE_ENABLED)) {
  1622. uint32_t tid = qdf_nbuf_get_priority(nbuf);
  1623. if (tid == DP_TX_INVALID_QOS_TAG)
  1624. return;
  1625. msdu_info->tid = tid;
  1626. return;
  1627. }
  1628. return;
  1629. }
  1630. dp_tx_get_tid(vdev, nbuf, msdu_info);
  1631. }
  1632. #ifdef FEATURE_WLAN_TDLS
  1633. /**
  1634. * dp_tx_update_tdls_flags() - Update descriptor flags for TDLS frame
  1635. * @soc: datapath SOC
  1636. * @vdev: datapath vdev
  1637. * @tx_desc: TX descriptor
  1638. *
  1639. * Return: None
  1640. */
  1641. static void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1642. struct dp_vdev *vdev,
  1643. struct dp_tx_desc_s *tx_desc)
  1644. {
  1645. if (vdev) {
  1646. if (vdev->is_tdls_frame) {
  1647. tx_desc->flags |= DP_TX_DESC_FLAG_TDLS_FRAME;
  1648. vdev->is_tdls_frame = false;
  1649. }
  1650. }
  1651. }
  1652. static uint8_t dp_htt_tx_comp_get_status(struct dp_soc *soc, char *htt_desc)
  1653. {
  1654. uint8_t tx_status = HTT_TX_FW2WBM_TX_STATUS_MAX;
  1655. switch (soc->arch_id) {
  1656. case CDP_ARCH_TYPE_LI:
  1657. tx_status = HTT_TX_WBM_COMPLETION_V2_TX_STATUS_GET(htt_desc[0]);
  1658. break;
  1659. case CDP_ARCH_TYPE_BE:
  1660. tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
  1661. break;
  1662. default:
  1663. dp_err("Incorrect CDP_ARCH %d", soc->arch_id);
  1664. QDF_BUG(0);
  1665. }
  1666. return tx_status;
  1667. }
  1668. /**
  1669. * dp_non_std_htt_tx_comp_free_buff() - Free the non std tx packet buffer
  1670. * @soc: dp_soc handle
  1671. * @tx_desc: TX descriptor
  1673. *
  1674. * Return: None
  1675. */
  1676. static void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1677. struct dp_tx_desc_s *tx_desc)
  1678. {
  1679. uint8_t tx_status = 0;
  1680. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  1681. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1682. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
  1683. DP_MOD_ID_TDLS);
  1684. if (qdf_unlikely(!vdev)) {
  1685. dp_err_rl("vdev is null!");
  1686. goto error;
  1687. }
  1688. hal_tx_comp_get_htt_desc(&tx_desc->comp, htt_tx_status);
  1689. tx_status = dp_htt_tx_comp_get_status(soc, htt_tx_status);
  1690. dp_debug("vdev_id: %d tx_status: %d", tx_desc->vdev_id, tx_status);
  1691. if (vdev->tx_non_std_data_callback.func) {
  1692. qdf_nbuf_set_next(nbuf, NULL);
  1693. vdev->tx_non_std_data_callback.func(
  1694. vdev->tx_non_std_data_callback.ctxt,
  1695. nbuf, tx_status);
  1696. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1697. return;
  1698. } else {
  1699. dp_err_rl("callback func is null");
  1700. }
  1701. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  1702. error:
  1703. qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
  1704. qdf_nbuf_free(nbuf);
  1705. }
  1706. /**
  1707. * dp_tx_msdu_single_map() - do nbuf map
  1708. * @vdev: DP vdev handle
  1709. * @tx_desc: DP TX descriptor pointer
  1710. * @nbuf: skb pointer
  1711. *
  1712. * For TDLS frame, use qdf_nbuf_map_single() to align with the unmap
  1713. * operation done in other component.
  1714. *
  1715. * Return: QDF_STATUS
  1716. */
  1717. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1718. struct dp_tx_desc_s *tx_desc,
  1719. qdf_nbuf_t nbuf)
  1720. {
  1721. if (qdf_likely(!(tx_desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)))
  1722. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1723. nbuf,
  1724. QDF_DMA_TO_DEVICE,
  1725. nbuf->len);
  1726. else
  1727. return qdf_nbuf_map_single(vdev->osdev, nbuf,
  1728. QDF_DMA_TO_DEVICE);
  1729. }
  1730. #else
  1731. static inline void dp_tx_update_tdls_flags(struct dp_soc *soc,
  1732. struct dp_vdev *vdev,
  1733. struct dp_tx_desc_s *tx_desc)
  1734. {
  1735. }
  1736. static inline void dp_non_std_htt_tx_comp_free_buff(struct dp_soc *soc,
  1737. struct dp_tx_desc_s *tx_desc)
  1738. {
  1739. }
  1740. static inline QDF_STATUS dp_tx_msdu_single_map(struct dp_vdev *vdev,
  1741. struct dp_tx_desc_s *tx_desc,
  1742. qdf_nbuf_t nbuf)
  1743. {
  1744. return qdf_nbuf_map_nbytes_single(vdev->osdev,
  1745. nbuf,
  1746. QDF_DMA_TO_DEVICE,
  1747. nbuf->len);
  1748. }
  1749. #endif
  1750. static inline
  1751. qdf_dma_addr_t dp_tx_nbuf_map_regular(struct dp_vdev *vdev,
  1752. struct dp_tx_desc_s *tx_desc,
  1753. qdf_nbuf_t nbuf)
  1754. {
  1755. QDF_STATUS ret = QDF_STATUS_E_FAILURE;
  1756. ret = dp_tx_msdu_single_map(vdev, tx_desc, nbuf);
  1757. if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret)))
  1758. return 0;
  1759. return qdf_nbuf_mapped_paddr_get(nbuf);
  1760. }
  1761. static inline
  1762. void dp_tx_nbuf_unmap_regular(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1763. {
  1764. qdf_nbuf_unmap_nbytes_single_paddr(soc->osdev,
  1765. desc->nbuf,
  1766. desc->dma_addr,
  1767. QDF_DMA_TO_DEVICE,
  1768. desc->length);
  1769. }
  1770. #if defined(QCA_DP_TX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
  1771. static inline
  1772. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1773. struct dp_tx_desc_s *tx_desc,
  1774. qdf_nbuf_t nbuf)
  1775. {
  1776. if (qdf_likely(tx_desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  1777. qdf_nbuf_dma_clean_range((void *)nbuf->data,
  1778. (void *)(nbuf->data + nbuf->len));
  1779. return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
  1780. } else {
  1781. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1782. }
  1783. }
  1784. static inline
  1785. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1786. struct dp_tx_desc_s *desc)
  1787. {
  1788. if (qdf_unlikely(!(desc->flags & DP_TX_DESC_FLAG_SIMPLE)))
  1789. return dp_tx_nbuf_unmap_regular(soc, desc);
  1790. }
  1791. #else
  1792. static inline
  1793. qdf_dma_addr_t dp_tx_nbuf_map(struct dp_vdev *vdev,
  1794. struct dp_tx_desc_s *tx_desc,
  1795. qdf_nbuf_t nbuf)
  1796. {
  1797. return dp_tx_nbuf_map_regular(vdev, tx_desc, nbuf);
  1798. }
  1799. static inline
  1800. void dp_tx_nbuf_unmap(struct dp_soc *soc,
  1801. struct dp_tx_desc_s *desc)
  1802. {
  1803. return dp_tx_nbuf_unmap_regular(soc, desc);
  1804. }
  1805. #endif
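/*
 * Note: the map and unmap variants above must stay symmetric. A
 * descriptor flagged DP_TX_DESC_FLAG_SIMPLE is never IOMMU-mapped
 * (only a cache clean plus qdf_mem_virt_to_phys()), so its completion
 * must skip the unmap as well; dp_tx_nbuf_unmap() enforces this by
 * checking the same flag. Callers that obtain the DMA address through
 * dp_tx_nbuf_map() should therefore release it only through
 * dp_tx_nbuf_unmap().
 */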
  1806. #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(FEATURE_PERPKT_INFO)
  1807. static inline
  1808. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1809. {
  1810. dp_tx_nbuf_unmap(soc, desc);
  1811. desc->flags |= DP_TX_DESC_FLAG_UNMAP_DONE;
  1812. }
  1813. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1814. {
  1815. if (qdf_likely(!(desc->flags & DP_TX_DESC_FLAG_UNMAP_DONE)))
  1816. dp_tx_nbuf_unmap(soc, desc);
  1817. }
  1818. #else
  1819. static inline
  1820. void dp_tx_enh_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1821. {
  1822. }
  1823. static inline void dp_tx_unmap(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  1824. {
  1825. dp_tx_nbuf_unmap(soc, desc);
  1826. }
  1827. #endif
  1828. #ifdef MESH_MODE_SUPPORT
  1829. /**
  1830. * dp_tx_update_mesh_flags() - Update descriptor flags for mesh VAP
  1831. * @soc: datapath SOC
  1832. * @vdev: datapath vdev
  1833. * @tx_desc: TX descriptor
  1834. *
  1835. * Return: None
  1836. */
  1837. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1838. struct dp_vdev *vdev,
  1839. struct dp_tx_desc_s *tx_desc)
  1840. {
  1841. if (qdf_unlikely(vdev->mesh_vdev))
  1842. tx_desc->flags |= DP_TX_DESC_FLAG_MESH_MODE;
  1843. }
  1844. /**
  1845. * dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
  1846. * @soc: dp_soc handle
  1847. * @tx_desc: TX descriptor
  1848. * @delayed_free: delay the nbuf free
  1849. *
1850. * Return: nbuf to be freed later
  1851. */
  1852. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1853. struct dp_tx_desc_s *tx_desc,
  1854. bool delayed_free)
  1855. {
  1856. qdf_nbuf_t nbuf = tx_desc->nbuf;
  1857. struct dp_vdev *vdev = NULL;
  1858. vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
  1859. if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
  1860. if (vdev)
  1861. DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
  1862. if (delayed_free)
  1863. return nbuf;
  1864. qdf_nbuf_free(nbuf);
  1865. } else {
  1866. if (vdev && vdev->osif_tx_free_ext) {
  1867. vdev->osif_tx_free_ext((nbuf));
  1868. } else {
  1869. if (delayed_free)
  1870. return nbuf;
  1871. qdf_nbuf_free(nbuf);
  1872. }
  1873. }
  1874. if (vdev)
  1875. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  1876. return NULL;
  1877. }
  1878. #else
  1879. static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
  1880. struct dp_vdev *vdev,
  1881. struct dp_tx_desc_s *tx_desc)
  1882. {
  1883. }
  1884. static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
  1885. struct dp_tx_desc_s *tx_desc,
  1886. bool delayed_free)
  1887. {
  1888. return NULL;
  1889. }
  1890. #endif
  1891. /**
  1892. * dp_tx_frame_is_drop() - checks if the packet is loopback
  1893. * @vdev: DP vdev handle
1894. * @srcmac: source MAC address
1895. * @dstmac: destination MAC address
*
  1896. * Return: 1 if frame needs to be dropped else 0
  1897. */
  1898. int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac)
  1899. {
  1900. struct dp_pdev *pdev = NULL;
  1901. struct dp_ast_entry *src_ast_entry = NULL;
  1902. struct dp_ast_entry *dst_ast_entry = NULL;
  1903. struct dp_soc *soc = NULL;
  1904. qdf_assert(vdev);
  1905. pdev = vdev->pdev;
  1906. qdf_assert(pdev);
  1907. soc = pdev->soc;
  1908. dst_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1909. (soc, dstmac, vdev->pdev->pdev_id);
  1910. src_ast_entry = dp_peer_ast_hash_find_by_pdevid
  1911. (soc, srcmac, vdev->pdev->pdev_id);
  1912. if (dst_ast_entry && src_ast_entry) {
  1913. if (dst_ast_entry->peer_id ==
  1914. src_ast_entry->peer_id)
  1915. return 1;
  1916. }
  1917. return 0;
  1918. }
  1919. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  1920. defined(WLAN_MCAST_MLO)
  1921. /* MLO peer id for reinject*/
  1922. #define DP_MLO_MCAST_REINJECT_PEER_ID 0XFFFD
  1923. /* MLO vdev id inc offset */
  1924. #define DP_MLO_VDEV_ID_OFFSET 0x80
  1925. static inline void
  1926. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  1927. {
  1928. if (!(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)) {
  1929. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  1930. qdf_atomic_inc(&soc->num_tx_exception);
  1931. }
  1932. }
  1933. static inline void
  1934. dp_tx_update_mcast_param(uint16_t peer_id,
  1935. uint16_t *htt_tcl_metadata,
  1936. struct dp_vdev *vdev,
  1937. struct dp_tx_msdu_info_s *msdu_info)
  1938. {
  1939. if (peer_id == DP_MLO_MCAST_REINJECT_PEER_ID) {
  1940. *htt_tcl_metadata = 0;
  1941. DP_TX_TCL_METADATA_TYPE_SET(
  1942. *htt_tcl_metadata,
  1943. HTT_TCL_METADATA_V2_TYPE_GLOBAL_SEQ_BASED);
  1944. HTT_TX_TCL_METADATA_GLBL_SEQ_NO_SET(*htt_tcl_metadata,
  1945. msdu_info->gsn);
  1946. msdu_info->vdev_id = vdev->vdev_id + DP_MLO_VDEV_ID_OFFSET;
  1947. if (qdf_unlikely(vdev->nawds_enabled))
  1948. HTT_TX_TCL_METADATA_GLBL_SEQ_HOST_INSPECTED_SET(
  1949. *htt_tcl_metadata, 1);
  1950. } else {
  1951. msdu_info->vdev_id = vdev->vdev_id;
  1952. }
  1953. }
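/*
 * Note: for the reinject peer id (0xFFFD) the TCL metadata is rebuilt
 * as global-sequence based and the vdev id is offset by
 * DP_MLO_VDEV_ID_OFFSET, e.g. vdev_id 3 is reported in
 * msdu_info->vdev_id as 3 + 0x80 = 0x83. All other peer ids keep the
 * vdev's own id unchanged.
 */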
  1954. #else
  1955. static inline void
  1956. dp_tx_bypass_reinjection(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  1957. {
  1958. }
  1959. static inline void
  1960. dp_tx_update_mcast_param(uint16_t peer_id,
  1961. uint16_t *htt_tcl_metadata,
  1962. struct dp_vdev *vdev,
  1963. struct dp_tx_msdu_info_s *msdu_info)
  1964. {
  1965. }
  1966. #endif
  1967. /**
  1968. * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
  1969. * @vdev: DP vdev handle
  1970. * @nbuf: skb
1971. * @msdu_info: MSDU information (TID, fw metadata, Tx queue, etc.)
  1974. * @peer_id: peer_id of the peer in case of NAWDS frames
  1975. * @tx_exc_metadata: Handle that holds exception path metadata
  1976. *
  1977. * Return: NULL on success,
  1978. * nbuf when it fails to send
  1979. */
  1980. qdf_nbuf_t
  1981. dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  1982. struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
  1983. struct cdp_tx_exception_metadata *tx_exc_metadata)
  1984. {
  1985. struct dp_pdev *pdev = vdev->pdev;
  1986. struct dp_soc *soc = pdev->soc;
  1987. struct dp_tx_desc_s *tx_desc;
  1988. QDF_STATUS status;
  1989. struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
  1990. uint16_t htt_tcl_metadata = 0;
  1991. enum cdp_tx_sw_drop drop_code = TX_MAX_DROP;
  1992. uint8_t tid = msdu_info->tid;
  1993. struct cdp_tid_tx_stats *tid_stats = NULL;
  1994. qdf_dma_addr_t paddr;
  1995. /* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
  1996. tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
  1997. msdu_info, tx_exc_metadata);
  1998. if (!tx_desc) {
  1999. dp_err_rl("Tx_desc prepare Fail vdev %pK queue %d",
  2000. vdev, tx_q->desc_pool_id);
  2001. drop_code = TX_DESC_ERR;
  2002. goto fail_return;
  2003. }
  2004. dp_tx_update_tdls_flags(soc, vdev, tx_desc);
  2005. if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
  2006. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2007. DP_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
  2008. } else if (qdf_unlikely(peer_id != HTT_INVALID_PEER)) {
  2009. DP_TX_TCL_METADATA_TYPE_SET(htt_tcl_metadata,
  2010. DP_TCL_METADATA_TYPE_PEER_BASED);
  2011. DP_TX_TCL_METADATA_PEER_ID_SET(htt_tcl_metadata,
  2012. peer_id);
  2013. dp_tx_bypass_reinjection(soc, tx_desc);
  2014. } else
  2015. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2016. if (msdu_info->exception_fw)
  2017. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2018. dp_tx_desc_update_fast_comp_flag(soc, tx_desc,
  2019. !pdev->enhanced_stats_en);
  2020. dp_tx_update_mesh_flags(soc, vdev, tx_desc);
  2021. paddr = dp_tx_nbuf_map(vdev, tx_desc, nbuf);
  2022. if (!paddr) {
  2023. /* Handle failure */
  2024. dp_err("qdf_nbuf_map failed");
  2025. DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
  2026. drop_code = TX_DMA_MAP_ERR;
  2027. goto release_desc;
  2028. }
  2029. tx_desc->dma_addr = paddr;
  2030. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2031. tx_desc->id, DP_TX_DESC_MAP);
  2032. dp_tx_update_mcast_param(peer_id, &htt_tcl_metadata, vdev, msdu_info);
  2033. /* Enqueue the Tx MSDU descriptor to HW for transmit */
  2034. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2035. htt_tcl_metadata,
  2036. tx_exc_metadata, msdu_info);
  2037. if (status != QDF_STATUS_SUCCESS) {
  2038. dp_tx_err_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2039. tx_desc, tx_q->ring_id);
  2040. dp_tx_desc_history_add(soc, tx_desc->dma_addr, nbuf,
  2041. tx_desc->id, DP_TX_DESC_UNMAP);
  2042. dp_tx_nbuf_unmap(soc, tx_desc);
  2043. drop_code = TX_HW_ENQUEUE;
  2044. goto release_desc;
  2045. }
  2046. return NULL;
  2047. release_desc:
  2048. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2049. fail_return:
  2050. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2051. tid_stats = &pdev->stats.tid_stats.
  2052. tid_tx_stats[tx_q->ring_id][tid];
  2053. tid_stats->swdrop_cnt[drop_code]++;
  2054. return nbuf;
  2055. }
  2056. /**
  2057. * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
  2058. * @soc: Soc handle
  2059. * @desc: software Tx descriptor to be processed
  2060. * @delayed_free: defer freeing of nbuf
  2061. *
  2062. * Return: nbuf to be freed later
  2063. */
  2064. qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
  2065. bool delayed_free)
  2066. {
  2067. qdf_nbuf_t nbuf = desc->nbuf;
  2068. enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
  2069. /* nbuf already freed in vdev detach path */
  2070. if (!nbuf)
  2071. return NULL;
  2072. /* If it is TDLS mgmt, don't unmap or free the frame */
  2073. if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
  2074. dp_non_std_htt_tx_comp_free_buff(soc, desc);
  2075. return NULL;
  2076. }
  2077. /* 0 : MSDU buffer, 1 : MLE */
  2078. if (desc->msdu_ext_desc) {
  2079. /* TSO free */
  2080. if (hal_tx_ext_desc_get_tso_enable(
  2081. desc->msdu_ext_desc->vaddr)) {
  2082. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  2083. desc->id, DP_TX_COMP_MSDU_EXT);
  2084. dp_tx_tso_seg_history_add(soc,
  2085. desc->msdu_ext_desc->tso_desc,
  2086. desc->nbuf, desc->id, type);
2087. /* unmap each TSO seg before freeing the nbuf */
  2088. dp_tx_tso_unmap_segment(soc,
  2089. desc->msdu_ext_desc->tso_desc,
  2090. desc->msdu_ext_desc->
  2091. tso_num_desc);
  2092. goto nbuf_free;
  2093. }
  2094. if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
  2095. void *msdu_ext_desc = desc->msdu_ext_desc->vaddr;
  2096. qdf_dma_addr_t iova;
  2097. uint32_t frag_len;
  2098. uint32_t i;
  2099. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
  2100. QDF_DMA_TO_DEVICE,
  2101. qdf_nbuf_headlen(nbuf));
  2102. for (i = 1; i < DP_TX_MAX_NUM_FRAGS; i++) {
  2103. hal_tx_ext_desc_get_frag_info(msdu_ext_desc, i,
  2104. &iova,
  2105. &frag_len);
  2106. if (!iova || !frag_len)
  2107. break;
  2108. qdf_mem_unmap_page(soc->osdev, iova, frag_len,
  2109. QDF_DMA_TO_DEVICE);
  2110. }
  2111. goto nbuf_free;
  2112. }
  2113. }
2114. /* If it's an ME frame, don't unmap the cloned nbufs */
  2115. if ((desc->flags & DP_TX_DESC_FLAG_ME) && qdf_nbuf_is_cloned(nbuf))
  2116. goto nbuf_free;
  2117. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf, desc->id, type);
  2118. dp_tx_unmap(soc, desc);
  2119. if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
  2120. return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
  2121. if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
  2122. return NULL;
  2123. nbuf_free:
  2124. if (delayed_free)
  2125. return nbuf;
  2126. qdf_nbuf_free(nbuf);
  2127. return NULL;
  2128. }
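/*
 * Summary of the free-buffer decision tree above:
 *   1. TDLS mgmt frames go to the non-std completion handler.
 *   2. TSO frames unmap each segment through the extension descriptor.
 *   3. SG frames unmap the head buffer plus every mapped fragment.
 *   4. Cloned ME frames skip the unmap entirely.
 *   5. After the regular unmap, mesh frames may be handed to
 *      osif_tx_free_ext() and traffic-end-indication frames are
 *      re-queued on the vdev instead of being freed.
 * Everything that falls through to nbuf_free is freed immediately, or
 * returned to the caller when delayed_free is set.
 */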
  2129. /**
  2130. * dp_tx_sg_unmap_buf() - Unmap scatter gather fragments
  2131. * @soc: DP soc handle
  2132. * @nbuf: skb
  2133. * @msdu_info: MSDU info
  2134. *
  2135. * Return: None
  2136. */
  2137. static inline void
  2138. dp_tx_sg_unmap_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
  2139. struct dp_tx_msdu_info_s *msdu_info)
  2140. {
  2141. uint32_t cur_idx;
  2142. struct dp_tx_seg_info_s *seg = msdu_info->u.sg_info.curr_seg;
  2143. qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_TO_DEVICE,
  2144. qdf_nbuf_headlen(nbuf));
  2145. for (cur_idx = 1; cur_idx < seg->frag_cnt; cur_idx++)
  2146. qdf_mem_unmap_page(soc->osdev, (qdf_dma_addr_t)
  2147. (seg->frags[cur_idx].paddr_lo | ((uint64_t)
  2148. seg->frags[cur_idx].paddr_hi) << 32),
  2149. seg->frags[cur_idx].len,
  2150. QDF_DMA_TO_DEVICE);
  2151. }
  2152. /**
  2153. * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
  2154. * @vdev: DP vdev handle
  2155. * @nbuf: skb
  2156. * @msdu_info: MSDU info to be setup in MSDU extension descriptor
  2157. *
  2158. * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
  2159. *
  2160. * Return: NULL on success,
  2161. * nbuf when it fails to send
  2162. */
  2163. #if QDF_LOCK_STATS
  2164. noinline
  2165. #else
  2166. #endif
  2167. qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2168. struct dp_tx_msdu_info_s *msdu_info)
  2169. {
  2170. uint32_t i;
  2171. struct dp_pdev *pdev = vdev->pdev;
  2172. struct dp_soc *soc = pdev->soc;
  2173. struct dp_tx_desc_s *tx_desc;
  2174. bool is_cce_classified = false;
  2175. QDF_STATUS status;
  2176. uint16_t htt_tcl_metadata = 0;
  2177. struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
  2178. struct cdp_tid_tx_stats *tid_stats = NULL;
  2179. uint8_t prep_desc_fail = 0, hw_enq_fail = 0;
  2180. if (msdu_info->frm_type == dp_tx_frm_me)
  2181. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2182. i = 0;
  2183. /* Print statement to track i and num_seg */
  2184. /*
  2185. * For each segment (maps to 1 MSDU) , prepare software and hardware
  2186. * descriptors using information in msdu_info
  2187. */
  2188. while (i < msdu_info->num_seg) {
  2189. /*
  2190. * Setup Tx descriptor for an MSDU, and MSDU extension
  2191. * descriptor
  2192. */
  2193. tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
  2194. tx_q->desc_pool_id);
  2195. if (!tx_desc) {
  2196. if (msdu_info->frm_type == dp_tx_frm_me) {
  2197. prep_desc_fail++;
  2198. dp_tx_me_free_buf(pdev,
  2199. (void *)(msdu_info->u.sg_info
  2200. .curr_seg->frags[0].vaddr));
  2201. if (prep_desc_fail == msdu_info->num_seg) {
  2202. /*
  2203. * Unmap is needed only if descriptor
  2204. * preparation failed for all segments.
  2205. */
  2206. qdf_nbuf_unmap(soc->osdev,
  2207. msdu_info->u.sg_info.
  2208. curr_seg->nbuf,
  2209. QDF_DMA_TO_DEVICE);
  2210. }
  2211. /*
  2212. * Free the nbuf for the current segment
  2213. * and make it point to the next in the list.
2214. * For ME, there are as many segments as there
2215. * are clients.
  2216. */
  2217. qdf_nbuf_free(msdu_info->u.sg_info
  2218. .curr_seg->nbuf);
  2219. if (msdu_info->u.sg_info.curr_seg->next) {
  2220. msdu_info->u.sg_info.curr_seg =
  2221. msdu_info->u.sg_info
  2222. .curr_seg->next;
  2223. nbuf = msdu_info->u.sg_info
  2224. .curr_seg->nbuf;
  2225. }
  2226. i++;
  2227. continue;
  2228. }
  2229. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2230. dp_tx_tso_seg_history_add(
  2231. soc,
  2232. msdu_info->u.tso_info.curr_seg,
  2233. nbuf, 0, DP_TX_DESC_UNMAP);
  2234. dp_tx_tso_unmap_segment(soc,
  2235. msdu_info->u.tso_info.
  2236. curr_seg,
  2237. msdu_info->u.tso_info.
  2238. tso_num_seg_list);
  2239. if (msdu_info->u.tso_info.curr_seg->next) {
  2240. msdu_info->u.tso_info.curr_seg =
  2241. msdu_info->u.tso_info.curr_seg->next;
  2242. i++;
  2243. continue;
  2244. }
  2245. }
  2246. if (msdu_info->frm_type == dp_tx_frm_sg)
  2247. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2248. goto done;
  2249. }
  2250. if (msdu_info->frm_type == dp_tx_frm_me) {
  2251. tx_desc->msdu_ext_desc->me_buffer =
  2252. (struct dp_tx_me_buf_t *)msdu_info->
  2253. u.sg_info.curr_seg->frags[0].vaddr;
  2254. tx_desc->flags |= DP_TX_DESC_FLAG_ME;
  2255. }
  2256. if (is_cce_classified)
  2257. tx_desc->flags |= DP_TX_DESC_FLAG_TO_FW;
  2258. htt_tcl_metadata = vdev->htt_tcl_metadata;
  2259. if (msdu_info->exception_fw) {
  2260. DP_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 1);
  2261. }
  2262. dp_tx_is_hp_update_required(i, msdu_info);
  2263. /*
  2264. * For frames with multiple segments (TSO, ME), jump to next
  2265. * segment.
  2266. */
  2267. if (msdu_info->frm_type == dp_tx_frm_tso) {
  2268. if (msdu_info->u.tso_info.curr_seg->next) {
  2269. msdu_info->u.tso_info.curr_seg =
  2270. msdu_info->u.tso_info.curr_seg->next;
  2271. /*
  2272. * If this is a jumbo nbuf, then increment the
  2273. * number of nbuf users for each additional
  2274. * segment of the msdu. This will ensure that
  2275. * the skb is freed only after receiving tx
  2276. * completion for all segments of an nbuf
  2277. */
  2278. qdf_nbuf_inc_users(nbuf);
  2279. /* Check with MCL if this is needed */
  2280. /* nbuf = msdu_info->u.tso_info.curr_seg->nbuf;
  2281. */
  2282. }
  2283. }
  2284. dp_tx_update_mcast_param(DP_INVALID_PEER,
  2285. &htt_tcl_metadata,
  2286. vdev,
  2287. msdu_info);
  2288. /*
  2289. * Enqueue the Tx MSDU descriptor to HW for transmit
  2290. */
  2291. status = soc->arch_ops.tx_hw_enqueue(soc, vdev, tx_desc,
  2292. htt_tcl_metadata,
  2293. NULL, msdu_info);
  2294. dp_tx_check_and_flush_hp(soc, status, msdu_info);
  2295. if (status != QDF_STATUS_SUCCESS) {
  2296. dp_info_rl("Tx_hw_enqueue Fail tx_desc %pK queue %d",
  2297. tx_desc, tx_q->ring_id);
  2298. dp_tx_get_tid(vdev, nbuf, msdu_info);
  2299. tid_stats = &pdev->stats.tid_stats.
  2300. tid_tx_stats[tx_q->ring_id][msdu_info->tid];
  2301. tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
  2302. if (msdu_info->frm_type == dp_tx_frm_me) {
  2303. hw_enq_fail++;
  2304. if (hw_enq_fail == msdu_info->num_seg) {
  2305. /*
  2306. * Unmap is needed only if enqueue
  2307. * failed for all segments.
  2308. */
  2309. qdf_nbuf_unmap(soc->osdev,
  2310. msdu_info->u.sg_info.
  2311. curr_seg->nbuf,
  2312. QDF_DMA_TO_DEVICE);
  2313. }
  2314. /*
  2315. * Free the nbuf for the current segment
  2316. * and make it point to the next in the list.
2317. * For ME, there are as many segments as there
2318. * are clients.
  2319. */
  2320. qdf_nbuf_free(msdu_info->u.sg_info
  2321. .curr_seg->nbuf);
  2322. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2323. if (msdu_info->u.sg_info.curr_seg->next) {
  2324. msdu_info->u.sg_info.curr_seg =
  2325. msdu_info->u.sg_info
  2326. .curr_seg->next;
  2327. nbuf = msdu_info->u.sg_info
  2328. .curr_seg->nbuf;
  2329. } else
  2330. break;
  2331. i++;
  2332. continue;
  2333. }
  2334. /*
  2335. * For TSO frames, the nbuf users increment done for
  2336. * the current segment has to be reverted, since the
  2337. * hw enqueue for this segment failed
  2338. */
  2339. if (msdu_info->frm_type == dp_tx_frm_tso &&
  2340. msdu_info->u.tso_info.curr_seg) {
  2341. /*
  2342. * unmap and free current,
  2343. * retransmit remaining segments
  2344. */
  2345. dp_tx_comp_free_buf(soc, tx_desc, false);
  2346. i++;
  2347. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2348. continue;
  2349. }
  2350. if (msdu_info->frm_type == dp_tx_frm_sg)
  2351. dp_tx_sg_unmap_buf(soc, nbuf, msdu_info);
  2352. dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
  2353. goto done;
  2354. }
  2355. /*
  2356. * TODO
  2357. * if tso_info structure can be modified to have curr_seg
  2358. * as first element, following 2 blocks of code (for TSO and SG)
  2359. * can be combined into 1
  2360. */
  2361. /*
  2362. * For Multicast-Unicast converted packets,
  2363. * each converted frame (for a client) is represented as
  2364. * 1 segment
  2365. */
  2366. if ((msdu_info->frm_type == dp_tx_frm_sg) ||
  2367. (msdu_info->frm_type == dp_tx_frm_me)) {
  2368. if (msdu_info->u.sg_info.curr_seg->next) {
  2369. msdu_info->u.sg_info.curr_seg =
  2370. msdu_info->u.sg_info.curr_seg->next;
  2371. nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
  2372. } else
  2373. break;
  2374. }
  2375. i++;
  2376. }
  2377. nbuf = NULL;
  2378. done:
  2379. return nbuf;
  2380. }
  2381. /**
  2382. * dp_tx_prepare_sg()- Extract SG info from NBUF and prepare msdu_info
  2383. * for SG frames
  2384. * @vdev: DP vdev handle
  2385. * @nbuf: skb
  2386. * @seg_info: Pointer to Segment info Descriptor to be prepared
  2387. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2388. *
2389. * Return: nbuf on success,
2390. * NULL on failure
  2391. */
  2392. static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2393. struct dp_tx_seg_info_s *seg_info, struct dp_tx_msdu_info_s *msdu_info)
  2394. {
  2395. uint32_t cur_frag, nr_frags, i;
  2396. qdf_dma_addr_t paddr;
  2397. struct dp_tx_sg_info_s *sg_info;
  2398. sg_info = &msdu_info->u.sg_info;
  2399. nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2400. if (QDF_STATUS_SUCCESS !=
  2401. qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
  2402. QDF_DMA_TO_DEVICE,
  2403. qdf_nbuf_headlen(nbuf))) {
  2404. dp_tx_err("dma map error");
  2405. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2406. qdf_nbuf_free(nbuf);
  2407. return NULL;
  2408. }
  2409. paddr = qdf_nbuf_mapped_paddr_get(nbuf);
  2410. seg_info->frags[0].paddr_lo = paddr;
  2411. seg_info->frags[0].paddr_hi = ((uint64_t) paddr) >> 32;
  2412. seg_info->frags[0].len = qdf_nbuf_headlen(nbuf);
  2413. seg_info->frags[0].vaddr = (void *) nbuf;
  2414. for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
  2415. if (QDF_STATUS_SUCCESS != qdf_nbuf_frag_map(vdev->osdev,
  2416. nbuf, 0,
  2417. QDF_DMA_TO_DEVICE,
  2418. cur_frag)) {
  2419. dp_tx_err("frag dma map error");
  2420. DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
  2421. goto map_err;
  2422. }
  2423. paddr = qdf_nbuf_get_tx_frag_paddr(nbuf);
  2424. seg_info->frags[cur_frag + 1].paddr_lo = paddr;
  2425. seg_info->frags[cur_frag + 1].paddr_hi =
  2426. ((uint64_t) paddr) >> 32;
  2427. seg_info->frags[cur_frag + 1].len =
  2428. qdf_nbuf_get_frag_size(nbuf, cur_frag);
  2429. }
  2430. seg_info->frag_cnt = (cur_frag + 1);
  2431. seg_info->total_len = qdf_nbuf_len(nbuf);
  2432. seg_info->next = NULL;
  2433. sg_info->curr_seg = seg_info;
  2434. msdu_info->frm_type = dp_tx_frm_sg;
  2435. msdu_info->num_seg = 1;
  2436. return nbuf;
  2437. map_err:
  2438. /* restore paddr into nbuf before calling unmap */
  2439. qdf_nbuf_mapped_paddr_set(nbuf,
  2440. (qdf_dma_addr_t)(seg_info->frags[0].paddr_lo |
  2441. ((uint64_t)
  2442. seg_info->frags[0].paddr_hi) << 32));
  2443. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  2444. QDF_DMA_TO_DEVICE,
  2445. seg_info->frags[0].len);
  2446. for (i = 1; i <= cur_frag; i++) {
  2447. qdf_mem_unmap_page(vdev->osdev, (qdf_dma_addr_t)
  2448. (seg_info->frags[i].paddr_lo | ((uint64_t)
  2449. seg_info->frags[i].paddr_hi) << 32),
  2450. seg_info->frags[i].len,
  2451. QDF_DMA_TO_DEVICE);
  2452. }
  2453. qdf_nbuf_free(nbuf);
  2454. return NULL;
  2455. }
  2456. /**
  2457. * dp_tx_add_tx_sniffer_meta_data()- Add tx_sniffer meta hdr info
  2458. * @vdev: DP vdev handle
  2459. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2460. * @ppdu_cookie: PPDU cookie that should be replayed in the ppdu completions
  2461. *
2462. * Return: None
  2464. */
  2465. static
  2466. void dp_tx_add_tx_sniffer_meta_data(struct dp_vdev *vdev,
  2467. struct dp_tx_msdu_info_s *msdu_info,
  2468. uint16_t ppdu_cookie)
  2469. {
  2470. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2471. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2472. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2473. HTT_TX_MSDU_EXT2_DESC_FLAG_SEND_AS_STANDALONE_SET
  2474. (msdu_info->meta_data[5], 1);
  2475. HTT_TX_MSDU_EXT2_DESC_FLAG_HOST_OPAQUE_VALID_SET
  2476. (msdu_info->meta_data[5], 1);
  2477. HTT_TX_MSDU_EXT2_DESC_HOST_OPAQUE_COOKIE_SET
  2478. (msdu_info->meta_data[6], ppdu_cookie);
  2479. msdu_info->exception_fw = 1;
  2480. msdu_info->is_tx_sniffer = 1;
  2481. }
  2482. #ifdef MESH_MODE_SUPPORT
  2483. /**
2484. * dp_tx_extract_mesh_meta_data() - Extract mesh meta hdr info from nbuf
2485. * and prepare msdu_info for mesh frames.
  2486. * @vdev: DP vdev handle
  2487. * @nbuf: skb
  2488. * @msdu_info: MSDU info to be setup in MSDU descriptor and MSDU extension desc.
  2489. *
  2490. * Return: NULL on failure,
  2491. * nbuf when extracted successfully
  2492. */
  2493. static
  2494. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2495. struct dp_tx_msdu_info_s *msdu_info)
  2496. {
  2497. struct meta_hdr_s *mhdr;
  2498. struct htt_tx_msdu_desc_ext2_t *meta_data =
  2499. (struct htt_tx_msdu_desc_ext2_t *)&msdu_info->meta_data[0];
  2500. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2501. if (CB_FTYPE_MESH_TX_INFO != qdf_nbuf_get_tx_ftype(nbuf)) {
  2502. msdu_info->exception_fw = 0;
  2503. goto remove_meta_hdr;
  2504. }
  2505. msdu_info->exception_fw = 1;
  2506. qdf_mem_zero(meta_data, sizeof(struct htt_tx_msdu_desc_ext2_t));
  2507. meta_data->host_tx_desc_pool = 1;
  2508. meta_data->update_peer_cache = 1;
  2509. meta_data->learning_frame = 1;
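/*
 * When auto-rate is disabled in the mesh meta header, copy the fixed
 * rate parameters (power, MCS, NSS, preamble, retries, BW) into the
 * HTT ext descriptor and mark each of them valid.
 */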
  2510. if (!(mhdr->flags & METAHDR_FLAG_AUTO_RATE)) {
  2511. meta_data->power = mhdr->power;
  2512. meta_data->mcs_mask = 1 << mhdr->rate_info[0].mcs;
  2513. meta_data->nss_mask = 1 << mhdr->rate_info[0].nss;
  2514. meta_data->pream_type = mhdr->rate_info[0].preamble_type;
  2515. meta_data->retry_limit = mhdr->rate_info[0].max_tries;
  2516. meta_data->dyn_bw = 1;
  2517. meta_data->valid_pwr = 1;
  2518. meta_data->valid_mcs_mask = 1;
  2519. meta_data->valid_nss_mask = 1;
  2520. meta_data->valid_preamble_type = 1;
  2521. meta_data->valid_retries = 1;
  2522. meta_data->valid_bw_info = 1;
  2523. }
  2524. if (mhdr->flags & METAHDR_FLAG_NOENCRYPT) {
  2525. meta_data->encrypt_type = 0;
  2526. meta_data->valid_encrypt_type = 1;
  2527. meta_data->learning_frame = 0;
  2528. }
  2529. meta_data->valid_key_flags = 1;
  2530. meta_data->key_flags = (mhdr->keyix & 0x3);
  2531. remove_meta_hdr:
  2532. if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
  2533. dp_tx_err("qdf_nbuf_pull_head failed");
  2534. qdf_nbuf_free(nbuf);
  2535. return NULL;
  2536. }
  2537. msdu_info->tid = qdf_nbuf_get_priority(nbuf);
  2538. dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
  2539. " tid %d to_fw %d",
  2540. msdu_info->meta_data[0],
  2541. msdu_info->meta_data[1],
  2542. msdu_info->meta_data[2],
  2543. msdu_info->meta_data[3],
  2544. msdu_info->meta_data[4],
  2545. msdu_info->meta_data[5],
  2546. msdu_info->tid, msdu_info->exception_fw);
  2547. return nbuf;
  2548. }
  2549. #else
  2550. static
  2551. qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
  2552. struct dp_tx_msdu_info_s *msdu_info)
  2553. {
  2554. return nbuf;
  2555. }
  2556. #endif
  2557. /**
  2558. * dp_check_exc_metadata() - Checks if parameters are valid
2559. * @tx_exc: holds all exception path parameters
2560. *
2561. * Return: true when all the parameters are valid, else false
  2562. *
  2563. */
  2564. static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
  2565. {
  2566. bool invalid_tid = (tx_exc->tid >= DP_MAX_TIDS && tx_exc->tid !=
  2567. HTT_INVALID_TID);
  2568. bool invalid_encap_type =
  2569. (tx_exc->tx_encap_type > htt_cmn_pkt_num_types &&
  2570. tx_exc->tx_encap_type != CDP_INVALID_TX_ENCAP_TYPE);
  2571. bool invalid_sec_type = (tx_exc->sec_type > cdp_num_sec_types &&
  2572. tx_exc->sec_type != CDP_INVALID_SEC_TYPE);
  2573. bool invalid_cookie = (tx_exc->is_tx_sniffer == 1 &&
  2574. tx_exc->ppdu_cookie == 0);
  2575. if (tx_exc->is_intrabss_fwd)
  2576. return true;
  2577. if (invalid_tid || invalid_encap_type || invalid_sec_type ||
  2578. invalid_cookie) {
  2579. return false;
  2580. }
  2581. return true;
  2582. }
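/*
 * Illustrative sketch (not part of the driver) of how a caller might
 * populate cdp_tx_exception_metadata so that it passes the checks
 * above; the chosen values are assumptions for illustration only:
 *
 *   struct cdp_tx_exception_metadata tx_exc = {0};
 *
 *   tx_exc.peer_id = CDP_INVALID_PEER;
 *   tx_exc.tid = HTT_INVALID_TID;
 *   tx_exc.tx_encap_type = CDP_INVALID_TX_ENCAP_TYPE;
 *   tx_exc.sec_type = CDP_INVALID_SEC_TYPE;
 *   tx_exc.is_tx_sniffer = 0;
 *
 *   nbuf = dp_tx_send_exception(soc_hdl, vdev_id, nbuf, &tx_exc);
 */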
  2583. #ifdef ATH_SUPPORT_IQUE
  2584. /**
  2585. * dp_tx_mcast_enhance() - Multicast enhancement on TX
  2586. * @vdev: vdev handle
  2587. * @nbuf: skb
  2588. *
  2589. * Return: true on success,
  2590. * false on failure
  2591. */
  2592. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2593. {
  2594. qdf_ether_header_t *eh;
  2595. /* Mcast to Ucast Conversion*/
  2596. if (qdf_likely(!vdev->mcast_enhancement_en))
  2597. return true;
  2598. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2599. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) &&
  2600. !DP_FRAME_IS_BROADCAST((eh)->ether_dhost)) {
  2601. dp_verbose_debug("Mcast frm for ME %pK", vdev);
  2602. qdf_nbuf_set_next(nbuf, NULL);
  2603. DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
  2604. qdf_nbuf_len(nbuf));
  2605. if (dp_tx_prepare_send_me(vdev, nbuf) ==
  2606. QDF_STATUS_SUCCESS) {
  2607. return false;
  2608. }
  2609. if (qdf_unlikely(vdev->igmp_mcast_enhanc_en > 0)) {
  2610. if (dp_tx_prepare_send_igmp_me(vdev, nbuf) ==
  2611. QDF_STATUS_SUCCESS) {
  2612. return false;
  2613. }
  2614. }
  2615. }
  2616. return true;
  2617. }
  2618. #else
  2619. static inline bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  2620. {
  2621. return true;
  2622. }
  2623. #endif
  2624. /**
  2625. * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
  2626. * @nbuf: qdf_nbuf_t
  2627. * @vdev: struct dp_vdev *
  2628. *
2629. * Allow the packet to be processed only if its destination peer is
2630. * connected to the same vap. Drop the packet if the client is
2631. * connected to a different vap.
  2632. *
  2633. * Return: QDF_STATUS
  2634. */
  2635. static inline QDF_STATUS
  2636. dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
  2637. {
  2638. struct dp_ast_entry *dst_ast_entry = NULL;
  2639. qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
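/*
 * Multicast/broadcast destinations are not tied to a single peer, so
 * let them through without an AST lookup.
 */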
  2640. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
  2641. DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
  2642. return QDF_STATUS_SUCCESS;
  2643. qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
  2644. dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
  2645. eh->ether_dhost,
  2646. vdev->vdev_id);
  2647. /* If there is no ast entry, return failure */
  2648. if (qdf_unlikely(!dst_ast_entry)) {
  2649. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2650. return QDF_STATUS_E_FAILURE;
  2651. }
  2652. qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
  2653. return QDF_STATUS_SUCCESS;
  2654. }
  2655. /**
  2656. * dp_tx_nawds_handler() - NAWDS handler
  2657. *
  2658. * @soc: DP soc handle
2659. * @vdev: DP vdev handle
2660. * @msdu_info: msdu_info required to create HTT metadata
2661. * @nbuf: skb
2662. * @sa_peer_id: peer id of the frame's source address
2663. *
2664. * This API transmits multicast frames to each NAWDS enabled peer.
2665. * Return: none
  2666. */
  2667. static inline
  2668. void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
  2669. struct dp_tx_msdu_info_s *msdu_info,
  2670. qdf_nbuf_t nbuf, uint16_t sa_peer_id)
  2671. {
  2672. struct dp_peer *peer = NULL;
  2673. qdf_nbuf_t nbuf_clone = NULL;
  2674. uint16_t peer_id = DP_INVALID_PEER;
  2675. struct dp_txrx_peer *txrx_peer;
2676. /* This check avoids forwarding packets whose source is present
2677. * in the ast table but does not yet have a valid peer id.
  2678. */
  2679. if (sa_peer_id == HTT_INVALID_PEER)
  2680. return;
  2681. qdf_spin_lock_bh(&vdev->peer_list_lock);
  2682. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  2683. txrx_peer = dp_get_txrx_peer(peer);
  2684. if (!txrx_peer)
  2685. continue;
  2686. if (!txrx_peer->bss_peer && txrx_peer->nawds_enabled) {
  2687. peer_id = peer->peer_id;
  2688. if (!dp_peer_is_primary_link_peer(peer))
  2689. continue;
2690. /* Multicast packets need to be
2691. * dropped in case of intra-bss forwarding
  2692. */
  2693. if (sa_peer_id == txrx_peer->peer_id) {
  2694. dp_tx_debug("multicast packet");
  2695. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  2696. tx.nawds_mcast_drop,
  2697. 1);
  2698. continue;
  2699. }
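/* Clone the frame for this NAWDS peer so the original nbuf remains
 * available to the caller and to subsequent peers in the loop.
 */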
  2700. nbuf_clone = qdf_nbuf_clone(nbuf);
  2701. if (!nbuf_clone) {
  2702. QDF_TRACE(QDF_MODULE_ID_DP,
  2703. QDF_TRACE_LEVEL_ERROR,
  2704. FL("nbuf clone failed"));
  2705. break;
  2706. }
  2707. nbuf_clone = dp_tx_send_msdu_single(vdev, nbuf_clone,
  2708. msdu_info, peer_id,
  2709. NULL);
  2710. if (nbuf_clone) {
  2711. dp_tx_debug("pkt send failed");
  2712. qdf_nbuf_free(nbuf_clone);
  2713. } else {
  2714. if (peer_id != DP_INVALID_PEER)
  2715. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  2716. tx.nawds_mcast,
  2717. 1, qdf_nbuf_len(nbuf));
  2718. }
  2719. }
  2720. }
  2721. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  2722. }
  2723. /**
  2724. * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  2725. * @soc: DP soc handle
  2726. * @vdev_id: id of DP vdev handle
  2727. * @nbuf: skb
  2728. * @tx_exc_metadata: Handle that holds exception path meta data
  2729. *
  2730. * Entry point for Core Tx layer (DP_TX) invoked from
  2731. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2732. *
  2733. * Return: NULL on success,
  2734. * nbuf when it fails to send
  2735. */
  2736. qdf_nbuf_t
  2737. dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2738. qdf_nbuf_t nbuf,
  2739. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2740. {
  2741. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2742. qdf_ether_header_t *eh = NULL;
  2743. struct dp_tx_msdu_info_s msdu_info;
  2744. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2745. DP_MOD_ID_TX_EXCEPTION);
  2746. if (qdf_unlikely(!vdev))
  2747. goto fail;
  2748. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  2749. if (!tx_exc_metadata)
  2750. goto fail;
  2751. msdu_info.tid = tx_exc_metadata->tid;
  2752. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  2753. dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
  2754. QDF_MAC_ADDR_REF(nbuf->data));
  2755. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
  2756. if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
  2757. dp_tx_err("Invalid parameters in exception path");
  2758. goto fail;
  2759. }
  2760. /* for peer based metadata check if peer is valid */
  2761. if (tx_exc_metadata->peer_id != CDP_INVALID_PEER) {
  2762. struct dp_peer *peer = NULL;
  2763. peer = dp_peer_get_ref_by_id(vdev->pdev->soc,
  2764. tx_exc_metadata->peer_id,
  2765. DP_MOD_ID_TX_EXCEPTION);
  2766. if (qdf_unlikely(!peer)) {
  2767. DP_STATS_INC(vdev,
  2768. tx_i.dropped.invalid_peer_id_in_exc_path,
  2769. 1);
  2770. goto fail;
  2771. }
  2772. dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
  2773. }
  2774. /* Basic sanity checks for unsupported packets */
  2775. /* MESH mode */
  2776. if (qdf_unlikely(vdev->mesh_vdev)) {
  2777. dp_tx_err("Mesh mode is not supported in exception path");
  2778. goto fail;
  2779. }
  2780. /*
  2781. * Classify the frame and call corresponding
  2782. * "prepare" function which extracts the segment (TSO)
2783. * and fragmentation information (for TSO, SG, ME, or Raw)
  2784. * into MSDU_INFO structure which is later used to fill
  2785. * SW and HW descriptors.
  2786. */
  2787. if (qdf_nbuf_is_tso(nbuf)) {
  2788. dp_verbose_debug("TSO frame %pK", vdev);
  2789. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  2790. qdf_nbuf_len(nbuf));
  2791. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  2792. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  2793. qdf_nbuf_len(nbuf));
  2794. goto fail;
  2795. }
  2796. goto send_multiple;
  2797. }
  2798. /* SG */
  2799. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
  2800. struct dp_tx_seg_info_s seg_info = {0};
  2801. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);
  2802. if (!nbuf)
  2803. goto fail;
  2804. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  2805. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  2806. qdf_nbuf_len(nbuf));
  2807. goto send_multiple;
  2808. }
2809. if (qdf_unlikely(tx_exc_metadata->is_tx_sniffer)) {
  2810. DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
  2811. qdf_nbuf_len(nbuf));
  2812. dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
  2813. tx_exc_metadata->ppdu_cookie);
  2814. }
  2815. /*
  2816. * Get HW Queue to use for this frame.
2817. * TCL supports up to 4 DMA rings, out of which 3 rings are
  2818. * dedicated for data and 1 for command.
  2819. * "queue_id" maps to one hardware ring.
  2820. * With each ring, we also associate a unique Tx descriptor pool
  2821. * to minimize lock contention for these resources.
  2822. */
  2823. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  2824. if (qdf_likely(tx_exc_metadata->is_intrabss_fwd)) {
  2825. if (qdf_unlikely(vdev->nawds_enabled)) {
  2826. /*
  2827. * This is a multicast packet
  2828. */
  2829. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  2830. tx_exc_metadata->peer_id);
  2831. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  2832. 1, qdf_nbuf_len(nbuf));
  2833. }
  2834. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2835. DP_INVALID_PEER, NULL);
  2836. } else {
  2837. /*
  2838. * Check exception descriptors
  2839. */
  2840. if (dp_tx_exception_limit_check(vdev))
  2841. goto fail;
  2842. /* Single linear frame */
  2843. /*
  2844. * If nbuf is a simple linear frame, use send_single function to
  2845. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  2846. * SRNG. There is no need to setup a MSDU extension descriptor.
  2847. */
  2848. nbuf = dp_tx_send_msdu_single(vdev, nbuf, &msdu_info,
  2849. tx_exc_metadata->peer_id,
  2850. tx_exc_metadata);
  2851. }
  2852. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2853. return nbuf;
  2854. send_multiple:
  2855. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  2856. fail:
  2857. if (vdev)
  2858. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2859. dp_verbose_debug("pkt send failed");
  2860. return nbuf;
  2861. }
  2862. /**
  2863. * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
2864. * in the exception path, as a special case to avoid the regular exception path check.
  2865. * @soc: DP soc handle
  2866. * @vdev_id: id of DP vdev handle
  2867. * @nbuf: skb
  2868. * @tx_exc_metadata: Handle that holds exception path meta data
  2869. *
  2870. * Entry point for Core Tx layer (DP_TX) invoked from
  2871. * hard_start_xmit in OSIF/HDD to transmit frames through fw
  2872. *
  2873. * Return: NULL on success,
  2874. * nbuf when it fails to send
  2875. */
  2876. qdf_nbuf_t
  2877. dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
  2878. uint8_t vdev_id, qdf_nbuf_t nbuf,
  2879. struct cdp_tx_exception_metadata *tx_exc_metadata)
  2880. {
  2881. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2882. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  2883. DP_MOD_ID_TX_EXCEPTION);
  2884. if (qdf_unlikely(!vdev))
  2885. goto fail;
  2886. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  2887. == QDF_STATUS_E_FAILURE)) {
  2888. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  2889. goto fail;
  2890. }
2891. /* Release the vdev reference as it will again be taken inside dp_tx_send_exception */
  2892. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2893. return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
  2894. fail:
  2895. if (vdev)
  2896. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
  2897. dp_verbose_debug("pkt send failed");
  2898. return nbuf;
  2899. }
  2900. /**
  2901. * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  2902. * @soc: DP soc handle
  2903. * @vdev_id: DP vdev handle
  2904. * @nbuf: skb
  2905. *
  2906. * Entry point for Core Tx layer (DP_TX) invoked from
  2907. * hard_start_xmit in OSIF/HDD
  2908. *
  2909. * Return: NULL on success,
  2910. * nbuf when it fails to send
  2911. */
  2912. #ifdef MESH_MODE_SUPPORT
  2913. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  2914. qdf_nbuf_t nbuf)
  2915. {
  2916. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  2917. struct meta_hdr_s *mhdr;
  2918. qdf_nbuf_t nbuf_mesh = NULL;
  2919. qdf_nbuf_t nbuf_clone = NULL;
  2920. struct dp_vdev *vdev;
  2921. uint8_t no_enc_frame = 0;
  2922. nbuf_mesh = qdf_nbuf_unshare(nbuf);
  2923. if (!nbuf_mesh) {
  2924. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2925. "qdf_nbuf_unshare failed");
  2926. return nbuf;
  2927. }
  2928. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_MESH);
  2929. if (!vdev) {
  2930. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2931. "vdev is NULL for vdev_id %d", vdev_id);
  2932. return nbuf;
  2933. }
  2934. nbuf = nbuf_mesh;
  2935. mhdr = (struct meta_hdr_s *)qdf_nbuf_data(nbuf);
  2936. if ((vdev->sec_type != cdp_sec_type_none) &&
  2937. (mhdr->flags & METAHDR_FLAG_NOENCRYPT))
  2938. no_enc_frame = 1;
  2939. if (mhdr->flags & METAHDR_FLAG_NOQOS)
  2940. qdf_nbuf_set_priority(nbuf, HTT_TX_EXT_TID_NON_QOS_MCAST_BCAST);
  2941. if ((mhdr->flags & METAHDR_FLAG_INFO_UPDATED) &&
  2942. !no_enc_frame) {
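/* Clone the frame so this copy, tagged with CB_FTYPE_MESH_TX_INFO,
 * carries the mesh Tx info to the target while the original frame
 * continues on the regular transmit path below.
 */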
  2943. nbuf_clone = qdf_nbuf_clone(nbuf);
  2944. if (!nbuf_clone) {
  2945. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  2946. "qdf_nbuf_clone failed");
  2947. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2948. return nbuf;
  2949. }
  2950. qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
  2951. }
  2952. if (nbuf_clone) {
  2953. if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
  2954. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2955. } else {
  2956. qdf_nbuf_free(nbuf_clone);
  2957. }
  2958. }
  2959. if (no_enc_frame)
  2960. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_MESH_TX_INFO);
  2961. else
  2962. qdf_nbuf_set_tx_ftype(nbuf, CB_FTYPE_INVALID);
  2963. nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
  2964. if ((!nbuf) && no_enc_frame) {
  2965. DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
  2966. }
  2967. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
  2968. return nbuf;
  2969. }
  2970. #else
  2971. qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
  2972. qdf_nbuf_t nbuf)
  2973. {
  2974. return dp_tx_send(soc, vdev_id, nbuf);
  2975. }
  2976. #endif
  2977. #ifdef QCA_DP_TX_NBUF_AND_NBUF_DATA_PREFETCH
  2978. static inline
  2979. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  2980. {
  2981. if (nbuf) {
  2982. qdf_prefetch(&nbuf->len);
  2983. qdf_prefetch(&nbuf->data);
  2984. }
  2985. }
  2986. #else
  2987. static inline
  2988. void dp_tx_prefetch_nbuf_data(qdf_nbuf_t nbuf)
  2989. {
  2990. }
  2991. #endif
  2992. #ifdef DP_UMAC_HW_RESET_SUPPORT
  2993. /*
  2994. * dp_tx_drop() - Drop the frame on a given VAP
  2995. * @soc: DP soc handle
  2996. * @vdev_id: id of DP vdev handle
  2997. * @nbuf: skb
  2998. *
  2999. * Drop all the incoming packets
  3000. *
  3001. * Return: nbuf
  3002. *
  3003. */
  3004. qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3005. qdf_nbuf_t nbuf)
  3006. {
  3007. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3008. struct dp_vdev *vdev = NULL;
  3009. vdev = soc->vdev_id_map[vdev_id];
  3010. if (qdf_unlikely(!vdev))
  3011. return nbuf;
  3012. DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
  3013. return nbuf;
  3014. }
  3015. /*
  3016. * dp_tx_exc_drop() - Drop the frame on a given VAP
  3017. * @soc: DP soc handle
  3018. * @vdev_id: id of DP vdev handle
  3019. * @nbuf: skb
  3020. * @tx_exc_metadata: Handle that holds exception path meta data
  3021. *
  3022. * Drop all the incoming packets
  3023. *
  3024. * Return: nbuf
  3025. *
  3026. */
  3027. qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3028. qdf_nbuf_t nbuf,
  3029. struct cdp_tx_exception_metadata *tx_exc_metadata)
  3030. {
  3031. return dp_tx_drop(soc_hdl, vdev_id, nbuf);
  3032. }
  3033. #endif
  3034. /*
  3035. * dp_tx_send() - Transmit a frame on a given VAP
  3036. * @soc: DP soc handle
  3037. * @vdev_id: id of DP vdev handle
  3038. * @nbuf: skb
  3039. *
  3040. * Entry point for Core Tx layer (DP_TX) invoked from
  3041. * hard_start_xmit in OSIF/HDD or from dp_rx_process for intravap forwarding
  3042. * cases
  3043. *
  3044. * Return: NULL on success,
  3045. * nbuf when it fails to send
  3046. */
  3047. qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  3048. qdf_nbuf_t nbuf)
  3049. {
  3050. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3051. uint16_t peer_id = HTT_INVALID_PEER;
  3052. /*
3053. * a memzero here adds function call overhead in the per-packet
3054. * path, so the structure is cleared via static initialization instead
  3055. */
  3056. struct dp_tx_msdu_info_s msdu_info = {0};
  3057. struct dp_vdev *vdev = NULL;
  3058. qdf_nbuf_t end_nbuf = NULL;
  3059. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3060. return nbuf;
  3061. /*
3062. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3063. * it in the per-packet path.
3064. *
3065. * In this path the vdev memory is already protected by the netdev
3066. * tx lock
  3067. */
  3068. vdev = soc->vdev_id_map[vdev_id];
  3069. if (qdf_unlikely(!vdev))
  3070. return nbuf;
  3071. /*
  3072. * Set Default Host TID value to invalid TID
  3073. * (TID override disabled)
  3074. */
  3075. msdu_info.tid = HTT_TX_EXT_TID_INVALID;
  3076. DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
  3077. if (qdf_unlikely(vdev->mesh_vdev)) {
  3078. qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
  3079. &msdu_info);
  3080. if (!nbuf_mesh) {
  3081. dp_verbose_debug("Extracting mesh metadata failed");
  3082. return nbuf;
  3083. }
  3084. nbuf = nbuf_mesh;
  3085. }
  3086. /*
  3087. * Get HW Queue to use for this frame.
3088. * TCL supports up to 4 DMA rings, out of which 3 rings are
  3089. * dedicated for data and 1 for command.
  3090. * "queue_id" maps to one hardware ring.
  3091. * With each ring, we also associate a unique Tx descriptor pool
  3092. * to minimize lock contention for these resources.
  3093. */
  3094. dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
  3095. DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
  3096. 1);
  3097. /*
  3098. * TCL H/W supports 2 DSCP-TID mapping tables.
  3099. * Table 1 - Default DSCP-TID mapping table
3100. * Table 2 - DSCP-TID override table
  3101. *
  3102. * If we need a different DSCP-TID mapping for this vap,
  3103. * call tid_classify to extract DSCP/ToS from frame and
  3104. * map to a TID and store in msdu_info. This is later used
  3105. * to fill in TCL Input descriptor (per-packet TID override).
  3106. */
  3107. dp_tx_classify_tid(vdev, nbuf, &msdu_info);
  3108. /*
  3109. * Classify the frame and call corresponding
  3110. * "prepare" function which extracts the segment (TSO)
3111. * and fragmentation information (for TSO, SG, ME, or Raw)
  3112. * into MSDU_INFO structure which is later used to fill
  3113. * SW and HW descriptors.
  3114. */
  3115. if (qdf_nbuf_is_tso(nbuf)) {
  3116. dp_verbose_debug("TSO frame %pK", vdev);
  3117. DP_STATS_INC_PKT(vdev->pdev, tso_stats.num_tso_pkts, 1,
  3118. qdf_nbuf_len(nbuf));
  3119. if (dp_tx_prepare_tso(vdev, nbuf, &msdu_info)) {
  3120. DP_STATS_INC_PKT(vdev->pdev, tso_stats.dropped_host, 1,
  3121. qdf_nbuf_len(nbuf));
  3122. return nbuf;
  3123. }
  3124. goto send_multiple;
  3125. }
  3126. /* SG */
  3127. if (qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
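/* If the frame carries more fragments than an SG descriptor can
 * describe (frags[0] is reserved for the linear part), linearize it
 * and fall through to the single-frame path; otherwise build the SG
 * segment info.
 */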
  3128. if (qdf_nbuf_get_nr_frags(nbuf) > DP_TX_MAX_NUM_FRAGS - 1) {
  3129. if (qdf_unlikely(qdf_nbuf_linearize(nbuf)))
  3130. return nbuf;
  3131. } else {
  3132. struct dp_tx_seg_info_s seg_info = {0};
  3133. nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info,
  3134. &msdu_info);
  3135. if (!nbuf)
  3136. return NULL;
  3137. dp_verbose_debug("non-TSO SG frame %pK", vdev);
  3138. DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
  3139. qdf_nbuf_len(nbuf));
  3140. goto send_multiple;
  3141. }
  3142. }
  3143. if (qdf_unlikely(!dp_tx_mcast_enhance(vdev, nbuf)))
  3144. return NULL;
  3145. /* RAW */
  3146. if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
  3147. struct dp_tx_seg_info_s seg_info = {0};
  3148. nbuf = dp_tx_prepare_raw(vdev, nbuf, &seg_info, &msdu_info);
  3149. if (!nbuf)
  3150. return NULL;
  3151. dp_verbose_debug("Raw frame %pK", vdev);
  3152. goto send_multiple;
  3153. }
  3154. if (qdf_unlikely(vdev->nawds_enabled)) {
  3155. qdf_ether_header_t *eh = (qdf_ether_header_t *)
  3156. qdf_nbuf_data(nbuf);
  3157. if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
  3158. uint16_t sa_peer_id = DP_INVALID_PEER;
  3159. if (!soc->ast_offload_support) {
  3160. struct dp_ast_entry *ast_entry = NULL;
  3161. qdf_spin_lock_bh(&soc->ast_lock);
  3162. ast_entry = dp_peer_ast_hash_find_by_pdevid
  3163. (soc,
  3164. (uint8_t *)(eh->ether_shost),
  3165. vdev->pdev->pdev_id);
  3166. if (ast_entry)
  3167. sa_peer_id = ast_entry->peer_id;
  3168. qdf_spin_unlock_bh(&soc->ast_lock);
  3169. }
  3170. dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
  3171. sa_peer_id);
  3172. }
  3173. peer_id = DP_INVALID_PEER;
  3174. DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
  3175. 1, qdf_nbuf_len(nbuf));
  3176. }
  3177. /* Single linear frame */
  3178. /*
  3179. * If nbuf is a simple linear frame, use send_single function to
  3180. * prepare direct-buffer type TCL descriptor and enqueue to TCL
  3181. * SRNG. There is no need to setup a MSDU extension descriptor.
  3182. */
  3183. dp_tx_prefetch_nbuf_data(nbuf);
  3184. nbuf = dp_tx_send_msdu_single_wrapper(vdev, nbuf, &msdu_info,
  3185. peer_id, end_nbuf);
  3186. return nbuf;
  3187. send_multiple:
  3188. nbuf = dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
  3189. if (qdf_unlikely(nbuf && msdu_info.frm_type == dp_tx_frm_raw))
  3190. dp_tx_raw_prepare_unset(vdev->pdev->soc, nbuf);
  3191. return nbuf;
  3192. }
  3193. /**
  3194. * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
3195. * case to avoid the per-packet vdev id check.
  3196. * @soc: DP soc handle
  3197. * @vdev_id: id of DP vdev handle
  3198. * @nbuf: skb
  3199. *
  3200. * Entry point for Core Tx layer (DP_TX) invoked from
  3201. * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
3202. * with a special condition to avoid the per-packet check in dp_tx_send
  3203. *
  3204. * Return: NULL on success,
  3205. * nbuf when it fails to send
  3206. */
  3207. qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
  3208. uint8_t vdev_id, qdf_nbuf_t nbuf)
  3209. {
  3210. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  3211. struct dp_vdev *vdev = NULL;
  3212. if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
  3213. return nbuf;
  3214. /*
3215. * dp_vdev_get_ref_by_id does an atomic operation; avoid using
3216. * it in the per-packet path.
3217. *
3218. * In this path the vdev memory is already protected by the netdev
3219. * tx lock
  3220. */
  3221. vdev = soc->vdev_id_map[vdev_id];
  3222. if (qdf_unlikely(!vdev))
  3223. return nbuf;
  3224. if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
  3225. == QDF_STATUS_E_FAILURE)) {
  3226. DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
  3227. return nbuf;
  3228. }
  3229. return dp_tx_send(soc_hdl, vdev_id, nbuf);
  3230. }
  3231. #ifdef UMAC_SUPPORT_PROXY_ARP
  3232. /**
  3233. * dp_tx_proxy_arp() - Tx proxy arp handler
  3234. * @vdev: datapath vdev handle
3235. * @nbuf: sk buffer
  3236. *
  3237. * Return: status
  3238. */
  3239. static inline
  3240. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3241. {
  3242. if (vdev->osif_proxy_arp)
  3243. return vdev->osif_proxy_arp(vdev->osif_vdev, nbuf);
  3244. /*
  3245. * when UMAC_SUPPORT_PROXY_ARP is defined, we expect
  3246. * osif_proxy_arp has a valid function pointer assigned
  3247. * to it
  3248. */
  3249. dp_tx_err("valid function pointer for osif_proxy_arp is expected!!\n");
  3250. return QDF_STATUS_NOT_INITIALIZED;
  3251. }
  3252. #else
  3253. /**
  3254. * dp_tx_proxy_arp() - Tx proxy arp handler
  3255. * @vdev: datapath vdev handle
3256. * @nbuf: sk buffer
  3257. *
3258. * This function always returns QDF_STATUS_SUCCESS when UMAC_SUPPORT_PROXY_ARP
  3259. * is not defined.
  3260. *
  3261. * Return: status
  3262. */
  3263. static inline
  3264. int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  3265. {
  3266. return QDF_STATUS_SUCCESS;
  3267. }
  3268. #endif
  3269. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
  3270. #ifdef WLAN_MCAST_MLO
  3271. static bool
  3272. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3273. struct dp_tx_desc_s *tx_desc,
  3274. qdf_nbuf_t nbuf,
  3275. uint8_t reinject_reason)
  3276. {
  3277. if (reinject_reason == HTT_TX_FW2WBM_REINJECT_REASON_MLO_MCAST) {
  3278. if (soc->arch_ops.dp_tx_mcast_handler)
  3279. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, nbuf);
  3280. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3281. return true;
  3282. }
  3283. return false;
  3284. }
  3285. #else /* WLAN_MCAST_MLO */
  3286. static inline bool
  3287. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3288. struct dp_tx_desc_s *tx_desc,
  3289. qdf_nbuf_t nbuf,
  3290. uint8_t reinject_reason)
  3291. {
  3292. return false;
  3293. }
  3294. #endif /* WLAN_MCAST_MLO */
  3295. #else
  3296. static inline bool
  3297. dp_tx_reinject_mlo_hdl(struct dp_soc *soc, struct dp_vdev *vdev,
  3298. struct dp_tx_desc_s *tx_desc,
  3299. qdf_nbuf_t nbuf,
  3300. uint8_t reinject_reason)
  3301. {
  3302. return false;
  3303. }
  3304. #endif
  3305. /**
  3306. * dp_tx_reinject_handler() - Tx Reinject Handler
  3307. * @soc: datapath soc handle
  3308. * @vdev: datapath vdev handle
  3309. * @tx_desc: software descriptor head pointer
  3310. * @status : Tx completion status from HTT descriptor
  3311. * @reinject_reason : reinject reason from HTT descriptor
  3312. *
  3313. * This function reinjects frames back to Target.
  3314. * Todo - Host queue needs to be added
  3315. *
  3316. * Return: none
  3317. */
  3318. void dp_tx_reinject_handler(struct dp_soc *soc,
  3319. struct dp_vdev *vdev,
  3320. struct dp_tx_desc_s *tx_desc,
  3321. uint8_t *status,
  3322. uint8_t reinject_reason)
  3323. {
  3324. struct dp_peer *peer = NULL;
  3325. uint32_t peer_id = HTT_INVALID_PEER;
  3326. qdf_nbuf_t nbuf = tx_desc->nbuf;
  3327. qdf_nbuf_t nbuf_copy = NULL;
  3328. struct dp_tx_msdu_info_s msdu_info;
  3329. #ifdef WDS_VENDOR_EXTENSION
  3330. int is_mcast = 0, is_ucast = 0;
  3331. int num_peers_3addr = 0;
  3332. qdf_ether_header_t *eth_hdr = (qdf_ether_header_t *)(qdf_nbuf_data(nbuf));
  3333. struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
  3334. #endif
  3335. struct dp_txrx_peer *txrx_peer;
  3336. qdf_assert(vdev);
  3337. dp_tx_debug("Tx reinject path");
  3338. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  3339. qdf_nbuf_len(tx_desc->nbuf));
  3340. if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
  3341. return;
  3342. #ifdef WDS_VENDOR_EXTENSION
  3343. if (qdf_unlikely(vdev->tx_encap_type != htt_cmn_pkt_type_raw)) {
  3344. is_mcast = (IS_MULTICAST(wh->i_addr1)) ? 1 : 0;
  3345. } else {
  3346. is_mcast = (IS_MULTICAST(eth_hdr->ether_dhost)) ? 1 : 0;
  3347. }
  3348. is_ucast = !is_mcast;
  3349. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3350. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3351. txrx_peer = dp_get_txrx_peer(peer);
  3352. if (!txrx_peer || txrx_peer->bss_peer)
  3353. continue;
  3354. /* Detect wds peers that use 3-addr framing for mcast.
3355. * If there are any, the bss_peer is used to send the
3356. * mcast frame using 3-addr format. All wds enabled
  3357. * peers that use 4-addr framing for mcast frames will
  3358. * be duplicated and sent as 4-addr frames below.
  3359. */
  3360. if (!txrx_peer->wds_enabled ||
  3361. !txrx_peer->wds_ecm.wds_tx_mcast_4addr) {
  3362. num_peers_3addr = 1;
  3363. break;
  3364. }
  3365. }
  3366. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3367. #endif
  3368. if (qdf_unlikely(vdev->mesh_vdev)) {
  3369. DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
  3370. } else {
  3371. qdf_spin_lock_bh(&vdev->peer_list_lock);
  3372. TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
  3373. txrx_peer = dp_get_txrx_peer(peer);
  3374. if (!txrx_peer)
  3375. continue;
  3376. if ((txrx_peer->peer_id != HTT_INVALID_PEER) &&
  3377. #ifdef WDS_VENDOR_EXTENSION
  3378. /*
  3379. * . if 3-addr STA, then send on BSS Peer
  3380. * . if Peer WDS enabled and accept 4-addr mcast,
  3381. * send mcast on that peer only
  3382. * . if Peer WDS enabled and accept 4-addr ucast,
  3383. * send ucast on that peer only
  3384. */
  3385. ((txrx_peer->bss_peer && num_peers_3addr && is_mcast) ||
  3386. (txrx_peer->wds_enabled &&
  3387. ((is_mcast && txrx_peer->wds_ecm.wds_tx_mcast_4addr) ||
  3388. (is_ucast &&
  3389. txrx_peer->wds_ecm.wds_tx_ucast_4addr))))) {
  3390. #else
  3391. (txrx_peer->bss_peer &&
  3392. (dp_tx_proxy_arp(vdev, nbuf) == QDF_STATUS_SUCCESS))) {
  3393. #endif
  3394. peer_id = DP_INVALID_PEER;
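/* Send a separate copy of the frame to each eligible peer; the
 * original nbuf is unmapped and freed after the loop.
 */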
  3395. nbuf_copy = qdf_nbuf_copy(nbuf);
  3396. if (!nbuf_copy) {
  3397. dp_tx_debug("nbuf copy failed");
  3398. break;
  3399. }
  3400. qdf_mem_zero(&msdu_info, sizeof(msdu_info));
  3401. dp_tx_get_queue(vdev, nbuf,
  3402. &msdu_info.tx_queue);
  3403. nbuf_copy = dp_tx_send_msdu_single(vdev,
  3404. nbuf_copy,
  3405. &msdu_info,
  3406. peer_id,
  3407. NULL);
  3408. if (nbuf_copy) {
  3409. dp_tx_debug("pkt send failed");
  3410. qdf_nbuf_free(nbuf_copy);
  3411. }
  3412. }
  3413. }
  3414. qdf_spin_unlock_bh(&vdev->peer_list_lock);
  3415. qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
  3416. QDF_DMA_TO_DEVICE, nbuf->len);
  3417. qdf_nbuf_free(nbuf);
  3418. }
  3419. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3420. }
  3421. /**
  3422. * dp_tx_inspect_handler() - Tx Inspect Handler
  3423. * @soc: datapath soc handle
  3424. * @vdev: datapath vdev handle
  3425. * @tx_desc: software descriptor head pointer
  3426. * @status : Tx completion status from HTT descriptor
  3427. *
  3428. * Handles Tx frames sent back to Host for inspection
  3429. * (ProxyARP)
  3430. *
  3431. * Return: none
  3432. */
  3433. void dp_tx_inspect_handler(struct dp_soc *soc,
  3434. struct dp_vdev *vdev,
  3435. struct dp_tx_desc_s *tx_desc,
  3436. uint8_t *status)
  3437. {
  3438. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  3439. "%s Tx inspect path",
  3440. __func__);
  3441. DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
  3442. qdf_nbuf_len(tx_desc->nbuf));
  3443. DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
  3444. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  3445. }
  3446. #ifdef MESH_MODE_SUPPORT
  3447. /**
  3448. * dp_tx_comp_fill_tx_completion_stats() - Fill per packet Tx completion stats
  3449. * in mesh meta header
  3450. * @tx_desc: software descriptor head pointer
  3451. * @ts: pointer to tx completion stats
  3452. * Return: none
  3453. */
  3454. static
  3455. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3456. struct hal_tx_completion_status *ts)
  3457. {
  3458. qdf_nbuf_t netbuf = tx_desc->nbuf;
  3459. if (!tx_desc->msdu_ext_desc) {
  3460. if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
  3461. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  3462. "netbuf %pK offset %d",
  3463. netbuf, tx_desc->pkt_offset);
  3464. return;
  3465. }
  3466. }
  3467. }
  3468. #else
  3469. static
  3470. void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
  3471. struct hal_tx_completion_status *ts)
  3472. {
  3473. }
  3474. #endif
  3475. #ifdef CONFIG_SAWF
  3476. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3477. struct dp_vdev *vdev,
  3478. struct dp_txrx_peer *txrx_peer,
  3479. struct dp_tx_desc_s *tx_desc,
  3480. struct hal_tx_completion_status *ts,
  3481. uint8_t tid)
  3482. {
  3483. dp_sawf_tx_compl_update_peer_stats(soc, vdev, txrx_peer, tx_desc,
  3484. ts, tid);
  3485. }
  3486. static void dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3487. uint32_t nw_delay,
  3488. uint32_t sw_delay,
  3489. uint32_t hw_delay)
  3490. {
  3491. dp_peer_tid_delay_avg(tx_delay,
  3492. nw_delay,
  3493. sw_delay,
  3494. hw_delay);
  3495. }
  3496. #else
  3497. static void dp_tx_update_peer_sawf_stats(struct dp_soc *soc,
  3498. struct dp_vdev *vdev,
  3499. struct dp_txrx_peer *txrx_peer,
  3500. struct dp_tx_desc_s *tx_desc,
  3501. struct hal_tx_completion_status *ts,
  3502. uint8_t tid)
  3503. {
  3504. }
  3505. static inline void
  3506. dp_tx_compute_delay_avg(struct cdp_delay_tx_stats *tx_delay,
  3507. uint32_t nw_delay, uint32_t sw_delay,
  3508. uint32_t hw_delay)
  3509. {
  3510. }
  3511. #endif
  3512. #ifdef QCA_PEER_EXT_STATS
  3513. #ifdef WLAN_CONFIG_TX_DELAY
  3514. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3515. struct dp_tx_desc_s *tx_desc,
  3516. struct hal_tx_completion_status *ts,
  3517. struct dp_vdev *vdev)
  3518. {
  3519. struct dp_soc *soc = vdev->pdev->soc;
  3520. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3521. int64_t timestamp_ingress, timestamp_hw_enqueue;
  3522. uint32_t sw_enqueue_delay, fwhw_transmit_delay = 0;
  3523. if (!ts->valid)
  3524. return;
  3525. timestamp_ingress = qdf_nbuf_get_timestamp_us(tx_desc->nbuf);
  3526. timestamp_hw_enqueue = qdf_ktime_to_us(tx_desc->timestamp);
  3527. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3528. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3529. if (soc->arch_ops.dp_tx_compute_hw_delay)
  3530. if (!soc->arch_ops.dp_tx_compute_hw_delay(soc, vdev, ts,
  3531. &fwhw_transmit_delay))
  3532. dp_hist_update_stats(&tx_delay->hwtx_delay,
  3533. fwhw_transmit_delay);
  3534. dp_tx_compute_delay_avg(tx_delay, 0, sw_enqueue_delay,
  3535. fwhw_transmit_delay);
  3536. }
  3537. #else
  3538. /*
  3539. * dp_tx_compute_tid_delay() - Compute per TID delay
  3540. * @stats: Per TID delay stats
  3541. * @tx_desc: Software Tx descriptor
  3542. * @ts: Tx completion status
  3543. * @vdev: vdev
  3544. *
  3545. * Compute the software enqueue and hw enqueue delays and
  3546. * update the respective histograms
  3547. *
  3548. * Return: void
  3549. */
  3550. static void dp_tx_compute_tid_delay(struct cdp_delay_tid_stats *stats,
  3551. struct dp_tx_desc_s *tx_desc,
  3552. struct hal_tx_completion_status *ts,
  3553. struct dp_vdev *vdev)
  3554. {
  3555. struct cdp_delay_tx_stats *tx_delay = &stats->tx_delay;
  3556. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3557. uint32_t sw_enqueue_delay, fwhw_transmit_delay;
  3558. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3559. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3560. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3561. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3562. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3563. timestamp_hw_enqueue);
  3564. /*
3565. * Update the Tx software enqueue delay and HW enqueue-to-completion delay.
  3566. */
  3567. dp_hist_update_stats(&tx_delay->tx_swq_delay, sw_enqueue_delay);
  3568. dp_hist_update_stats(&tx_delay->hwtx_delay, fwhw_transmit_delay);
  3569. }
  3570. #endif
  3571. /*
  3572. * dp_tx_update_peer_delay_stats() - Update the peer delay stats
  3573. * @txrx_peer: DP peer context
  3574. * @tx_desc: Tx software descriptor
3575. * @ts: Tx completion status
3576. * @ring_id: ring number
3577. *
3578. * Update the peer extended stats. These are enhanced delay
3579. * stats tracked at the per-msdu level.
  3580. *
  3581. * Return: void
  3582. */
  3583. static void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3584. struct dp_tx_desc_s *tx_desc,
  3585. struct hal_tx_completion_status *ts,
  3586. uint8_t ring_id)
  3587. {
  3588. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3589. struct dp_soc *soc = NULL;
  3590. struct dp_peer_delay_stats *delay_stats = NULL;
  3591. uint8_t tid;
  3592. soc = pdev->soc;
  3593. if (qdf_likely(!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx)))
  3594. return;
  3595. tid = ts->tid;
  3596. delay_stats = txrx_peer->delay_stats;
  3597. qdf_assert(delay_stats);
3598. qdf_assert(ring_id < CDP_MAX_TXRX_CTX);
  3599. /*
3600. * For out-of-range TIDs, use the last data TID
  3601. */
  3602. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3603. tid = CDP_MAX_DATA_TIDS - 1;
  3604. dp_tx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
  3605. tx_desc, ts, txrx_peer->vdev);
  3606. }
  3607. #else
  3608. static inline
  3609. void dp_tx_update_peer_delay_stats(struct dp_txrx_peer *txrx_peer,
  3610. struct dp_tx_desc_s *tx_desc,
  3611. struct hal_tx_completion_status *ts,
  3612. uint8_t ring_id)
  3613. {
  3614. }
  3615. #endif
  3616. #ifdef HW_TX_DELAY_STATS_ENABLE
  3617. /**
  3618. * dp_update_tx_delay_stats() - update the delay stats
  3619. * @vdev: vdev handle
  3620. * @delay: delay in ms or us based on the flag delay_in_us
  3621. * @tid: tid value
  3622. * @mode: type of tx delay mode
3623. * @ring_id: ring number
  3624. * @delay_in_us: flag to indicate whether the delay is in ms or us
  3625. *
  3626. * Return: none
  3627. */
  3628. static inline
  3629. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3630. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3631. {
  3632. struct cdp_tid_tx_stats *tstats =
  3633. &vdev->stats.tid_tx_stats[ring_id][tid];
  3634. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3635. delay_in_us);
  3636. }
  3637. #else
  3638. static inline
  3639. void dp_update_tx_delay_stats(struct dp_vdev *vdev, uint32_t delay, uint8_t tid,
  3640. uint8_t mode, uint8_t ring_id, bool delay_in_us)
  3641. {
  3642. struct cdp_tid_tx_stats *tstats =
  3643. &vdev->pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3644. dp_update_delay_stats(tstats, NULL, delay, tid, mode, ring_id,
  3645. delay_in_us);
  3646. }
  3647. #endif
  3648. /**
3649. * dp_tx_compute_delay() - Compute the Tx path delays from the
3650. * descriptor timestamps and update the per-TID delay stats
3651. *
3652. * @vdev: vdev handle
  3653. * @tx_desc: tx descriptor
  3654. * @tid: tid value
  3655. * @ring_id: TCL or WBM ring number for transmit path
  3656. * Return: none
  3657. */
  3658. void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
  3659. uint8_t tid, uint8_t ring_id)
  3660. {
  3661. int64_t current_timestamp, timestamp_ingress, timestamp_hw_enqueue;
  3662. uint32_t sw_enqueue_delay, fwhw_transmit_delay, interframe_delay;
  3663. uint32_t fwhw_transmit_delay_us;
  3664. if (qdf_likely(!vdev->pdev->delay_stats_flag) &&
  3665. qdf_likely(!dp_is_vdev_tx_delay_stats_enabled(vdev)))
  3666. return;
  3667. if (dp_is_vdev_tx_delay_stats_enabled(vdev)) {
  3668. fwhw_transmit_delay_us =
  3669. qdf_ktime_to_us(qdf_ktime_real_get()) -
  3670. qdf_ktime_to_us(tx_desc->timestamp);
  3671. /*
  3672. * Delay between packet enqueued to HW and Tx completion in us
  3673. */
  3674. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay_us, tid,
  3675. CDP_DELAY_STATS_FW_HW_TRANSMIT,
  3676. ring_id, true);
  3677. /*
  3678. * For MCL, only enqueue to completion delay is required
  3679. * so return if the vdev flag is enabled.
  3680. */
  3681. return;
  3682. }
  3683. current_timestamp = qdf_ktime_to_ms(qdf_ktime_real_get());
  3684. timestamp_hw_enqueue = qdf_ktime_to_ms(tx_desc->timestamp);
  3685. fwhw_transmit_delay = (uint32_t)(current_timestamp -
  3686. timestamp_hw_enqueue);
  3687. /*
  3688. * Delay between packet enqueued to HW and Tx completion in ms
  3689. */
  3690. dp_update_tx_delay_stats(vdev, fwhw_transmit_delay, tid,
  3691. CDP_DELAY_STATS_FW_HW_TRANSMIT, ring_id,
  3692. false);
  3693. timestamp_ingress = qdf_nbuf_get_timestamp(tx_desc->nbuf);
  3694. sw_enqueue_delay = (uint32_t)(timestamp_hw_enqueue - timestamp_ingress);
  3695. interframe_delay = (uint32_t)(timestamp_ingress -
  3696. vdev->prev_tx_enq_tstamp);
  3697. /*
  3698. * Delay in software enqueue
  3699. */
  3700. dp_update_tx_delay_stats(vdev, sw_enqueue_delay, tid,
  3701. CDP_DELAY_STATS_SW_ENQ, ring_id,
  3702. false);
  3703. /*
  3704. * Update interframe delay stats calculated at hardstart receive point.
  3705. * Value of vdev->prev_tx_enq_tstamp will be 0 for 1st frame, so
3706. * interframe delay will not be calculated correctly for the 1st frame.
  3707. * On the other side, this will help in avoiding extra per packet check
  3708. * of !vdev->prev_tx_enq_tstamp.
  3709. */
  3710. dp_update_tx_delay_stats(vdev, interframe_delay, tid,
  3711. CDP_DELAY_STATS_TX_INTERFRAME, ring_id,
  3712. false);
  3713. vdev->prev_tx_enq_tstamp = timestamp_ingress;
  3714. }
  3715. #ifdef DISABLE_DP_STATS
  3716. static
  3717. inline void dp_update_no_ack_stats(qdf_nbuf_t nbuf,
  3718. struct dp_txrx_peer *txrx_peer)
  3719. {
  3720. }
  3721. #else
  3722. static inline void
  3723. dp_update_no_ack_stats(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer)
  3724. {
  3725. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  3726. DPTRACE(qdf_dp_track_noack_check(nbuf, &subtype));
  3727. if (subtype != QDF_PROTO_INVALID)
  3728. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.no_ack_count[subtype],
  3729. 1);
  3730. }
  3731. #endif
  3732. #ifndef QCA_ENHANCED_STATS_SUPPORT
  3733. #ifdef DP_PEER_EXTENDED_API
  3734. static inline uint8_t
  3735. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3736. {
  3737. return txrx_peer->mpdu_retry_threshold;
  3738. }
  3739. #else
  3740. static inline uint8_t
  3741. dp_tx_get_mpdu_retry_threshold(struct dp_txrx_peer *txrx_peer)
  3742. {
  3743. return 0;
  3744. }
  3745. #endif
  3746. /**
  3747. * dp_tx_update_peer_extd_stats()- Update Tx extended path stats for peer
  3748. *
3749. * @ts: Tx completion status
  3750. * @txrx_peer: datapath txrx_peer handle
  3751. *
  3752. * Return: void
  3753. */
  3754. static inline void
  3755. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  3756. struct dp_txrx_peer *txrx_peer)
  3757. {
  3758. uint8_t mcs, pkt_type, dst_mcs_idx;
  3759. uint8_t retry_threshold = dp_tx_get_mpdu_retry_threshold(txrx_peer);
  3760. mcs = ts->mcs;
  3761. pkt_type = ts->pkt_type;
  3762. /* do HW to SW pkt type conversion */
  3763. pkt_type = (pkt_type >= HAL_DOT11_MAX ? DOT11_MAX :
  3764. hal_2_dp_pkt_type_map[pkt_type]);
  3765. dst_mcs_idx = dp_get_mcs_array_index_by_pkt_type_mcs(pkt_type, mcs);
  3766. if (MCS_INVALID_ARRAY_INDEX != dst_mcs_idx)
  3767. DP_PEER_EXTD_STATS_INC(txrx_peer,
  3768. tx.pkt_type[pkt_type].mcs_count[dst_mcs_idx],
  3769. 1);
  3770. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.sgi_count[ts->sgi], 1);
  3771. DP_PEER_EXTD_STATS_INC(txrx_peer, tx.bw[ts->bw], 1);
  3772. DP_PEER_EXTD_STATS_UPD(txrx_peer, tx.last_ack_rssi, ts->ack_frame_rssi);
  3773. DP_PEER_EXTD_STATS_INC(txrx_peer,
  3774. tx.wme_ac_type[TID_TO_WME_AC(ts->tid)], 1);
  3775. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.stbc, 1, ts->stbc);
  3776. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.ldpc, 1, ts->ldpc);
  3777. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries, 1, ts->transmit_cnt > 1);
  3778. if (ts->first_msdu) {
  3779. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.retries_mpdu, 1,
  3780. ts->transmit_cnt > 1);
  3781. if (!retry_threshold)
  3782. return;
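/* Credit one "success with retries" event per retry_threshold
 * transmissions when the MPDU needed more attempts than the
 * threshold.
 */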
  3783. DP_PEER_EXTD_STATS_INCC(txrx_peer, tx.mpdu_success_with_retries,
  3784. qdf_do_div(ts->transmit_cnt,
  3785. retry_threshold),
  3786. ts->transmit_cnt > retry_threshold);
  3787. }
  3788. }
  3789. #else
  3790. static inline void
  3791. dp_tx_update_peer_extd_stats(struct hal_tx_completion_status *ts,
  3792. struct dp_txrx_peer *txrx_peer)
  3793. {
  3794. }
  3795. #endif
  3796. /**
  3797. * dp_tx_update_peer_stats() - Update peer stats from Tx completion indications
  3798. * per wbm ring
  3799. *
  3800. * @tx_desc: software descriptor head pointer
  3801. * @ts: Tx completion status
3802. * @txrx_peer: txrx peer handle
  3803. * @ring_id: ring number
  3804. *
  3805. * Return: None
  3806. */
  3807. static inline void
  3808. dp_tx_update_peer_stats(struct dp_tx_desc_s *tx_desc,
  3809. struct hal_tx_completion_status *ts,
  3810. struct dp_txrx_peer *txrx_peer, uint8_t ring_id)
  3811. {
  3812. struct dp_pdev *pdev = txrx_peer->vdev->pdev;
  3813. uint8_t tid = ts->tid;
  3814. uint32_t length;
  3815. struct cdp_tid_tx_stats *tid_stats;
  3816. if (!pdev)
  3817. return;
  3818. if (qdf_unlikely(tid >= CDP_MAX_DATA_TIDS))
  3819. tid = CDP_MAX_DATA_TIDS - 1;
  3820. tid_stats = &pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
  3821. if (ts->release_src != HAL_TX_COMP_RELEASE_SOURCE_TQM) {
  3822. dp_err_rl("Release source:%d is not from TQM", ts->release_src);
  3823. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.release_src_not_tqm, 1);
  3824. return;
  3825. }
  3826. length = qdf_nbuf_len(tx_desc->nbuf);
  3827. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  3828. if (qdf_unlikely(pdev->delay_stats_flag) ||
  3829. qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(txrx_peer->vdev)))
  3830. dp_tx_compute_delay(txrx_peer->vdev, tx_desc, tid, ring_id);
  3831. if (ts->status < CDP_MAX_TX_TQM_STATUS) {
  3832. tid_stats->tqm_status_cnt[ts->status]++;
  3833. }
  3834. if (qdf_likely(ts->status == HAL_TX_TQM_RR_FRAME_ACKED)) {
  3835. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.retry_count, 1,
  3836. ts->transmit_cnt > 1);
  3837. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.multiple_retry_count,
  3838. 1, ts->transmit_cnt > 2);
  3839. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.ofdma, 1, ts->ofdma);
  3840. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.amsdu_cnt, 1,
  3841. ts->msdu_part_of_amsdu);
  3842. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.non_amsdu_cnt, 1,
  3843. !ts->msdu_part_of_amsdu);
  3844. txrx_peer->stats.per_pkt_stats.tx.last_tx_ts =
  3845. qdf_system_ticks();
  3846. dp_tx_update_peer_extd_stats(ts, txrx_peer);
  3847. return;
  3848. }
  3849. /*
  3850. * tx_failed is ideally supposed to be updated from HTT ppdu
  3851. * completion stats. But in IPQ807X/IPQ6018 chipsets owing to
  3852. * hw limitation there are no completions for failed cases.
  3853. * Hence updating tx_failed from data path. Please note that
  3854. * if tx_failed is fixed to be from ppdu, then this has to be
  3855. * removed
  3856. */
  3857. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  3858. DP_PEER_PER_PKT_STATS_INCC(txrx_peer, tx.failed_retry_count, 1,
  3859. ts->transmit_cnt > DP_RETRY_COUNT);
  3860. dp_update_no_ack_stats(tx_desc->nbuf, txrx_peer);
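/* Map the TQM release reason reported in the completion status to the
 * corresponding per-peer drop counter.
 */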
  3861. if (ts->status == HAL_TX_TQM_RR_REM_CMD_AGED) {
  3862. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.age_out, 1);
  3863. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_REM) {
  3864. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.dropped.fw_rem, 1,
  3865. length);
  3866. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_NOTX) {
  3867. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_notx, 1);
  3868. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TX) {
  3869. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_rem_tx, 1);
  3870. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON1) {
  3871. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason1, 1);
  3872. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON2) {
  3873. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason2, 1);
  3874. } else if (ts->status == HAL_TX_TQM_RR_FW_REASON3) {
  3875. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.fw_reason3, 1);
  3876. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_DISABLE_QUEUE) {
  3877. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3878. tx.dropped.fw_rem_queue_disable, 1);
  3879. } else if (ts->status == HAL_TX_TQM_RR_REM_CMD_TILL_NONMATCHING) {
  3880. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3881. tx.dropped.fw_rem_no_match, 1);
  3882. } else if (ts->status == HAL_TX_TQM_RR_DROP_THRESHOLD) {
  3883. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3884. tx.dropped.drop_threshold, 1);
  3885. } else if (ts->status == HAL_TX_TQM_RR_LINK_DESC_UNAVAILABLE) {
  3886. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3887. tx.dropped.drop_link_desc_na, 1);
  3888. } else if (ts->status == HAL_TX_TQM_RR_DROP_OR_INVALID_MSDU) {
  3889. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3890. tx.dropped.invalid_drop, 1);
  3891. } else if (ts->status == HAL_TX_TQM_RR_MULTICAST_DROP) {
  3892. DP_PEER_PER_PKT_STATS_INC(txrx_peer,
  3893. tx.dropped.mcast_vdev_drop, 1);
  3894. } else {
  3895. DP_PEER_PER_PKT_STATS_INC(txrx_peer, tx.dropped.invalid_rr, 1);
  3896. }
  3897. }
  3898. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  3899. /**
  3900. * dp_tx_flow_pool_lock() - take flow pool lock
  3901. * @soc: core txrx main context
  3902. * @tx_desc: tx desc
  3903. *
  3904. * Return: None
  3905. */
  3906. static inline
  3907. void dp_tx_flow_pool_lock(struct dp_soc *soc,
  3908. struct dp_tx_desc_s *tx_desc)
  3909. {
  3910. struct dp_tx_desc_pool_s *pool;
  3911. uint8_t desc_pool_id;
  3912. desc_pool_id = tx_desc->pool_id;
  3913. pool = &soc->tx_desc[desc_pool_id];
  3914. qdf_spin_lock_bh(&pool->flow_pool_lock);
  3915. }
  3916. /**
  3917. * dp_tx_flow_pool_unlock() - release flow pool lock
  3918. * @soc: core txrx main context
  3919. * @tx_desc: tx desc
  3920. *
  3921. * Return: None
  3922. */
  3923. static inline
  3924. void dp_tx_flow_pool_unlock(struct dp_soc *soc,
  3925. struct dp_tx_desc_s *tx_desc)
  3926. {
  3927. struct dp_tx_desc_pool_s *pool;
  3928. uint8_t desc_pool_id;
  3929. desc_pool_id = tx_desc->pool_id;
  3930. pool = &soc->tx_desc[desc_pool_id];
  3931. qdf_spin_unlock_bh(&pool->flow_pool_lock);
  3932. }
  3933. #else
  3934. static inline
  3935. void dp_tx_flow_pool_lock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3936. {
  3937. }
  3938. static inline
  3939. void dp_tx_flow_pool_unlock(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
  3940. {
  3941. }
  3942. #endif
  3943. /**
  3944. * dp_tx_notify_completion() - Notify tx completion for this desc
  3945. * @soc: core txrx main context
  3946. * @vdev: datapath vdev handle
  3947. * @tx_desc: tx desc
  3948. * @netbuf: buffer
  3949. * @status: tx status
  3950. *
  3951. * Return: none
  3952. */
  3953. static inline void dp_tx_notify_completion(struct dp_soc *soc,
  3954. struct dp_vdev *vdev,
  3955. struct dp_tx_desc_s *tx_desc,
  3956. qdf_nbuf_t netbuf,
  3957. uint8_t status)
  3958. {
  3959. void *osif_dev;
  3960. ol_txrx_completion_fp tx_compl_cbk = NULL;
  3961. uint16_t flag = BIT(QDF_TX_RX_STATUS_DOWNLOAD_SUCC);
  3962. qdf_assert(tx_desc);
  3963. if (!vdev ||
  3964. !vdev->osif_vdev) {
  3965. return;
  3966. }
  3967. osif_dev = vdev->osif_vdev;
  3968. tx_compl_cbk = vdev->tx_comp;
  3969. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  3970. flag |= BIT(QDF_TX_RX_STATUS_OK);
  3971. if (tx_compl_cbk)
  3972. tx_compl_cbk(netbuf, osif_dev, flag);
  3973. }
  3974. /** dp_tx_sojourn_stats_process() - Collect sojourn stats
3975. * @pdev: pdev handle
* @txrx_peer: DP txrx peer context
  3976. * @tid: tid value
  3977. * @txdesc_ts: timestamp from txdesc
  3978. * @ppdu_id: ppdu id
  3979. *
  3980. * Return: none
  3981. */
  3982. #ifdef FEATURE_PERPKT_INFO
  3983. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  3984. struct dp_txrx_peer *txrx_peer,
  3985. uint8_t tid,
  3986. uint64_t txdesc_ts,
  3987. uint32_t ppdu_id)
  3988. {
  3989. uint64_t delta_ms;
  3990. struct cdp_tx_sojourn_stats *sojourn_stats;
  3991. struct dp_peer *primary_link_peer = NULL;
  3992. struct dp_soc *link_peer_soc = NULL;
  3993. if (qdf_unlikely(!pdev->enhanced_stats_en))
  3994. return;
  3995. if (qdf_unlikely(tid == HTT_INVALID_TID ||
  3996. tid >= CDP_DATA_TID_MAX))
  3997. return;
  3998. if (qdf_unlikely(!pdev->sojourn_buf))
  3999. return;
  4000. primary_link_peer = dp_get_primary_link_peer_by_id(pdev->soc,
  4001. txrx_peer->peer_id,
  4002. DP_MOD_ID_TX_COMP);
  4003. if (qdf_unlikely(!primary_link_peer))
  4004. return;
  4005. sojourn_stats = (struct cdp_tx_sojourn_stats *)
  4006. qdf_nbuf_data(pdev->sojourn_buf);
  4007. link_peer_soc = primary_link_peer->vdev->pdev->soc;
  4008. sojourn_stats->cookie = (void *)
  4009. dp_monitor_peer_get_peerstats_ctx(link_peer_soc,
  4010. primary_link_peer);
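/* Sojourn time: delta between the descriptor timestamp (enqueue) and
 * now (completion), in ms; fold it into the per-TID EWMA.
 */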
  4011. delta_ms = qdf_ktime_to_ms(qdf_ktime_real_get()) -
  4012. txdesc_ts;
  4013. qdf_ewma_tx_lag_add(&txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid],
  4014. delta_ms);
  4015. sojourn_stats->sum_sojourn_msdu[tid] = delta_ms;
  4016. sojourn_stats->num_msdus[tid] = 1;
  4017. sojourn_stats->avg_sojourn_msdu[tid].internal =
  4018. txrx_peer->stats.per_pkt_stats.tx.avg_sojourn_msdu[tid].internal;
  4019. dp_wdi_event_handler(WDI_EVENT_TX_SOJOURN_STAT, pdev->soc,
  4020. pdev->sojourn_buf, HTT_INVALID_PEER,
  4021. WDI_NO_VAL, pdev->pdev_id);
  4022. sojourn_stats->sum_sojourn_msdu[tid] = 0;
  4023. sojourn_stats->num_msdus[tid] = 0;
  4024. sojourn_stats->avg_sojourn_msdu[tid].internal = 0;
  4025. dp_peer_unref_delete(primary_link_peer, DP_MOD_ID_TX_COMP);
  4026. }
  4027. #else
  4028. static inline void dp_tx_sojourn_stats_process(struct dp_pdev *pdev,
  4029. struct dp_txrx_peer *txrx_peer,
  4030. uint8_t tid,
  4031. uint64_t txdesc_ts,
  4032. uint32_t ppdu_id)
  4033. {
  4034. }
  4035. #endif
  4036. #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
  4037. /**
  4038. * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
  4039. * @soc: dp_soc handle
  4040. * @desc: Tx Descriptor
  4041. * @ts: HAL Tx completion descriptor contents
  4042. *
  4043. * This function is used to send tx completion to packet capture
  4044. */
  4045. void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
  4046. struct dp_tx_desc_s *desc,
  4047. struct hal_tx_completion_status *ts)
  4048. {
  4049. dp_wdi_event_handler(WDI_EVENT_PKT_CAPTURE_TX_DATA, soc,
  4050. desc, ts->peer_id,
  4051. WDI_NO_VAL, desc->pdev->pdev_id);
  4052. }
  4053. #endif
4054. /**
4055. * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
4056. * @soc: DP soc handle
4057. * @desc: software Tx descriptor
4058. * @ts: Tx completion status from HAL/HTT descriptor
4059. * @txrx_peer: datapath txrx peer handle
4060. * Return: none
4061. */
  4062. void
  4063. dp_tx_comp_process_desc(struct dp_soc *soc,
  4064. struct dp_tx_desc_s *desc,
  4065. struct hal_tx_completion_status *ts,
  4066. struct dp_txrx_peer *txrx_peer)
  4067. {
  4068. uint64_t time_latency = 0;
  4069. uint16_t peer_id = DP_INVALID_PEER_ID;
  4070. /*
  4071. * m_copy/tx_capture modes are not supported for
  4072. * scatter gather packets
  4073. */
  4074. if (qdf_unlikely(!!desc->pdev->latency_capture_enable)) {
  4075. time_latency = (qdf_ktime_to_ms(qdf_ktime_real_get()) -
  4076. qdf_ktime_to_ms(desc->timestamp));
  4077. }
  4078. dp_send_completion_to_pkt_capture(soc, desc, ts);
  4079. if (dp_tx_pkt_tracepoints_enabled())
  4080. qdf_trace_dp_packet(desc->nbuf, QDF_TX,
  4081. desc->msdu_ext_desc ?
  4082. desc->msdu_ext_desc->tso_desc : NULL,
  4083. qdf_ktime_to_ms(desc->timestamp));
  4084. if (!(desc->msdu_ext_desc)) {
  4085. dp_tx_enh_unmap(soc, desc);
  4086. if (txrx_peer)
  4087. peer_id = txrx_peer->peer_id;
  4088. if (QDF_STATUS_SUCCESS ==
  4089. dp_monitor_tx_add_to_comp_queue(soc, desc, ts, peer_id)) {
  4090. return;
  4091. }
  4092. if (QDF_STATUS_SUCCESS ==
  4093. dp_get_completion_indication_for_stack(soc,
  4094. desc->pdev,
  4095. txrx_peer, ts,
  4096. desc->nbuf,
  4097. time_latency)) {
  4098. dp_send_completion_to_stack(soc,
  4099. desc->pdev,
  4100. ts->peer_id,
  4101. ts->ppdu_id,
  4102. desc->nbuf);
  4103. return;
  4104. }
  4105. }
  4106. desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
  4107. dp_tx_comp_free_buf(soc, desc, false);
  4108. }
  4109. #ifdef DISABLE_DP_STATS
4110. /**
4111. * dp_tx_update_connectivity_stats() - update tx connectivity stats
4112. * @soc: core txrx main context
4113. * @vdev: datapath vdev handle
4114. * @tx_desc: tx desc
4115. * @status: tx status
4116. * Return: none
4117. */
  4118. static inline
  4119. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  4120. struct dp_vdev *vdev,
  4121. struct dp_tx_desc_s *tx_desc,
  4122. uint8_t status)
  4123. {
  4124. }
  4125. #else
  4126. static inline
  4127. void dp_tx_update_connectivity_stats(struct dp_soc *soc,
  4128. struct dp_vdev *vdev,
  4129. struct dp_tx_desc_s *tx_desc,
  4130. uint8_t status)
  4131. {
  4132. void *osif_dev;
  4133. ol_txrx_stats_rx_fp stats_cbk;
  4134. uint8_t pkt_type;
  4135. qdf_assert(tx_desc);
  4136. if (!vdev ||
  4137. !vdev->osif_vdev ||
  4138. !vdev->stats_cb)
  4139. return;
  4140. osif_dev = vdev->osif_vdev;
  4141. stats_cbk = vdev->stats_cb;
  4142. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_HOST_FW_SENT, &pkt_type);
  4143. if (status == HAL_TX_TQM_RR_FRAME_ACKED)
  4144. stats_cbk(tx_desc->nbuf, osif_dev, PKT_TYPE_TX_ACK_CNT,
  4145. &pkt_type);
  4146. }
  4147. #endif
  4148. #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
  4149. QDF_STATUS
  4150. dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
  4151. uint32_t delta_tsf,
  4152. uint32_t *delay_us)
  4153. {
  4154. uint32_t buffer_ts;
  4155. uint32_t delay;
  4156. if (!delay_us)
  4157. return QDF_STATUS_E_INVAL;
4158. /* If tx_rate_stats_info_valid is 0, the tsf in this completion is not valid */
  4159. if (!ts->valid)
  4160. return QDF_STATUS_E_INVAL;
  4161. /* buffer_timestamp is in units of 1024 us and is [31:13] of
  4162. * WBM_RELEASE_RING_4. After left shift 10 bits, it's
  4163. * valid up to 29 bits.
  4164. */
  4165. buffer_ts = ts->buffer_timestamp << 10;
  4166. delay = ts->tsf - buffer_ts - delta_tsf;
  4167. delay &= 0x1FFFFFFF; /* mask 29 BITS */
  4168. if (delay > 0x1000000) {
  4169. dp_info_rl("----------------------\n"
  4170. "Tx completion status:\n"
  4171. "----------------------\n"
  4172. "release_src = %d\n"
  4173. "ppdu_id = 0x%x\n"
  4174. "release_reason = %d\n"
  4175. "tsf = %u (0x%x)\n"
  4176. "buffer_timestamp = %u (0x%x)\n"
  4177. "delta_tsf = %u (0x%x)\n",
  4178. ts->release_src, ts->ppdu_id, ts->status,
  4179. ts->tsf, ts->tsf, ts->buffer_timestamp,
  4180. ts->buffer_timestamp, delta_tsf, delta_tsf);
  4181. return QDF_STATUS_E_FAILURE;
  4182. }
  4183. *delay_us = delay;
  4184. return QDF_STATUS_SUCCESS;
  4185. }
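/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag, not a real build option): a worked example of the delay math above.
 * With buffer_timestamp = 5000 (units of 1024 us), buffer_ts becomes
 * 5000 << 10 = 5,120,000 us. If the completion tsf is 5,123,500 us and
 * delta_tsf is 1,500 us, then delay = 5,123,500 - 5,120,000 - 1,500 =
 * 2,000 us, which is below the 0x1000000 sanity limit, so *delay_us = 2000.
 * The 29-bit mask also handles wraparound: tsf = 100 with
 * buffer_ts = 0x1FFFFF00 gives (100 - 0x1FFFFF00) & 0x1FFFFFFF = 0x164,
 * i.e. 356 us.
 */
#ifdef DP_TX_DOC_EXAMPLES
static void dp_tx_doc_example_hw_delay(void)
{
        struct hal_tx_completion_status ts = {0};
        uint32_t delay_us = 0;

        ts.valid = 1;
        ts.buffer_timestamp = 5000;     /* 5000 * 1024 us = 5,120,000 us */
        ts.tsf = 5123500;               /* completion TSF in us */

        /* Expected result per the arithmetic above: 2,000 us */
        if (QDF_IS_STATUS_SUCCESS(dp_tx_compute_hw_delay_us(&ts, 1500,
                                                            &delay_us)))
                dp_info("computed HW delay %u us", delay_us);
}
#endif /* DP_TX_DOC_EXAMPLES */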
  4186. void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4187. uint32_t delta_tsf)
  4188. {
  4189. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4190. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4191. DP_MOD_ID_CDP);
  4192. if (!vdev) {
  4193. dp_err_rl("vdev %d does not exist", vdev_id);
  4194. return;
  4195. }
  4196. vdev->delta_tsf = delta_tsf;
  4197. dp_debug("vdev id %u delta_tsf %u", vdev_id, delta_tsf);
  4198. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4199. }
  4200. #endif
  4201. #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
  4202. QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
  4203. uint8_t vdev_id, bool enable)
  4204. {
  4205. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4206. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4207. DP_MOD_ID_CDP);
  4208. if (!vdev) {
  4209. dp_err_rl("vdev %d does not exist", vdev_id);
  4210. return QDF_STATUS_E_FAILURE;
  4211. }
  4212. qdf_atomic_set(&vdev->ul_delay_report, enable);
  4213. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4214. return QDF_STATUS_SUCCESS;
  4215. }
  4216. QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4217. uint32_t *val)
  4218. {
  4219. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4220. struct dp_vdev *vdev;
  4221. uint32_t delay_accum;
  4222. uint32_t pkts_accum;
  4223. vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
  4224. if (!vdev) {
  4225. dp_err_rl("vdev %d does not exist", vdev_id);
  4226. return QDF_STATUS_E_FAILURE;
  4227. }
  4228. if (!qdf_atomic_read(&vdev->ul_delay_report)) {
  4229. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4230. return QDF_STATUS_E_FAILURE;
  4231. }
4232. /* Average uplink delay based on the accumulated values; guard the divide in case no packets have completed yet */
4233. delay_accum = qdf_atomic_read(&vdev->ul_delay_accum);
4234. pkts_accum = qdf_atomic_read(&vdev->ul_pkts_accum);
4235. *val = pkts_accum ? (delay_accum / pkts_accum) : 0;
  4236. dp_debug("uplink_delay %u delay_accum %u pkts_accum %u", *val,
  4237. delay_accum, pkts_accum);
  4238. /* Reset accumulated values to 0 */
  4239. qdf_atomic_set(&vdev->ul_delay_accum, 0);
  4240. qdf_atomic_set(&vdev->ul_pkts_accum, 0);
  4241. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
  4242. return QDF_STATUS_SUCCESS;
  4243. }
  4244. static void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  4245. struct hal_tx_completion_status *ts)
  4246. {
  4247. uint32_t ul_delay;
  4248. if (qdf_unlikely(!vdev)) {
4249. dp_info_rl("vdev is NULL or deletion is in progress");
  4250. return;
  4251. }
  4252. if (!qdf_atomic_read(&vdev->ul_delay_report))
  4253. return;
  4254. if (QDF_IS_STATUS_ERROR(dp_tx_compute_hw_delay_us(ts,
  4255. vdev->delta_tsf,
  4256. &ul_delay)))
  4257. return;
4258. ul_delay /= 1000; /* convert from us to ms */
  4259. qdf_atomic_add(ul_delay, &vdev->ul_delay_accum);
  4260. qdf_atomic_inc(&vdev->ul_pkts_accum);
  4261. }
  4262. #else /* !WLAN_FEATURE_TSF_UPLINK_DELAY */
  4263. static inline
  4264. void dp_tx_update_uplink_delay(struct dp_soc *soc, struct dp_vdev *vdev,
  4265. struct hal_tx_completion_status *ts)
  4266. {
  4267. }
  4268. #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
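/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): the control-path sequence the uplink-delay helpers above expect -
 * program the TSF offset, enable per-vdev reporting, then periodically read
 * back the averaged delay (which also resets the accumulators). The soc
 * handle, vdev_id and delta_tsf are assumed to come from the caller.
 */
#if defined(DP_TX_DOC_EXAMPLES) && defined(WLAN_FEATURE_TSF_UPLINK_DELAY)
static void dp_tx_doc_example_ul_delay(struct cdp_soc_t *soc_hdl,
                                       uint8_t vdev_id, uint32_t delta_tsf)
{
        uint32_t avg_delay_ms = 0;

        /* Offset between the buffer timestamp clock and TSF */
        dp_set_delta_tsf(soc_hdl, vdev_id, delta_tsf);

        /* Turn on per-vdev uplink delay accumulation */
        if (QDF_IS_STATUS_ERROR(dp_set_tsf_ul_delay_report(soc_hdl, vdev_id,
                                                           true)))
                return;

        /* Later, e.g. from a periodic stats query */
        if (QDF_IS_STATUS_SUCCESS(dp_get_uplink_delay(soc_hdl, vdev_id,
                                                      &avg_delay_ms)))
                dp_info("average uplink delay %u ms", avg_delay_ms);
}
#endif /* DP_TX_DOC_EXAMPLES && WLAN_FEATURE_TSF_UPLINK_DELAY */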
  4269. /**
  4270. * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
  4271. * @soc: DP soc handle
  4272. * @tx_desc: software descriptor head pointer
  4273. * @ts: Tx completion status
  4274. * @txrx_peer: txrx peer handle
  4275. * @ring_id: ring number
  4276. *
  4277. * Return: none
  4278. */
  4279. void dp_tx_comp_process_tx_status(struct dp_soc *soc,
  4280. struct dp_tx_desc_s *tx_desc,
  4281. struct hal_tx_completion_status *ts,
  4282. struct dp_txrx_peer *txrx_peer,
  4283. uint8_t ring_id)
  4284. {
  4285. uint32_t length;
  4286. qdf_ether_header_t *eh;
  4287. struct dp_vdev *vdev = NULL;
  4288. qdf_nbuf_t nbuf = tx_desc->nbuf;
  4289. enum qdf_dp_tx_rx_status dp_status;
  4290. if (!nbuf) {
  4291. dp_info_rl("invalid tx descriptor. nbuf NULL");
  4292. goto out;
  4293. }
  4294. eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
  4295. length = qdf_nbuf_len(nbuf);
  4296. dp_status = dp_tx_hw_to_qdf(ts->status);
  4297. DPTRACE(qdf_dp_trace_ptr(tx_desc->nbuf,
  4298. QDF_DP_TRACE_LI_DP_FREE_PACKET_PTR_RECORD,
  4299. QDF_TRACE_DEFAULT_PDEV_ID,
  4300. qdf_nbuf_data_addr(nbuf),
  4301. sizeof(qdf_nbuf_data(nbuf)),
  4302. tx_desc->id, ts->status, dp_status));
  4303. dp_tx_comp_debug("-------------------- \n"
  4304. "Tx Completion Stats: \n"
  4305. "-------------------- \n"
  4306. "ack_frame_rssi = %d \n"
  4307. "first_msdu = %d \n"
  4308. "last_msdu = %d \n"
  4309. "msdu_part_of_amsdu = %d \n"
  4310. "rate_stats valid = %d \n"
  4311. "bw = %d \n"
  4312. "pkt_type = %d \n"
  4313. "stbc = %d \n"
  4314. "ldpc = %d \n"
  4315. "sgi = %d \n"
  4316. "mcs = %d \n"
  4317. "ofdma = %d \n"
  4318. "tones_in_ru = %d \n"
  4319. "tsf = %d \n"
  4320. "ppdu_id = %d \n"
  4321. "transmit_cnt = %d \n"
  4322. "tid = %d \n"
  4323. "peer_id = %d\n"
  4324. "tx_status = %d\n",
  4325. ts->ack_frame_rssi, ts->first_msdu,
  4326. ts->last_msdu, ts->msdu_part_of_amsdu,
  4327. ts->valid, ts->bw, ts->pkt_type, ts->stbc,
  4328. ts->ldpc, ts->sgi, ts->mcs, ts->ofdma,
  4329. ts->tones_in_ru, ts->tsf, ts->ppdu_id,
  4330. ts->transmit_cnt, ts->tid, ts->peer_id,
  4331. ts->status);
  4332. /* Update SoC level stats */
  4333. DP_STATS_INCC(soc, tx.dropped_fw_removed, 1,
  4334. (ts->status == HAL_TX_TQM_RR_REM_CMD_REM));
  4335. if (!txrx_peer) {
  4336. dp_info_rl("peer is null or deletion in progress");
  4337. DP_STATS_INC_PKT(soc, tx.tx_invalid_peer, 1, length);
  4338. goto out;
  4339. }
  4340. vdev = txrx_peer->vdev;
  4341. dp_tx_update_connectivity_stats(soc, vdev, tx_desc, ts->status);
  4342. dp_tx_update_uplink_delay(soc, vdev, ts);
  4343. /* check tx complete notification */
  4344. if (qdf_nbuf_tx_notify_comp_get(nbuf))
  4345. dp_tx_notify_completion(soc, vdev, tx_desc,
  4346. nbuf, ts->status);
  4347. /* Update per-packet stats for mesh mode */
  4348. if (qdf_unlikely(vdev->mesh_vdev) &&
  4349. !(tx_desc->flags & DP_TX_DESC_FLAG_TO_FW))
  4350. dp_tx_comp_fill_tx_completion_stats(tx_desc, ts);
  4351. /* Update peer level stats */
  4352. if (qdf_unlikely(txrx_peer->bss_peer &&
  4353. vdev->opmode == wlan_op_mode_ap)) {
  4354. if (ts->status != HAL_TX_TQM_RR_REM_CMD_REM) {
  4355. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.mcast, 1,
  4356. length);
  4357. if (txrx_peer->vdev->tx_encap_type ==
  4358. htt_cmn_pkt_type_ethernet &&
  4359. QDF_IS_ADDR_BROADCAST(eh->ether_dhost)) {
  4360. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  4361. tx.bcast, 1,
  4362. length);
  4363. }
  4364. }
  4365. } else {
  4366. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.ucast, 1, length);
  4367. if (ts->status == HAL_TX_TQM_RR_FRAME_ACKED) {
  4368. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, tx.tx_success,
  4369. 1, length);
  4370. if (qdf_unlikely(txrx_peer->in_twt)) {
  4371. DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer,
  4372. tx.tx_success_twt,
  4373. 1, length);
  4374. }
  4375. }
  4376. }
  4377. dp_tx_update_peer_stats(tx_desc, ts, txrx_peer, ring_id);
  4378. dp_tx_update_peer_delay_stats(txrx_peer, tx_desc, ts, ring_id);
  4379. dp_tx_update_peer_sawf_stats(soc, vdev, txrx_peer, tx_desc,
  4380. ts, ts->tid);
  4381. dp_tx_send_pktlog(soc, vdev->pdev, tx_desc, nbuf, dp_status);
  4382. #ifdef QCA_SUPPORT_RDK_STATS
  4383. if (soc->peerstats_enabled)
  4384. dp_tx_sojourn_stats_process(vdev->pdev, txrx_peer, ts->tid,
  4385. qdf_ktime_to_ms(tx_desc->timestamp),
  4386. ts->ppdu_id);
  4387. #endif
  4388. out:
  4389. return;
  4390. }
  4391. #if defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT) && \
  4392. defined(QCA_ENHANCED_STATS_SUPPORT)
4393. /**
4394. * dp_tx_update_peer_basic_stats() - Update peer basic stats
4395. * @txrx_peer: Datapath txrx_peer handle
4396. * @length: Length of the packet
4397. * @tx_status: Tx status from TQM/FW
4398. * @update: enhanced stats enable flag in dp_pdev
4399. *
4400. * Return: none
4401. */
  4402. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4403. uint32_t length, uint8_t tx_status,
  4404. bool update)
  4405. {
  4406. if (update || (!txrx_peer->hw_txrx_stats_en)) {
  4407. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4408. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4409. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4410. }
  4411. }
  4412. #elif defined(QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT)
  4413. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4414. uint32_t length, uint8_t tx_status,
  4415. bool update)
  4416. {
  4417. if (!txrx_peer->hw_txrx_stats_en) {
  4418. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4419. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4420. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4421. }
  4422. }
  4423. #else
  4424. void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
  4425. uint32_t length, uint8_t tx_status,
  4426. bool update)
  4427. {
  4428. DP_PEER_STATS_FLAT_INC_PKT(txrx_peer, comp_pkt, 1, length);
  4429. if (tx_status != HAL_TX_TQM_RR_FRAME_ACKED)
  4430. DP_PEER_STATS_FLAT_INC(txrx_peer, tx_failed, 1);
  4431. }
  4432. #endif
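/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): the fast Tx-completion path invokes the helper above with
 * update = false, so whether the host counts the packet depends on the build
 * variant and on txrx_peer->hw_txrx_stats_en (i.e. whether the target already
 * offloads these counters).
 */
#ifdef DP_TX_DOC_EXAMPLES
static void dp_tx_doc_example_basic_stats(struct dp_txrx_peer *txrx_peer,
                                          struct dp_tx_desc_s *desc)
{
        dp_tx_update_peer_basic_stats(txrx_peer, desc->length,
                                      desc->tx_status, false);
}
#endif /* DP_TX_DOC_EXAMPLES */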
4433. /**
4434. * dp_tx_prefetch_next_nbuf_data() - Prefetch the next descriptor's nbuf and nbuf data
4435. * @next: pointer to the next Tx software descriptor
4436. *
4437. * Return: none
4438. */
  4439. #ifdef QCA_DP_RX_NBUF_AND_NBUF_DATA_PREFETCH
  4440. static inline
  4441. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  4442. {
  4443. qdf_nbuf_t nbuf = NULL;
  4444. if (next)
  4445. nbuf = next->nbuf;
  4446. if (nbuf) {
  4447. /* prefetch skb->next and first few bytes of skb->cb */
  4448. qdf_prefetch(next->shinfo_addr);
  4449. qdf_prefetch(nbuf);
  4450. /* prefetch skb fields present in different cachelines */
  4451. qdf_prefetch(&nbuf->len);
  4452. qdf_prefetch(&nbuf->users);
  4453. }
  4454. }
  4455. #else
  4456. static inline
  4457. void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
  4458. {
  4459. }
  4460. #endif
  4461. /**
  4462. * dp_tx_mcast_reinject_handler() - Tx reinjected multicast packets handler
  4463. * @soc: core txrx main context
  4464. * @desc: software descriptor
  4465. *
  4466. * Return: true when packet is reinjected
  4467. */
  4468. #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP) && \
  4469. defined(WLAN_MCAST_MLO)
  4470. static inline bool
  4471. dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  4472. {
  4473. struct dp_vdev *vdev = NULL;
  4474. if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
  4475. if (!soc->arch_ops.dp_tx_mcast_handler)
  4476. return false;
  4477. vdev = dp_vdev_get_ref_by_id(soc, desc->vdev_id,
  4478. DP_MOD_ID_REINJECT);
  4479. if (qdf_unlikely(!vdev)) {
  4480. dp_tx_comp_info_rl("Unable to get vdev ref %d",
  4481. desc->id);
  4482. return false;
  4483. }
  4484. DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
  4485. qdf_nbuf_len(desc->nbuf));
  4486. soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
  4487. dp_tx_desc_release(desc, desc->pool_id);
  4488. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
  4489. return true;
  4490. }
  4491. return false;
  4492. }
  4493. #else
  4494. static inline bool
  4495. dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
  4496. {
  4497. return false;
  4498. }
  4499. #endif
  4500. /**
  4501. * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
  4502. * @soc: core txrx main context
  4503. * @comp_head: software descriptor head pointer
  4504. * @ring_id: ring number
  4505. *
  4506. * This function will process batch of descriptors reaped by dp_tx_comp_handler
  4507. * and release the software descriptors after processing is complete
  4508. *
  4509. * Return: none
  4510. */
  4511. static void
  4512. dp_tx_comp_process_desc_list(struct dp_soc *soc,
  4513. struct dp_tx_desc_s *comp_head, uint8_t ring_id)
  4514. {
  4515. struct dp_tx_desc_s *desc;
  4516. struct dp_tx_desc_s *next;
  4517. struct hal_tx_completion_status ts;
  4518. struct dp_txrx_peer *txrx_peer = NULL;
  4519. uint16_t peer_id = DP_INVALID_PEER;
  4520. dp_txrx_ref_handle txrx_ref_handle = NULL;
  4521. desc = comp_head;
  4522. while (desc) {
  4523. next = desc->next;
  4524. dp_tx_prefetch_next_nbuf_data(next);
  4525. if (peer_id != desc->peer_id) {
  4526. if (txrx_peer)
  4527. dp_txrx_peer_unref_delete(txrx_ref_handle,
  4528. DP_MOD_ID_TX_COMP);
  4529. peer_id = desc->peer_id;
  4530. txrx_peer =
  4531. dp_txrx_peer_get_ref_by_id(soc, peer_id,
  4532. &txrx_ref_handle,
  4533. DP_MOD_ID_TX_COMP);
  4534. }
  4535. if (dp_tx_mcast_reinject_handler(soc, desc)) {
  4536. desc = next;
  4537. continue;
  4538. }
  4539. if (qdf_likely(desc->flags & DP_TX_DESC_FLAG_SIMPLE)) {
  4540. struct dp_pdev *pdev = desc->pdev;
  4541. if (qdf_likely(txrx_peer))
  4542. dp_tx_update_peer_basic_stats(txrx_peer,
  4543. desc->length,
  4544. desc->tx_status,
  4545. false);
  4546. qdf_assert(pdev);
  4547. dp_tx_outstanding_dec(pdev);
4548. /*
4549. * Calling a QDF wrapper here has a significant performance
4550. * impact, so the wrapper call is avoided here.
4551. */
  4552. dp_tx_desc_history_add(soc, desc->dma_addr, desc->nbuf,
  4553. desc->id, DP_TX_COMP_UNMAP);
  4554. dp_tx_nbuf_unmap(soc, desc);
  4555. qdf_nbuf_free_simple(desc->nbuf);
  4556. dp_tx_desc_free(soc, desc, desc->pool_id);
  4557. desc = next;
  4558. continue;
  4559. }
  4560. hal_tx_comp_get_status(&desc->comp, &ts, soc->hal_soc);
  4561. dp_tx_comp_process_tx_status(soc, desc, &ts, txrx_peer,
  4562. ring_id);
  4563. dp_tx_comp_process_desc(soc, desc, &ts, txrx_peer);
  4564. dp_tx_desc_release(desc, desc->pool_id);
  4565. desc = next;
  4566. }
  4567. if (txrx_peer)
  4568. dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
  4569. }
  4570. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  4571. static inline
  4572. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  4573. int max_reap_limit)
  4574. {
  4575. bool limit_hit = false;
  4576. limit_hit =
  4577. (num_reaped >= max_reap_limit) ? true : false;
  4578. if (limit_hit)
  4579. DP_STATS_INC(soc, tx.tx_comp_loop_pkt_limit_hit, 1);
  4580. return limit_hit;
  4581. }
  4582. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  4583. {
  4584. return soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check;
  4585. }
  4586. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  4587. {
  4588. struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
  4589. return cfg->tx_comp_loop_pkt_limit;
  4590. }
  4591. #else
  4592. static inline
  4593. bool dp_tx_comp_loop_pkt_limit_hit(struct dp_soc *soc, int num_reaped,
  4594. int max_reap_limit)
  4595. {
  4596. return false;
  4597. }
  4598. static inline bool dp_tx_comp_enable_eol_data_check(struct dp_soc *soc)
  4599. {
  4600. return false;
  4601. }
  4602. static inline int dp_tx_comp_get_loop_pkt_limit(struct dp_soc *soc)
  4603. {
  4604. return 0;
  4605. }
  4606. #endif
  4607. #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
  4608. static inline int
  4609. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  4610. int *max_reap_limit)
  4611. {
  4612. return soc->arch_ops.dp_srng_test_and_update_nf_params(soc, dp_srng,
  4613. max_reap_limit);
  4614. }
  4615. #else
  4616. static inline int
  4617. dp_srng_test_and_update_nf_params(struct dp_soc *soc, struct dp_srng *dp_srng,
  4618. int *max_reap_limit)
  4619. {
  4620. return 0;
  4621. }
  4622. #endif
  4623. #ifdef DP_TX_TRACKING
  4624. void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
  4625. {
  4626. if ((tx_desc->magic != DP_TX_MAGIC_PATTERN_INUSE) &&
  4627. (tx_desc->magic != DP_TX_MAGIC_PATTERN_FREE)) {
  4628. dp_err_rl("tx_desc %u is corrupted", tx_desc->id);
  4629. qdf_trigger_self_recovery(NULL, QDF_TX_DESC_LEAK);
  4630. }
  4631. }
  4632. #endif
  4633. uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
  4634. hal_ring_handle_t hal_ring_hdl, uint8_t ring_id,
  4635. uint32_t quota)
  4636. {
  4637. void *tx_comp_hal_desc;
  4638. void *last_prefetched_hw_desc = NULL;
  4639. struct dp_tx_desc_s *last_prefetched_sw_desc = NULL;
  4640. hal_soc_handle_t hal_soc;
  4641. uint8_t buffer_src;
  4642. struct dp_tx_desc_s *tx_desc = NULL;
  4643. struct dp_tx_desc_s *head_desc = NULL;
  4644. struct dp_tx_desc_s *tail_desc = NULL;
  4645. uint32_t num_processed = 0;
  4646. uint32_t count;
  4647. uint32_t num_avail_for_reap = 0;
  4648. bool force_break = false;
  4649. struct dp_srng *tx_comp_ring = &soc->tx_comp_ring[ring_id];
  4650. int max_reap_limit, ring_near_full;
  4651. DP_HIST_INIT();
  4652. more_data:
  4653. hal_soc = soc->hal_soc;
  4654. /* Re-initialize local variables to be re-used */
  4655. head_desc = NULL;
  4656. tail_desc = NULL;
  4657. count = 0;
  4658. max_reap_limit = dp_tx_comp_get_loop_pkt_limit(soc);
  4659. ring_near_full = dp_srng_test_and_update_nf_params(soc, tx_comp_ring,
  4660. &max_reap_limit);
  4661. if (qdf_unlikely(dp_srng_access_start(int_ctx, soc, hal_ring_hdl))) {
  4662. dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
  4663. return 0;
  4664. }
  4665. num_avail_for_reap = hal_srng_dst_num_valid(hal_soc, hal_ring_hdl, 0);
  4666. if (num_avail_for_reap >= quota)
  4667. num_avail_for_reap = quota;
  4668. dp_srng_dst_inv_cached_descs(soc, hal_ring_hdl, num_avail_for_reap);
  4669. last_prefetched_hw_desc = dp_srng_dst_prefetch_32_byte_desc(hal_soc,
  4670. hal_ring_hdl,
  4671. num_avail_for_reap);
  4672. /* Find head descriptor from completion ring */
  4673. while (qdf_likely(num_avail_for_reap--)) {
  4674. tx_comp_hal_desc = dp_srng_dst_get_next(soc, hal_ring_hdl);
  4675. if (qdf_unlikely(!tx_comp_hal_desc))
  4676. break;
  4677. buffer_src = hal_tx_comp_get_buffer_source(hal_soc,
  4678. tx_comp_hal_desc);
4679. /* If this buffer was not released by TQM or FW, then it is not
4680. * a Tx completion indication; assert */
  4681. if (qdf_unlikely(buffer_src !=
  4682. HAL_TX_COMP_RELEASE_SOURCE_TQM) &&
  4683. (qdf_unlikely(buffer_src !=
  4684. HAL_TX_COMP_RELEASE_SOURCE_FW))) {
  4685. uint8_t wbm_internal_error;
  4686. dp_err_rl(
  4687. "Tx comp release_src != TQM | FW but from %d",
  4688. buffer_src);
  4689. hal_dump_comp_desc(tx_comp_hal_desc);
  4690. DP_STATS_INC(soc, tx.invalid_release_source, 1);
  4691. /* When WBM sees NULL buffer_addr_info in any of
  4692. * ingress rings it sends an error indication,
  4693. * with wbm_internal_error=1, to a specific ring.
  4694. * The WBM2SW ring used to indicate these errors is
  4695. * fixed in HW, and that ring is being used as Tx
  4696. * completion ring. These errors are not related to
  4697. * Tx completions, and should just be ignored
  4698. */
  4699. wbm_internal_error = hal_get_wbm_internal_error(
  4700. hal_soc,
  4701. tx_comp_hal_desc);
  4702. if (wbm_internal_error) {
  4703. dp_err_rl("Tx comp wbm_internal_error!!");
  4704. DP_STATS_INC(soc, tx.wbm_internal_error[WBM_INT_ERROR_ALL], 1);
  4705. if (HAL_TX_COMP_RELEASE_SOURCE_REO ==
  4706. buffer_src)
  4707. dp_handle_wbm_internal_error(
  4708. soc,
  4709. tx_comp_hal_desc,
  4710. hal_tx_comp_get_buffer_type(
  4711. tx_comp_hal_desc));
  4712. } else {
  4713. dp_err_rl("Tx comp wbm_internal_error false");
  4714. DP_STATS_INC(soc, tx.non_wbm_internal_err, 1);
  4715. }
  4716. continue;
  4717. }
  4718. soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
  4719. tx_comp_hal_desc,
  4720. &tx_desc);
  4721. if (!tx_desc) {
  4722. dp_err("unable to retrieve tx_desc!");
  4723. QDF_BUG(0);
  4724. continue;
  4725. }
  4726. tx_desc->buffer_src = buffer_src;
  4727. /*
  4728. * If the release source is FW, process the HTT status
  4729. */
  4730. if (qdf_unlikely(buffer_src ==
  4731. HAL_TX_COMP_RELEASE_SOURCE_FW)) {
  4732. uint8_t htt_tx_status[HAL_TX_COMP_HTT_STATUS_LEN];
  4733. hal_tx_comp_get_htt_desc(tx_comp_hal_desc,
  4734. htt_tx_status);
  4735. /* Collect hw completion contents */
  4736. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  4737. &tx_desc->comp, 1);
  4738. soc->arch_ops.dp_tx_process_htt_completion(
  4739. soc,
  4740. tx_desc,
  4741. htt_tx_status,
  4742. ring_id);
  4743. } else {
  4744. tx_desc->tx_status =
  4745. hal_tx_comp_get_tx_status(tx_comp_hal_desc);
  4746. tx_desc->buffer_src = buffer_src;
4747. /*
4748. * If the fast completion mode is enabled, extended
4749. * metadata from the descriptor is not copied
4750. */
  4751. if (qdf_likely(tx_desc->flags &
  4752. DP_TX_DESC_FLAG_SIMPLE))
  4753. goto add_to_pool;
  4754. /*
  4755. * If the descriptor is already freed in vdev_detach,
  4756. * continue to next descriptor
  4757. */
  4758. if (qdf_unlikely
  4759. ((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
  4760. !tx_desc->flags)) {
  4761. dp_tx_comp_info_rl("Descriptor freed in vdev_detach %d",
  4762. tx_desc->id);
  4763. DP_STATS_INC(soc, tx.tx_comp_exception, 1);
  4764. dp_tx_desc_check_corruption(tx_desc);
  4765. continue;
  4766. }
  4767. if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
  4768. dp_tx_comp_info_rl("pdev in down state %d",
  4769. tx_desc->id);
  4770. tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
  4771. dp_tx_comp_free_buf(soc, tx_desc, false);
  4772. dp_tx_desc_release(tx_desc, tx_desc->pool_id);
  4773. goto next_desc;
  4774. }
  4775. if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
  4776. !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
  4777. dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
  4778. tx_desc->flags, tx_desc->id);
  4779. qdf_assert_always(0);
  4780. }
  4781. /* Collect hw completion contents */
  4782. hal_tx_comp_desc_sync(tx_comp_hal_desc,
  4783. &tx_desc->comp, 1);
  4784. add_to_pool:
  4785. DP_HIST_PACKET_COUNT_INC(tx_desc->pdev->pdev_id);
  4786. /* First ring descriptor on the cycle */
  4787. if (!head_desc) {
  4788. head_desc = tx_desc;
  4789. tail_desc = tx_desc;
  4790. }
  4791. tail_desc->next = tx_desc;
  4792. tx_desc->next = NULL;
  4793. tail_desc = tx_desc;
  4794. }
  4795. next_desc:
  4796. num_processed += !(count & DP_TX_NAPI_BUDGET_DIV_MASK);
4797. /*
4798. * If the processed packet count exceeds the given quota,
4799. * stop processing
4800. */
  4801. count++;
  4802. dp_tx_prefetch_hw_sw_nbuf_desc(soc, hal_soc,
  4803. num_avail_for_reap,
  4804. hal_ring_hdl,
  4805. &last_prefetched_hw_desc,
  4806. &last_prefetched_sw_desc);
  4807. if (dp_tx_comp_loop_pkt_limit_hit(soc, count, max_reap_limit))
  4808. break;
  4809. }
  4810. dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
  4811. /* Process the reaped descriptors */
  4812. if (head_desc)
  4813. dp_tx_comp_process_desc_list(soc, head_desc, ring_id);
  4814. DP_STATS_INC(soc, tx.tx_comp[ring_id], count);
4815. /*
4816. * If we are processing in near-full condition, there are 3 scenarios:
4817. * 1) Ring entries have reached critical state
4818. * 2) Ring entries are still near the high threshold
4819. * 3) Ring entries are below the safe level
4820. *
4821. * One more loop will move the state to normal processing and yield
4822. */
  4823. if (ring_near_full)
  4824. goto more_data;
  4825. if (dp_tx_comp_enable_eol_data_check(soc)) {
  4826. if (num_processed >= quota)
  4827. force_break = true;
  4828. if (!force_break &&
  4829. hal_srng_dst_peek_sync_locked(soc->hal_soc,
  4830. hal_ring_hdl)) {
  4831. DP_STATS_INC(soc, tx.hp_oos2, 1);
  4832. if (!hif_exec_should_yield(soc->hif_handle,
  4833. int_ctx->dp_intr_id))
  4834. goto more_data;
  4835. }
  4836. }
  4837. DP_TX_HIST_STATS_PER_PDEV();
  4838. return num_processed;
  4839. }
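/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): how a service loop is expected to drive dp_tx_comp_handler() under a
 * NAPI-style budget. In the real driver this is done from the DP interrupt
 * service path with per-context ring masks; the simple iteration over
 * num_tcl_data_rings and the remaining-budget handling here are simplifying
 * assumptions.
 */
#ifdef DP_TX_DOC_EXAMPLES
static uint32_t dp_tx_doc_example_service_comp_rings(struct dp_intr *int_ctx,
                                                     struct dp_soc *soc,
                                                     uint32_t budget)
{
        uint32_t work_done = 0;
        uint8_t ring_id;

        for (ring_id = 0; ring_id < soc->num_tcl_data_rings; ring_id++) {
                /* Reap at most the remaining budget from this ring */
                work_done += dp_tx_comp_handler(int_ctx, soc,
                                                soc->tx_comp_ring[ring_id].hal_srng,
                                                ring_id,
                                                budget - work_done);
                if (work_done >= budget)
                        break;
        }

        return work_done;
}
#endif /* DP_TX_DOC_EXAMPLES */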
  4840. #ifdef FEATURE_WLAN_TDLS
  4841. qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
  4842. enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list)
  4843. {
  4844. struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
  4845. struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
  4846. DP_MOD_ID_TDLS);
  4847. if (!vdev) {
  4848. dp_err("vdev handle for id %d is NULL", vdev_id);
  4849. return NULL;
  4850. }
  4851. if (tx_spec & OL_TX_SPEC_NO_FREE)
  4852. vdev->is_tdls_frame = true;
  4853. dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TDLS);
  4854. return dp_tx_send(soc_hdl, vdev_id, msdu_list);
  4855. }
  4856. #endif
  4857. /**
  4858. * dp_tx_vdev_attach() - attach vdev to dp tx
  4859. * @vdev: virtual device instance
  4860. *
  4861. * Return: QDF_STATUS_SUCCESS: success
  4862. * QDF_STATUS_E_RESOURCES: Error return
  4863. */
  4864. QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
  4865. {
  4866. int pdev_id;
  4867. /*
  4868. * Fill HTT TCL Metadata with Vdev ID and MAC ID
  4869. */
  4870. DP_TX_TCL_METADATA_TYPE_SET(vdev->htt_tcl_metadata,
  4871. DP_TCL_METADATA_TYPE_VDEV_BASED);
  4872. DP_TX_TCL_METADATA_VDEV_ID_SET(vdev->htt_tcl_metadata,
  4873. vdev->vdev_id);
  4874. pdev_id =
  4875. dp_get_target_pdev_id_for_host_pdev_id(vdev->pdev->soc,
  4876. vdev->pdev->pdev_id);
  4877. DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata, pdev_id);
  4878. /*
  4879. * Set HTT Extension Valid bit to 0 by default
  4880. */
  4881. DP_TX_TCL_METADATA_VALID_HTT_SET(vdev->htt_tcl_metadata, 0);
  4882. dp_tx_vdev_update_search_flags(vdev);
  4883. return QDF_STATUS_SUCCESS;
  4884. }
  4885. #ifndef FEATURE_WDS
  4886. static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
  4887. {
  4888. return false;
  4889. }
  4890. #endif
  4891. /**
  4892. * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
  4893. * @vdev: virtual device instance
  4894. *
  4895. * Return: void
  4896. *
  4897. */
  4898. void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
  4899. {
  4900. struct dp_soc *soc = vdev->pdev->soc;
  4901. /*
4902. * Enable both AddrY (SA based search) and AddrX (DA based search)
  4903. * for TDLS link
  4904. *
  4905. * Enable AddrY (SA based search) only for non-WDS STA and
  4906. * ProxySTA VAP (in HKv1) modes.
  4907. *
  4908. * In all other VAP modes, only DA based search should be
  4909. * enabled
  4910. */
  4911. if (vdev->opmode == wlan_op_mode_sta &&
  4912. vdev->tdls_link_connected)
  4913. vdev->hal_desc_addr_search_flags =
  4914. (HAL_TX_DESC_ADDRX_EN | HAL_TX_DESC_ADDRY_EN);
  4915. else if ((vdev->opmode == wlan_op_mode_sta) &&
  4916. !dp_tx_da_search_override(vdev))
  4917. vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRY_EN;
  4918. else
  4919. vdev->hal_desc_addr_search_flags = HAL_TX_DESC_ADDRX_EN;
  4920. if (vdev->opmode == wlan_op_mode_sta && !vdev->tdls_link_connected)
  4921. vdev->search_type = soc->sta_mode_search_policy;
  4922. else
  4923. vdev->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
  4924. }
  4925. static inline bool
  4926. dp_is_tx_desc_flush_match(struct dp_pdev *pdev,
  4927. struct dp_vdev *vdev,
  4928. struct dp_tx_desc_s *tx_desc)
  4929. {
  4930. if (!(tx_desc && (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)))
  4931. return false;
  4932. /*
  4933. * if vdev is given, then only check whether desc
  4934. * vdev match. if vdev is NULL, then check whether
  4935. * desc pdev match.
  4936. */
  4937. return vdev ? (tx_desc->vdev_id == vdev->vdev_id) :
  4938. (tx_desc->pdev == pdev);
  4939. }
  4940. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4941. /**
4942. * dp_tx_desc_flush() - release resources associated
4943. * to TX Desc
4944. *
4945. * @pdev: Handle to DP pdev structure
4946. * @vdev: virtual device instance
4947. * NULL: no specific Vdev is given; check all allocated TX desc
4948. * on this pdev.
4949. * Non-NULL: only check the allocated TX Desc associated to this Vdev.
4950. *
4951. * @force_free:
4952. * true: flush the TX desc.
4953. * false: only reset the Vdev in each allocated TX desc
4954. * that is associated to the current Vdev.
4955. *
4956. * This function will go through the TX desc pool to flush
4957. * the outstanding TX data or reset Vdev to NULL in associated TX
4958. * Desc.
4959. */
  4960. void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
  4961. bool force_free)
  4962. {
  4963. uint8_t i;
  4964. uint32_t j;
  4965. uint32_t num_desc, page_id, offset;
  4966. uint16_t num_desc_per_page;
  4967. struct dp_soc *soc = pdev->soc;
  4968. struct dp_tx_desc_s *tx_desc = NULL;
  4969. struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
  4970. if (!vdev && !force_free) {
  4971. dp_err("Reset TX desc vdev, Vdev param is required!");
  4972. return;
  4973. }
  4974. for (i = 0; i < MAX_TXDESC_POOLS; i++) {
  4975. tx_desc_pool = &soc->tx_desc[i];
  4976. if (!(tx_desc_pool->pool_size) ||
  4977. IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
  4978. !(tx_desc_pool->desc_pages.cacheable_pages))
  4979. continue;
  4980. /*
  4981. * Add flow pool lock protection in case pool is freed
  4982. * due to all tx_desc is recycled when handle TX completion.
  4983. * this is not necessary when do force flush as:
  4984. * a. double lock will happen if dp_tx_desc_release is
  4985. * also trying to acquire it.
  4986. * b. dp interrupt has been disabled before do force TX desc
  4987. * flush in dp_pdev_deinit().
  4988. */
  4989. if (!force_free)
  4990. qdf_spin_lock_bh(&tx_desc_pool->flow_pool_lock);
  4991. num_desc = tx_desc_pool->pool_size;
  4992. num_desc_per_page =
  4993. tx_desc_pool->desc_pages.num_element_per_page;
  4994. for (j = 0; j < num_desc; j++) {
  4995. page_id = j / num_desc_per_page;
  4996. offset = j % num_desc_per_page;
  4997. if (qdf_unlikely(!(tx_desc_pool->
  4998. desc_pages.cacheable_pages)))
  4999. break;
  5000. tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
  5001. if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
  5002. /*
  5003. * Free TX desc if force free is
  5004. * required, otherwise only reset vdev
  5005. * in this TX desc.
  5006. */
  5007. if (force_free) {
  5008. tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
  5009. dp_tx_comp_free_buf(soc, tx_desc,
  5010. false);
  5011. dp_tx_desc_release(tx_desc, i);
  5012. } else {
  5013. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  5014. }
  5015. }
  5016. }
  5017. if (!force_free)
  5018. qdf_spin_unlock_bh(&tx_desc_pool->flow_pool_lock);
  5019. }
  5020. }
  5021. #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
  5022. /**
  5023. * dp_tx_desc_reset_vdev() - reset vdev to NULL in TX Desc
  5024. *
  5025. * @soc: Handle to DP soc structure
  5026. * @tx_desc: pointer of one TX desc
  5027. * @desc_pool_id: TX Desc pool id
  5028. */
  5029. static inline void
  5030. dp_tx_desc_reset_vdev(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
  5031. uint8_t desc_pool_id)
  5032. {
  5033. TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
  5034. tx_desc->vdev_id = DP_INVALID_VDEV_ID;
  5035. TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
  5036. }
  5037. void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
  5038. bool force_free)
  5039. {
  5040. uint8_t i, num_pool;
  5041. uint32_t j;
  5042. uint32_t num_desc, page_id, offset;
  5043. uint16_t num_desc_per_page;
  5044. struct dp_soc *soc = pdev->soc;
  5045. struct dp_tx_desc_s *tx_desc = NULL;
  5046. struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
  5047. if (!vdev && !force_free) {
  5048. dp_err("Reset TX desc vdev, Vdev param is required!");
  5049. return;
  5050. }
  5051. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  5052. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5053. for (i = 0; i < num_pool; i++) {
  5054. tx_desc_pool = &soc->tx_desc[i];
  5055. if (!tx_desc_pool->desc_pages.cacheable_pages)
  5056. continue;
  5057. num_desc_per_page =
  5058. tx_desc_pool->desc_pages.num_element_per_page;
  5059. for (j = 0; j < num_desc; j++) {
  5060. page_id = j / num_desc_per_page;
  5061. offset = j % num_desc_per_page;
  5062. tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
  5063. if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
  5064. if (force_free) {
  5065. tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
  5066. dp_tx_comp_free_buf(soc, tx_desc,
  5067. false);
  5068. dp_tx_desc_release(tx_desc, i);
  5069. } else {
  5070. dp_tx_desc_reset_vdev(soc, tx_desc,
  5071. i);
  5072. }
  5073. }
  5074. }
  5075. }
  5076. }
  5077. #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
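/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): the two ways dp_tx_desc_flush() is meant to be used - a non-forced
 * flush that only clears the vdev binding of descriptors belonging to a
 * departing vdev, and a forced flush of every outstanding descriptor on the
 * pdev (e.g. at pdev deinit, with DP interrupts already disabled).
 */
#ifdef DP_TX_DOC_EXAMPLES
static void dp_tx_doc_example_desc_flush(struct dp_pdev *pdev,
                                         struct dp_vdev *vdev)
{
        /* vdev teardown: keep the buffers, just reset the vdev in the desc */
        dp_tx_desc_flush(pdev, vdev, false);

        /* pdev teardown: free every outstanding Tx buffer on this pdev */
        dp_tx_desc_flush(pdev, NULL, true);
}
#endif /* DP_TX_DOC_EXAMPLES */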
  5078. /**
  5079. * dp_tx_vdev_detach() - detach vdev from dp tx
  5080. * @vdev: virtual device instance
  5081. *
  5082. * Return: QDF_STATUS_SUCCESS: success
  5083. * QDF_STATUS_E_RESOURCES: Error return
  5084. */
  5085. QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
  5086. {
  5087. struct dp_pdev *pdev = vdev->pdev;
  5088. /* Reset TX desc associated to this Vdev as NULL */
  5089. dp_tx_desc_flush(pdev, vdev, false);
  5090. return QDF_STATUS_SUCCESS;
  5091. }
  5092. #ifdef QCA_LL_TX_FLOW_CONTROL_V2
  5093. /* Pools will be allocated dynamically */
  5094. static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
  5095. int num_desc)
  5096. {
  5097. uint8_t i;
  5098. for (i = 0; i < num_pool; i++) {
  5099. qdf_spinlock_create(&soc->tx_desc[i].flow_pool_lock);
  5100. soc->tx_desc[i].status = FLOW_POOL_INACTIVE;
  5101. }
  5102. return QDF_STATUS_SUCCESS;
  5103. }
  5104. static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
  5105. uint32_t num_desc)
  5106. {
  5107. return QDF_STATUS_SUCCESS;
  5108. }
  5109. static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
  5110. {
  5111. }
  5112. static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
  5113. {
  5114. uint8_t i;
  5115. for (i = 0; i < num_pool; i++)
  5116. qdf_spinlock_destroy(&soc->tx_desc[i].flow_pool_lock);
  5117. }
  5118. #else /* QCA_LL_TX_FLOW_CONTROL_V2! */
  5119. static QDF_STATUS dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
  5120. uint32_t num_desc)
  5121. {
  5122. uint8_t i, count;
  5123. /* Allocate software Tx descriptor pools */
  5124. for (i = 0; i < num_pool; i++) {
  5125. if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
  5126. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5127. FL("Tx Desc Pool alloc %d failed %pK"),
  5128. i, soc);
  5129. goto fail;
  5130. }
  5131. }
  5132. return QDF_STATUS_SUCCESS;
  5133. fail:
  5134. for (count = 0; count < i; count++)
  5135. dp_tx_desc_pool_free(soc, count);
  5136. return QDF_STATUS_E_NOMEM;
  5137. }
  5138. static QDF_STATUS dp_tx_init_static_pools(struct dp_soc *soc, int num_pool,
  5139. uint32_t num_desc)
  5140. {
  5141. uint8_t i;
  5142. for (i = 0; i < num_pool; i++) {
  5143. if (dp_tx_desc_pool_init(soc, i, num_desc)) {
  5144. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  5145. FL("Tx Desc Pool init %d failed %pK"),
  5146. i, soc);
  5147. return QDF_STATUS_E_NOMEM;
  5148. }
  5149. }
  5150. return QDF_STATUS_SUCCESS;
  5151. }
  5152. static void dp_tx_deinit_static_pools(struct dp_soc *soc, int num_pool)
  5153. {
  5154. uint8_t i;
  5155. for (i = 0; i < num_pool; i++)
  5156. dp_tx_desc_pool_deinit(soc, i);
  5157. }
  5158. static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
  5159. {
  5160. uint8_t i;
  5161. for (i = 0; i < num_pool; i++)
  5162. dp_tx_desc_pool_free(soc, i);
  5163. }
  5164. #endif /* !QCA_LL_TX_FLOW_CONTROL_V2 */
  5165. /**
  5166. * dp_tx_tso_cmn_desc_pool_deinit() - de-initialize TSO descriptors
  5167. * @soc: core txrx main context
  5168. * @num_pool: number of pools
  5169. *
  5170. */
  5171. void dp_tx_tso_cmn_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
  5172. {
  5173. dp_tx_tso_desc_pool_deinit(soc, num_pool);
  5174. dp_tx_tso_num_seg_pool_deinit(soc, num_pool);
  5175. }
  5176. /**
  5177. * dp_tx_tso_cmn_desc_pool_free() - free TSO descriptors
  5178. * @soc: core txrx main context
  5179. * @num_pool: number of pools
  5180. *
  5181. */
  5182. void dp_tx_tso_cmn_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
  5183. {
  5184. dp_tx_tso_desc_pool_free(soc, num_pool);
  5185. dp_tx_tso_num_seg_pool_free(soc, num_pool);
  5186. }
  5187. /**
  5188. * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
  5189. * @soc: core txrx main context
  5190. *
  5191. * This function frees all tx related descriptors as below
  5192. * 1. Regular TX descriptors (static pools)
  5193. * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
  5194. * 3. TSO descriptors
  5195. *
  5196. */
  5197. void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
  5198. {
  5199. uint8_t num_pool;
  5200. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5201. dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
  5202. dp_tx_ext_desc_pool_free(soc, num_pool);
  5203. dp_tx_delete_static_pools(soc, num_pool);
  5204. }
  5205. /**
  5206. * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
  5207. * @soc: core txrx main context
  5208. *
  5209. * This function de-initializes all tx related descriptors as below
  5210. * 1. Regular TX descriptors (static pools)
  5211. * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
  5212. * 3. TSO descriptors
  5213. *
  5214. */
  5215. void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
  5216. {
  5217. uint8_t num_pool;
  5218. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5219. dp_tx_flow_control_deinit(soc);
  5220. dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
  5221. dp_tx_ext_desc_pool_deinit(soc, num_pool);
  5222. dp_tx_deinit_static_pools(soc, num_pool);
  5223. }
5224. /**
5225. * dp_tx_tso_cmn_desc_pool_alloc() - Allocate TSO common descriptor pools
5226. * @soc: DP soc handle
5227. * @num_pool: Number of pools
5228. * @num_desc: Number of descriptors
5229. *
5230. * Reserve TSO descriptor buffers
5231. * Return: QDF_STATUS_E_FAILURE on failure or QDF_STATUS_SUCCESS on success
5232. */
  5233. QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
  5234. uint8_t num_pool,
  5235. uint32_t num_desc)
  5236. {
  5237. if (dp_tx_tso_desc_pool_alloc(soc, num_pool, num_desc)) {
  5238. dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
  5239. return QDF_STATUS_E_FAILURE;
  5240. }
  5241. if (dp_tx_tso_num_seg_pool_alloc(soc, num_pool, num_desc)) {
  5242. dp_err("TSO Num of seg Pool alloc %d failed %pK",
  5243. num_pool, soc);
  5244. return QDF_STATUS_E_FAILURE;
  5245. }
  5246. return QDF_STATUS_SUCCESS;
  5247. }
  5248. /**
  5249. * dp_tx_tso_cmn_desc_pool_init() - TSO cmn desc pool init
  5250. * @soc: DP soc handle
  5251. * @num_pool: Number of pools
  5252. * @num_desc: Number of descriptors
  5253. *
  5254. * Initialize TSO descriptor pools
  5255. *
  5256. * Return: QDF_STATUS_E_FAILURE on failure or
  5257. * QDF_STATUS_SUCCESS on success
  5258. */
  5259. QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
  5260. uint8_t num_pool,
  5261. uint32_t num_desc)
  5262. {
  5263. if (dp_tx_tso_desc_pool_init(soc, num_pool, num_desc)) {
  5264. dp_err("TSO Desc Pool alloc %d failed %pK", num_pool, soc);
  5265. return QDF_STATUS_E_FAILURE;
  5266. }
  5267. if (dp_tx_tso_num_seg_pool_init(soc, num_pool, num_desc)) {
  5268. dp_err("TSO Num of seg Pool alloc %d failed %pK",
  5269. num_pool, soc);
  5270. return QDF_STATUS_E_FAILURE;
  5271. }
  5272. return QDF_STATUS_SUCCESS;
  5273. }
  5274. /**
  5275. * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
  5276. * @soc: core txrx main context
  5277. *
  5278. * This function allocates memory for following descriptor pools
  5279. * 1. regular sw tx descriptor pools (static pools)
  5280. * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
  5281. * 3. TSO descriptor pools
  5282. *
  5283. * Return: QDF_STATUS_SUCCESS: success
  5284. * QDF_STATUS_E_RESOURCES: Error return
  5285. */
  5286. QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
  5287. {
  5288. uint8_t num_pool;
  5289. uint32_t num_desc;
  5290. uint32_t num_ext_desc;
  5291. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5292. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  5293. num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
  5294. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
  5295. "%s Tx Desc Alloc num_pool = %d, descs = %d",
  5296. __func__, num_pool, num_desc);
  5297. if ((num_pool > MAX_TXDESC_POOLS) ||
  5298. (num_desc > WLAN_CFG_NUM_TX_DESC_MAX))
  5299. goto fail1;
  5300. if (dp_tx_alloc_static_pools(soc, num_pool, num_desc))
  5301. goto fail1;
  5302. if (dp_tx_ext_desc_pool_alloc(soc, num_pool, num_ext_desc))
  5303. goto fail2;
  5304. if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
  5305. return QDF_STATUS_SUCCESS;
  5306. if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
  5307. goto fail3;
  5308. return QDF_STATUS_SUCCESS;
  5309. fail3:
  5310. dp_tx_ext_desc_pool_free(soc, num_pool);
  5311. fail2:
  5312. dp_tx_delete_static_pools(soc, num_pool);
  5313. fail1:
  5314. return QDF_STATUS_E_RESOURCES;
  5315. }
  5316. /**
  5317. * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
  5318. * @soc: core txrx main context
  5319. *
  5320. * This function initializes the following TX descriptor pools
  5321. * 1. regular sw tx descriptor pools (static pools)
  5322. * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
  5323. * 3. TSO descriptor pools
  5324. *
  5325. * Return: QDF_STATUS_SUCCESS: success
  5326. * QDF_STATUS_E_RESOURCES: Error return
  5327. */
  5328. QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
  5329. {
  5330. uint8_t num_pool;
  5331. uint32_t num_desc;
  5332. uint32_t num_ext_desc;
  5333. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5334. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  5335. num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
  5336. if (dp_tx_init_static_pools(soc, num_pool, num_desc))
  5337. goto fail1;
  5338. if (dp_tx_ext_desc_pool_init(soc, num_pool, num_ext_desc))
  5339. goto fail2;
  5340. if (wlan_cfg_is_tso_desc_attach_defer(soc->wlan_cfg_ctx))
  5341. return QDF_STATUS_SUCCESS;
  5342. if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
  5343. goto fail3;
  5344. dp_tx_flow_control_init(soc);
  5345. soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;
  5346. return QDF_STATUS_SUCCESS;
  5347. fail3:
  5348. dp_tx_ext_desc_pool_deinit(soc, num_pool);
  5349. fail2:
  5350. dp_tx_deinit_static_pools(soc, num_pool);
  5351. fail1:
  5352. return QDF_STATUS_E_RESOURCES;
  5353. }
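/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): the ordering the soc-level pool helpers above assume - memory is
 * allocated before the pools are initialized, and teardown runs in the
 * reverse order (deinit, then free). The real callers live in the soc
 * attach/detach path.
 */
#ifdef DP_TX_DOC_EXAMPLES
static QDF_STATUS dp_tx_doc_example_pool_lifecycle(struct dp_soc *soc)
{
        if (QDF_IS_STATUS_ERROR(dp_soc_tx_desc_sw_pools_alloc(soc)))
                return QDF_STATUS_E_RESOURCES;

        if (QDF_IS_STATUS_ERROR(dp_soc_tx_desc_sw_pools_init(soc))) {
                dp_soc_tx_desc_sw_pools_free(soc);
                return QDF_STATUS_E_RESOURCES;
        }

        /* ... Tx datapath runs ... */

        dp_soc_tx_desc_sw_pools_deinit(soc);
        dp_soc_tx_desc_sw_pools_free(soc);

        return QDF_STATUS_SUCCESS;
}
#endif /* DP_TX_DOC_EXAMPLES */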
  5354. /**
  5355. * dp_tso_soc_attach() - Allocate and initialize TSO descriptors
  5356. * @txrx_soc: dp soc handle
  5357. *
  5358. * Return: QDF_STATUS - QDF_STATUS_SUCCESS
  5359. * QDF_STATUS_E_FAILURE
  5360. */
  5361. QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc)
  5362. {
  5363. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  5364. uint8_t num_pool;
  5365. uint32_t num_desc;
  5366. uint32_t num_ext_desc;
  5367. num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5368. num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
  5369. num_ext_desc = wlan_cfg_get_num_tx_ext_desc(soc->wlan_cfg_ctx);
  5370. if (dp_tx_tso_cmn_desc_pool_alloc(soc, num_pool, num_ext_desc))
  5371. return QDF_STATUS_E_FAILURE;
  5372. if (dp_tx_tso_cmn_desc_pool_init(soc, num_pool, num_ext_desc))
  5373. return QDF_STATUS_E_FAILURE;
  5374. return QDF_STATUS_SUCCESS;
  5375. }
  5376. /**
  5377. * dp_tso_soc_detach() - de-initialize and free the TSO descriptors
  5378. * @txrx_soc: dp soc handle
  5379. *
  5380. * Return: QDF_STATUS - QDF_STATUS_SUCCESS
  5381. */
  5382. QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc)
  5383. {
  5384. struct dp_soc *soc = (struct dp_soc *)txrx_soc;
  5385. uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
  5386. dp_tx_tso_cmn_desc_pool_deinit(soc, num_pool);
  5387. dp_tx_tso_cmn_desc_pool_free(soc, num_pool);
  5388. return QDF_STATUS_SUCCESS;
  5389. }
  5390. #ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
  5391. void dp_pkt_add_timestamp(struct dp_vdev *vdev,
  5392. enum qdf_pkt_timestamp_index index, uint64_t time,
  5393. qdf_nbuf_t nbuf)
  5394. {
  5395. if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled())) {
  5396. uint64_t tsf_time;
  5397. if (vdev->get_tsf_time) {
  5398. vdev->get_tsf_time(vdev->osif_vdev, time, &tsf_time);
  5399. qdf_add_dp_pkt_timestamp(nbuf, index, tsf_time);
  5400. }
  5401. }
  5402. }
  5403. void dp_pkt_get_timestamp(uint64_t *time)
  5404. {
  5405. if (qdf_unlikely(qdf_is_dp_pkt_timestamp_enabled()))
  5406. *time = qdf_get_log_timestamp();
  5407. }
  5408. #endif
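/*
 * Illustrative sketch only (guarded by the hypothetical DP_TX_DOC_EXAMPLES
 * flag): how the timestamping pair above is intended to be used on the Tx
 * path - sample the host time once, then let dp_pkt_add_timestamp() translate
 * it to TSF and stamp the frame. QDF_PKT_TX_DRIVER_ENTRY is assumed to be the
 * qdf_pkt_timestamp_index value for driver ingress.
 */
#if defined(DP_TX_DOC_EXAMPLES) && defined(CONFIG_DP_PKT_ADD_TIMESTAMP)
static void dp_tx_doc_example_stamp_on_entry(struct dp_vdev *vdev,
                                             qdf_nbuf_t nbuf)
{
        uint64_t time = 0;

        /* Both calls are no-ops unless packet timestamping is enabled */
        dp_pkt_get_timestamp(&time);
        dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, time, nbuf);
}
#endif /* DP_TX_DOC_EXAMPLES && CONFIG_DP_PKT_ADD_TIMESTAMP */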