qdf_nbuf.c

/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf.c
 * QCA driver framework (QDF) network buffer management APIs
 */

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <qdf_atomic.h>
#include <qdf_debugfs.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_module.h>
#include <qdf_nbuf.h>
#include <qdf_status.h>
#include "qdf_str.h"
#include <qdf_trace.h>
#include "qdf_tracker.h"
#include <qdf_types.h>
#include <net/ieee80211_radiotap.h>
#include <pld_common.h>
#include <qdf_crypto.h>
#include <linux/igmp.h>
#include <net/mld.h>

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

#ifdef IPA_OFFLOAD
#include <i_qdf_ipa_wdi3.h>
#endif /* IPA_OFFLOAD */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
#define qdf_nbuf_users_inc atomic_inc
#define qdf_nbuf_users_dec atomic_dec
#define qdf_nbuf_users_set atomic_set
#define qdf_nbuf_users_read atomic_read
#else
#define qdf_nbuf_users_inc refcount_inc
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */

#define IEEE80211_RADIOTAP_VHT_BW_20 0
#define IEEE80211_RADIOTAP_VHT_BW_40 1
#define IEEE80211_RADIOTAP_VHT_BW_80 2
#define IEEE80211_RADIOTAP_VHT_BW_160 3

#define RADIOTAP_VHT_BW_20 0
#define RADIOTAP_VHT_BW_40 1
#define RADIOTAP_VHT_BW_80 4
#define RADIOTAP_VHT_BW_160 11

/* tx status */
#define RADIOTAP_TX_STATUS_FAIL 1
#define RADIOTAP_TX_STATUS_NOACK 2

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define CHANNEL_FREQ_5150 5150
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
#include <qdf_mc_timer.h>

struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50
#endif

#ifdef NBUF_MEMORY_DEBUG
/* SMMU crash indication */
static qdf_atomic_t smmu_crashed;
/* Number of nbufs not added to history */
unsigned long g_histroy_add_drop;
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];

#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

#if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
static bool is_initial_mem_debug_disabled;
#endif

/**
 * __qdf_nbuf_get_ip_offset() - Get IPv4/v6 header offset
 * @data: Pointer to network data buffer
 *
 * Get the IP header offset, accounting for an 802.1Q or 802.1AD
 * tag present in the L2 header.
 *
 * Return: IP header offset
 */
static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(data +
				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;

	return QDF_NBUF_TRAC_IP_OFFSET;
}

/**
 * __qdf_nbuf_get_ether_type() - Get the ether type
 * @data: Pointer to network data buffer
 *
 * Get the ether type, skipping past an 802.1Q or 802.1AD tag if one is
 * present in the L2 header. The value is returned as read from the frame
 * (network byte order); e.g. for IPv4 data the on-wire ether type 0x0800
 * is returned as 0x0008 on a little-endian host.
 *
 * Return: ether type
 */
static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(data +
				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
		ether_type = *(uint16_t *)(data +
				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
		ether_type = *(uint16_t *)(data +
				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);

	return ether_type;
}
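
/*
 * Illustrative sketch (not part of the original source): a caller that
 * only needs to branch on the L3 protocol could combine the two helpers
 * above roughly as follows. The 0x0800 constant is the on-wire IPv4
 * ether type and is used here purely for illustration.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *	uint16_t ether_type = __qdf_nbuf_get_ether_type(data);
 *
 *	if (ether_type == QDF_SWAP_U16(0x0800))
 *		ip_hdr_start = data + __qdf_nbuf_get_ip_offset(data);
 */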

void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}

qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Update the per-layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
						 uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		break;
	}
}

void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}

qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only mgmt and data packets are tracked. WMI messages,
	 * such as scan commands, are not tracked.
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
	    (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}

	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
				      current_state);
}

qdf_export_symbol(qdf_nbuf_set_state);
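
/*
 * Illustrative sketch (not in the original file): a TX path layer would
 * typically stamp the nbuf as it hands it to the next layer, so that
 * qdf_nbuf_tx_desc_count_display() can show where packets are piling up.
 * QDF_NBUF_TX_PKT_HTC below is one of the states already used by the
 * counters in this file.
 *
 *	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_HTC);
 */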

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
/**
 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static inline void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static inline void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler() - Replenish expire handler
 * @arg: unused callback argument
 *
 * This function triggers when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}

void qdf_nbuf_stop_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
}
#else

static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}

void qdf_nbuf_stop_replenish_timer(void)
{
}
#endif

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_get);

void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	int num_nbuf = 1;
	qdf_nbuf_t ext_list;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	ext_list = qdf_nbuf_get_ext_list(nbuf);

	/* Take care to account for frag_list */
	while (ext_list) {
		++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_add(num_nbuf, &nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_inc);

void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	int num_nbuf;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nbuf = 1;

	/* Take care to account for frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1)
			++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_sub(num_nbuf, &nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_dec);
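
/*
 * Illustrative sketch (not in the original file): the global counter is
 * meant to be read back, e.g. from debugfs or a leak check, roughly as
 * below. The "outstanding nbufs" wording is an assumption about how a
 * caller would interpret the value.
 *
 *	int outstanding = __qdf_nbuf_count_get();
 *
 *	qdf_debug("outstanding nbufs: %d", outstanding);
 */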
#endif

#ifdef NBUF_FRAG_MEMORY_DEBUG
void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	uint32_t num_nr_frags;
	uint32_t total_num_nr_frags;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	total_num_nr_frags = num_nr_frags;

	/* Take into account the frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
		total_num_nr_frags += num_nr_frags;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_frag_count_inc(total_num_nr_frags);
}

qdf_export_symbol(qdf_nbuf_frag_count_inc);

void qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	uint32_t num_nr_frags;
	uint32_t total_num_nr_frags;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	total_num_nr_frags = num_nr_frags;

	/* Take into account the frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1) {
			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
			total_num_nr_frags += num_nr_frags;
		}
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_frag_count_dec(total_num_nr_frags);
}

qdf_export_symbol(qdf_nbuf_frag_count_dec);
#endif

#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
	!defined(QCA_WIFI_QCN9000)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below
	 * 0x50000040. Though we are trying to reserve low memory upfront
	 * to prevent this, we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				     size, func, line);
			return NULL;
		} else {
			/* Not freeing to make sure it
			 * will not get allocated again
			 */
			goto realloc;
		}
	}

	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#else
#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
				     line);
}

#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3
		 * pages. __netdev_alloc_skb has a 4k page fallback option
		 * in case the high-order page allocation fails, so we don't
		 * need to push that hard. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = alloc_skb(size, flags);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));
	skb->dev = NULL;

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}
#endif
#endif
qdf_export_symbol(__qdf_nbuf_alloc);
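
/*
 * Illustrative sketch (not in the original file): most callers reach this
 * allocator through higher-level qdf_nbuf wrappers, but invoking the
 * helper directly would look roughly like the snippet below. The
 * 2048-byte size and the zero reserve/align/prio values are placeholders
 * chosen purely for illustration.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __qdf_nbuf_alloc(osdev, 2048, 0, 0, 0, __func__, __LINE__);
 *	if (!skb)
 *		return QDF_STATUS_E_NOMEM;
 */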

struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
				      int reserve, int align, int prio,
				      const char *func, uint32_t line)
{
	struct sk_buff *skb;
	unsigned long offset;
	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU to make order-3
		 * pages. __netdev_alloc_skb has a 4k page fallback option
		 * in case the high-order page allocation fails, so we don't
		 * need to push that hard. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);

	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);

	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);

	return skb;
}

qdf_export_symbol(__qdf_nbuf_frag_alloc);

__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line)
{
	qdf_nbuf_t nbuf;
	unsigned long offset;

	if (align)
		size += (align - 1);

	nbuf = alloc_skb(size, GFP_ATOMIC);
	if (!nbuf)
		goto ret_nbuf;

	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

	skb_reserve(nbuf, reserve);

	if (align) {
		offset = ((unsigned long)nbuf->data) % align;
		if (offset)
			skb_reserve(nbuf, align - offset);
	}

	qdf_nbuf_count_inc(nbuf);

ret_nbuf:
	return nbuf;
}

qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);

void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_frag_count_dec(skb);

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}

qdf_export_symbol(__qdf_nbuf_free);

__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
{
	qdf_nbuf_t skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new) {
		qdf_nbuf_frag_count_inc(skb_new);
		qdf_nbuf_count_inc(skb_new);
	}

	return skb_new;
}

qdf_export_symbol(__qdf_nbuf_clone);
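
/*
 * Illustrative sketch (not in the original file): a clone shares its data
 * buffer with the original skb, so the clone and the original each still
 * need their own __qdf_nbuf_free() call. A hypothetical caller mirroring
 * a frame to a second consumer might do roughly:
 *
 *	qdf_nbuf_t copy = __qdf_nbuf_clone(nbuf);
 *
 *	if (copy) {
 *		...hand 'copy' to the second consumer...
 *		__qdf_nbuf_free(copy);
 *	}
 */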

#ifdef QCA_DP_TX_NBUF_LIST_FREE
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	dev_kfree_skb_list_fast(nbuf_queue_head);
}
#else
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}
#endif

qdf_export_symbol(__qdf_nbuf_dev_kfree_list);

#ifdef NBUF_MEMORY_DEBUG
struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
	qdf_dma_addr_t iova;
};

#ifndef QDF_NBUF_HISTORY_SIZE
#define QDF_NBUF_HISTORY_SIZE 4096
#endif
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}
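
/*
 * Illustrative walk-through (not in the original file) of the wrap-around
 * above, assuming size == QDF_NBUF_HISTORY_SIZE (4096): the atomic index
 * keeps incrementing, and only the caller that observes exactly 4096
 * subtracts the size back out, so concurrent callers still land in
 * distinct slots.
 *
 *	increment yields 4094 -> slot 4094
 *	increment yields 4095 -> slot 4095
 *	increment yields 4096 -> counter reduced by 4096, slot 0
 */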

void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	if (qdf_atomic_read(&smmu_crashed)) {
		g_histroy_add_drop++;
		return;
	}

	event->nbuf = nbuf;
	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
		event->iova = QDF_NBUF_CB_PADDR(nbuf);
	else
		event->iova = 0;
}

void qdf_set_smmu_fault_state(bool smmu_fault_state)
{
	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
	if (!smmu_fault_state)
		g_histroy_add_drop = 0;
}

qdf_export_symbol(qdf_set_smmu_fault_state);
#endif /* NBUF_MEMORY_DEBUG */

#ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
#define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");

static void qdf_nbuf_smmu_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
}

static void qdf_nbuf_smmu_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
}

static QDF_STATUS
qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
}

static void
qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
}

void qdf_nbuf_map_check_for_smmu_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
}

#ifdef IPA_OFFLOAD
QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
				   uint8_t hdl,
				   uint8_t num_buffers,
				   qdf_mem_info_t *info,
				   const char *func,
				   uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_smmu_map(nbuf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);

	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_smmu_map(nbuf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
						       info->pa, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_smmu_map_debug);

QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
				     uint8_t hdl,
				     uint8_t num_buffers,
				     qdf_mem_info_t *info,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	qdf_nbuf_untrack_smmu_map(nbuf, func, line);
	status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
	qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
						 info->pa, func, line);

	return status;
}

qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
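
/*
 * Illustrative sketch (not in the original file): the map/unmap pair above
 * is intended to be called symmetrically on the same nbuf so the SMMU map
 * tracker stays balanced. The hdl, num_buffers and info values below are
 * placeholders for whatever the IPA path already uses.
 *
 *	if (QDF_IS_STATUS_SUCCESS(qdf_nbuf_smmu_map_debug(nbuf, hdl, 1, &info,
 *							  __func__, __LINE__))) {
 *		...use the IOVA recorded in info...
 *		qdf_nbuf_smmu_unmap_debug(nbuf, hdl, 1, &info,
 *					  __func__, __LINE__);
 *	}
 */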
#endif /* IPA_OFFLOAD */

static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
						  const char *func,
						  uint32_t line)
{
	char map_func[QDF_TRACKER_FUNC_SIZE];
	uint32_t map_line;

	if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
				&map_func, &map_line))
		return;

	QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
			   func, line, map_func, map_line);
}

static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
{
	p_node->smmu_unmap_line_num = 0;
	p_node->is_nbuf_smmu_mapped = false;
	p_node->smmu_map_line_num = 0;
	p_node->smmu_map_func_name[0] = '\0';
	p_node->smmu_unmap_func_name[0] = '\0';
	p_node->smmu_unmap_iova_addr = 0;
	p_node->smmu_unmap_pa_addr = 0;
	p_node->smmu_map_iova_addr = 0;
	p_node->smmu_map_pa_addr = 0;
}
#else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
#ifdef NBUF_MEMORY_DEBUG
static void qdf_nbuf_smmu_map_tracking_init(void)
{
}

static void qdf_nbuf_smmu_map_tracking_deinit(void)
{
}

static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
						  const char *func,
						  uint32_t line)
{
}

static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
{
}
#endif /* NBUF_MEMORY_DEBUG */

#ifdef IPA_OFFLOAD
QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
				   uint8_t hdl,
				   uint8_t num_buffers,
				   qdf_mem_info_t *info,
				   const char *func,
				   uint32_t line)
{
	return __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
}

qdf_export_symbol(qdf_nbuf_smmu_map_debug);

QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
				     uint8_t hdl,
				     uint8_t num_buffers,
				     qdf_mem_info_t *info,
				     const char *func,
				     uint32_t line)
{
	return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
}

qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
#endif /* IPA_OFFLOAD */
#endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */

#ifdef NBUF_MAP_UNMAP_DEBUG
#define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_map_tracker);
}

static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
}

static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
}

static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
}

QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
			      qdf_nbuf_t buf,
			      qdf_dma_dir_t dir,
			      const char *func,
			      uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_map(buf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_map_node(buf, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_map_debug);

void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *func,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);
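
/*
 * Illustrative sketch (not in the original file): these *_debug variants
 * wrap the DMA map with leak tracking, so a mapping that succeeds should
 * eventually be balanced by an unmap on the same nbuf. QDF_DMA_TO_DEVICE
 * is used only as an example direction.
 *
 *	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				    __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...DMA in progress...
 *	qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *			     __func__, __LINE__);
 */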

QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_single(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_map(buf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_map_node(buf, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_map_single_debug);

void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_single_debug);

QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
				     qdf_nbuf_t buf,
				     qdf_dma_dir_t dir,
				     int nbytes,
				     const char *func,
				     uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_map(buf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_map_node(buf, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_debug);

void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
				 qdf_nbuf_t buf,
				 qdf_dma_dir_t dir,
				 int nbytes,
				 const char *func,
				 uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);

QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
					    qdf_nbuf_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes,
					    const char *func,
					    uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_map(buf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_map_node(buf, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);

void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
					qdf_nbuf_t buf,
					qdf_dma_dir_t dir,
					int nbytes,
					const char *func,
					uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);

void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
					      qdf_nbuf_t buf,
					      qdf_dma_addr_t phy_addr,
					      qdf_dma_dir_t dir, int nbytes,
					      const char *func, uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);

static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
					     const char *func,
  1025. uint32_t line)
  1026. {
  1027. char map_func[QDF_TRACKER_FUNC_SIZE];
  1028. uint32_t map_line;
  1029. if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
  1030. &map_func, &map_line))
  1031. return;
  1032. QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
  1033. func, line, map_func, map_line);
  1034. }
  1035. #else
  1036. static inline void qdf_nbuf_map_tracking_init(void)
  1037. {
  1038. }
  1039. static inline void qdf_nbuf_map_tracking_deinit(void)
  1040. {
  1041. }
  1042. static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
  1043. const char *func,
  1044. uint32_t line)
  1045. {
  1046. }
  1047. #endif /* NBUF_MAP_UNMAP_DEBUG */
  1048. #ifdef QDF_OS_DEBUG
  1049. QDF_STATUS
  1050. __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
  1051. {
  1052. struct skb_shared_info *sh = skb_shinfo(skb);
  1053. qdf_assert((dir == QDF_DMA_TO_DEVICE)
  1054. || (dir == QDF_DMA_FROM_DEVICE));
  1055. /*
  1056. * Assume there's only a single fragment.
  1057. * To support multiple fragments, it would be necessary to change
  1058. * qdf_nbuf_t to be a separate object that stores meta-info
  1059. * (including the bus address for each fragment) and a pointer
  1060. * to the underlying sk_buff.
  1061. */
  1062. qdf_assert(sh->nr_frags == 0);
  1063. return __qdf_nbuf_map_single(osdev, skb, dir);
  1064. }
  1065. qdf_export_symbol(__qdf_nbuf_map);
  1066. #else
  1067. QDF_STATUS
  1068. __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
  1069. {
  1070. return __qdf_nbuf_map_single(osdev, skb, dir);
  1071. }
  1072. qdf_export_symbol(__qdf_nbuf_map);
  1073. #endif
  1074. void
  1075. __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
  1076. qdf_dma_dir_t dir)
  1077. {
  1078. qdf_assert((dir == QDF_DMA_TO_DEVICE)
  1079. || (dir == QDF_DMA_FROM_DEVICE));
  1080. /*
  1081. * Assume there's a single fragment.
  1082. * If this is not true, the assertion in __qdf_nbuf_map will catch it.
  1083. */
  1084. __qdf_nbuf_unmap_single(osdev, skb, dir);
  1085. }
  1086. qdf_export_symbol(__qdf_nbuf_unmap);
  1087. #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
  1088. QDF_STATUS
  1089. __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  1090. {
  1091. qdf_dma_addr_t paddr;
  1092. QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
  1093. BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
  1094. BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
  1095. return QDF_STATUS_SUCCESS;
  1096. }
  1097. qdf_export_symbol(__qdf_nbuf_map_single);
  1098. #else
  1099. QDF_STATUS
  1100. __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  1101. {
  1102. qdf_dma_addr_t paddr;
  1103. /* assume that the OS only provides a single fragment */
  1104. QDF_NBUF_CB_PADDR(buf) = paddr =
  1105. dma_map_single(osdev->dev, buf->data,
  1106. skb_end_pointer(buf) - buf->data,
  1107. __qdf_dma_dir_to_os(dir));
  1108. __qdf_record_nbuf_nbytes(
  1109. __qdf_nbuf_get_end_offset(buf), dir, true);
  1110. return dma_mapping_error(osdev->dev, paddr)
  1111. ? QDF_STATUS_E_FAILURE
  1112. : QDF_STATUS_SUCCESS;
  1113. }
  1114. qdf_export_symbol(__qdf_nbuf_map_single);
  1115. #endif
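/*
 * Note on the dma_map_single() path above: the mapping always covers the
 * whole linear buffer, from the current data pointer up to
 * skb_end_pointer(), so the mapped length is independent of skb->len and
 * includes any tailroom. As a purely illustrative example, if data starts
 * 64 bytes into a 2048-byte linear buffer, 1984 bytes are mapped even when
 * only 100 bytes of payload are present.
 */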
  1116. #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
  1117. void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
  1118. qdf_dma_dir_t dir)
  1119. {
  1120. }
  1121. #else
  1122. void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
  1123. qdf_dma_dir_t dir)
  1124. {
  1125. if (QDF_NBUF_CB_PADDR(buf)) {
  1126. __qdf_record_nbuf_nbytes(
  1127. __qdf_nbuf_get_end_offset(buf), dir, false);
  1128. dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
  1129. skb_end_pointer(buf) - buf->data,
  1130. __qdf_dma_dir_to_os(dir));
  1131. }
  1132. }
  1133. #endif
  1134. qdf_export_symbol(__qdf_nbuf_unmap_single);
  1135. QDF_STATUS
  1136. __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
  1137. {
  1138. switch (cksum->l4_result) {
  1139. case QDF_NBUF_RX_CKSUM_NONE:
  1140. skb->ip_summed = CHECKSUM_NONE;
  1141. break;
  1142. case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
  1143. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1144. skb->csum_level = cksum->csum_level;
  1145. break;
  1146. case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
  1147. skb->ip_summed = CHECKSUM_PARTIAL;
  1148. skb->csum = cksum->val;
  1149. break;
  1150. default:
  1151. pr_err("Unknown checksum type\n");
  1152. qdf_assert(0);
  1153. return QDF_STATUS_E_NOSUPPORT;
  1154. }
  1155. return QDF_STATUS_SUCCESS;
  1156. }
  1157. qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
  1158. qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
  1159. {
  1160. switch (skb->ip_summed) {
  1161. case CHECKSUM_NONE:
  1162. return QDF_NBUF_TX_CKSUM_NONE;
  1163. case CHECKSUM_PARTIAL:
  1164. return QDF_NBUF_TX_CKSUM_TCP_UDP;
  1165. case CHECKSUM_COMPLETE:
  1166. return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
  1167. default:
  1168. return QDF_NBUF_TX_CKSUM_NONE;
  1169. }
  1170. }
  1171. qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
  1172. uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
  1173. {
  1174. return skb->priority;
  1175. }
  1176. qdf_export_symbol(__qdf_nbuf_get_tid);
  1177. void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
  1178. {
  1179. skb->priority = tid;
  1180. }
  1181. qdf_export_symbol(__qdf_nbuf_set_tid);
  1182. uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
  1183. {
  1184. return QDF_NBUF_EXEMPT_NO_EXEMPTION;
  1185. }
  1186. qdf_export_symbol(__qdf_nbuf_get_exemption_type);
  1187. void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
  1188. {
  1189. qdf_trace_update_cb = cb_func_ptr;
  1190. }
  1191. qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
  1192. enum qdf_proto_subtype
  1193. __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
  1194. {
  1195. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  1196. if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
  1197. (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
  1198. QDF_DHCP_OPTION53_LENGTH)) {
  1199. switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
  1200. case QDF_DHCP_DISCOVER:
  1201. subtype = QDF_PROTO_DHCP_DISCOVER;
  1202. break;
  1203. case QDF_DHCP_REQUEST:
  1204. subtype = QDF_PROTO_DHCP_REQUEST;
  1205. break;
  1206. case QDF_DHCP_OFFER:
  1207. subtype = QDF_PROTO_DHCP_OFFER;
  1208. break;
  1209. case QDF_DHCP_ACK:
  1210. subtype = QDF_PROTO_DHCP_ACK;
  1211. break;
  1212. case QDF_DHCP_NAK:
  1213. subtype = QDF_PROTO_DHCP_NACK;
  1214. break;
  1215. case QDF_DHCP_RELEASE:
  1216. subtype = QDF_PROTO_DHCP_RELEASE;
  1217. break;
  1218. case QDF_DHCP_INFORM:
  1219. subtype = QDF_PROTO_DHCP_INFORM;
  1220. break;
  1221. case QDF_DHCP_DECLINE:
  1222. subtype = QDF_PROTO_DHCP_DECLINE;
  1223. break;
  1224. default:
  1225. break;
  1226. }
  1227. }
  1228. return subtype;
  1229. }
  1230. #define EAPOL_WPA_KEY_INFO_ACK BIT(7)
  1231. #define EAPOL_WPA_KEY_INFO_MIC BIT(8)
  1232. #define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */
/**
 * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key subtype
 * @data: Pointer to EAPOL packet data buffer
 *
 * M1/M3 can be distinguished from M2/M4 by the Ack bit in the key info field.
 * The relationship between the Ack bit and the EAPOL message type is:
 *
 * EAPOL type | M1 M2 M3 M4
 * --------------------------------------
 * Ack        |  1  0  1  0
 * --------------------------------------
 *
 * M1 is then differentiated from M3, and M2 from M4, as follows:
 * M2/M4: a zero key data length or an all-zero key nonce identifies M4.
 * M1/M3: the MIC or Encrypted Key Data bit set in the key info field
 *        identifies M3.
 *
 * Return: subtype of the EAPOL packet.
 */
  1251. static inline enum qdf_proto_subtype
  1252. __qdf_nbuf_data_get_eapol_key(uint8_t *data)
  1253. {
  1254. uint16_t key_info, key_data_length;
  1255. enum qdf_proto_subtype subtype;
  1256. uint64_t *key_nonce;
  1257. key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
  1258. (data + EAPOL_KEY_INFO_OFFSET)));
  1259. key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
  1260. (data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
  1261. key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);
  1262. if (key_info & EAPOL_WPA_KEY_INFO_ACK)
  1263. if (key_info &
  1264. (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
  1265. subtype = QDF_PROTO_EAPOL_M3;
  1266. else
  1267. subtype = QDF_PROTO_EAPOL_M1;
  1268. else
  1269. if (key_data_length == 0 ||
  1270. !((*key_nonce) || (*(key_nonce + 1)) ||
  1271. (*(key_nonce + 2)) || (*(key_nonce + 3))))
  1272. subtype = QDF_PROTO_EAPOL_M4;
  1273. else
  1274. subtype = QDF_PROTO_EAPOL_M2;
  1275. return subtype;
  1276. }
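/*
 * Worked example for the classification above (the key_info values are
 * typical WPA2 4-way handshake words, shown only for illustration):
 *
 *	key_info = 0x008a: Ack=1, MIC=0, EncrKeyData=0  -> QDF_PROTO_EAPOL_M1
 *	key_info = 0x010a: Ack=0, MIC=1, nonce != 0     -> QDF_PROTO_EAPOL_M2
 *	key_info = 0x13ca: Ack=1, MIC=1                 -> QDF_PROTO_EAPOL_M3
 *	key_info = 0x030a: Ack=0, key data length == 0  -> QDF_PROTO_EAPOL_M4
 *
 * Ack is bit 7 (0x0080), MIC is bit 8 (0x0100) and Encrypted Key Data is
 * bit 12 (0x1000), matching the EAPOL_WPA_KEY_INFO_* defines above.
 */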
  1277. /**
  1278. * __qdf_nbuf_data_get_exp_msg_type() - Get EAP expanded msg type
  1279. * @data: Pointer to EAPOL packet data buffer
  1280. * @code: EAP code
  1281. *
  1282. * Return: subtype of the EAPOL packet.
  1283. */
  1284. static inline enum qdf_proto_subtype
  1285. __qdf_nbuf_data_get_exp_msg_type(uint8_t *data, uint8_t code)
  1286. {
  1287. uint8_t msg_type;
  1288. uint8_t opcode = *(data + EAP_EXP_MSG_OPCODE_OFFSET);
  1289. switch (opcode) {
  1290. case WSC_START:
  1291. return QDF_PROTO_EAP_WSC_START;
  1292. case WSC_ACK:
  1293. return QDF_PROTO_EAP_WSC_ACK;
  1294. case WSC_NACK:
  1295. return QDF_PROTO_EAP_WSC_NACK;
  1296. case WSC_MSG:
  1297. msg_type = *(data + EAP_EXP_MSG_TYPE_OFFSET);
  1298. switch (msg_type) {
  1299. case EAP_EXP_TYPE_M1:
  1300. return QDF_PROTO_EAP_M1;
  1301. case EAP_EXP_TYPE_M2:
  1302. return QDF_PROTO_EAP_M2;
  1303. case EAP_EXP_TYPE_M3:
  1304. return QDF_PROTO_EAP_M3;
  1305. case EAP_EXP_TYPE_M4:
  1306. return QDF_PROTO_EAP_M4;
  1307. case EAP_EXP_TYPE_M5:
  1308. return QDF_PROTO_EAP_M5;
  1309. case EAP_EXP_TYPE_M6:
  1310. return QDF_PROTO_EAP_M6;
  1311. case EAP_EXP_TYPE_M7:
  1312. return QDF_PROTO_EAP_M7;
  1313. case EAP_EXP_TYPE_M8:
  1314. return QDF_PROTO_EAP_M8;
  1315. default:
  1316. break;
  1317. }
  1318. break;
  1319. case WSC_DONE:
  1320. return QDF_PROTO_EAP_WSC_DONE;
  1321. case WSC_FRAG_ACK:
  1322. return QDF_PROTO_EAP_WSC_FRAG_ACK;
  1323. default:
  1324. break;
  1325. }
  1326. switch (code) {
  1327. case QDF_EAP_REQUEST:
  1328. return QDF_PROTO_EAP_REQUEST;
  1329. case QDF_EAP_RESPONSE:
  1330. return QDF_PROTO_EAP_RESPONSE;
  1331. default:
  1332. return QDF_PROTO_INVALID;
  1333. }
  1334. }
  1335. /**
  1336. * __qdf_nbuf_data_get_eap_type() - Get EAP type
  1337. * @data: Pointer to EAPOL packet data buffer
  1338. * @code: EAP code
  1339. *
  1340. * Return: subtype of the EAPOL packet.
  1341. */
  1342. static inline enum qdf_proto_subtype
  1343. __qdf_nbuf_data_get_eap_type(uint8_t *data, uint8_t code)
  1344. {
  1345. uint8_t type = *(data + EAP_TYPE_OFFSET);
  1346. switch (type) {
  1347. case EAP_PACKET_TYPE_EXP:
  1348. return __qdf_nbuf_data_get_exp_msg_type(data, code);
  1349. case EAP_PACKET_TYPE_ID:
  1350. switch (code) {
  1351. case QDF_EAP_REQUEST:
  1352. return QDF_PROTO_EAP_REQ_ID;
  1353. case QDF_EAP_RESPONSE:
  1354. return QDF_PROTO_EAP_RSP_ID;
  1355. default:
  1356. return QDF_PROTO_INVALID;
  1357. }
  1358. default:
  1359. switch (code) {
  1360. case QDF_EAP_REQUEST:
  1361. return QDF_PROTO_EAP_REQUEST;
  1362. case QDF_EAP_RESPONSE:
  1363. return QDF_PROTO_EAP_RESPONSE;
  1364. default:
  1365. return QDF_PROTO_INVALID;
  1366. }
  1367. }
  1368. }
/**
 * __qdf_nbuf_data_get_eap_code() - Get EAP code
 * @data: Pointer to EAPOL packet data buffer
 *
 * Return: subtype of the EAPOL packet.
 */
  1375. static inline enum qdf_proto_subtype
  1376. __qdf_nbuf_data_get_eap_code(uint8_t *data)
  1377. {
  1378. uint8_t code = *(data + EAP_CODE_OFFSET);
  1379. switch (code) {
  1380. case QDF_EAP_REQUEST:
  1381. case QDF_EAP_RESPONSE:
  1382. return __qdf_nbuf_data_get_eap_type(data, code);
  1383. case QDF_EAP_SUCCESS:
  1384. return QDF_PROTO_EAP_SUCCESS;
  1385. case QDF_EAP_FAILURE:
  1386. return QDF_PROTO_EAP_FAILURE;
  1387. case QDF_EAP_INITIATE:
  1388. return QDF_PROTO_EAP_INITIATE;
  1389. case QDF_EAP_FINISH:
  1390. return QDF_PROTO_EAP_FINISH;
  1391. default:
  1392. return QDF_PROTO_INVALID;
  1393. }
  1394. }
  1395. enum qdf_proto_subtype
  1396. __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
  1397. {
  1398. uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
  1399. switch (pkt_type) {
  1400. case EAPOL_PACKET_TYPE_EAP:
  1401. return __qdf_nbuf_data_get_eap_code(data);
  1402. case EAPOL_PACKET_TYPE_START:
  1403. return QDF_PROTO_EAPOL_START;
  1404. case EAPOL_PACKET_TYPE_LOGOFF:
  1405. return QDF_PROTO_EAPOL_LOGOFF;
  1406. case EAPOL_PACKET_TYPE_KEY:
  1407. return __qdf_nbuf_data_get_eapol_key(data);
  1408. case EAPOL_PACKET_TYPE_ASF:
  1409. return QDF_PROTO_EAPOL_ASF;
  1410. default:
  1411. return QDF_PROTO_INVALID;
  1412. }
  1413. }
  1414. qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
  1415. enum qdf_proto_subtype
  1416. __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
  1417. {
  1418. uint16_t subtype;
  1419. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1420. subtype = (uint16_t)(*(uint16_t *)
  1421. (data + ARP_SUB_TYPE_OFFSET));
  1422. switch (QDF_SWAP_U16(subtype)) {
  1423. case ARP_REQUEST:
  1424. proto_subtype = QDF_PROTO_ARP_REQ;
  1425. break;
  1426. case ARP_RESPONSE:
  1427. proto_subtype = QDF_PROTO_ARP_RES;
  1428. break;
  1429. default:
  1430. break;
  1431. }
  1432. return proto_subtype;
  1433. }
  1434. enum qdf_proto_subtype
  1435. __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
  1436. {
  1437. uint8_t subtype;
  1438. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1439. subtype = (uint8_t)(*(uint8_t *)
  1440. (data + ICMP_SUBTYPE_OFFSET));
  1441. switch (subtype) {
  1442. case ICMP_REQUEST:
  1443. proto_subtype = QDF_PROTO_ICMP_REQ;
  1444. break;
  1445. case ICMP_RESPONSE:
  1446. proto_subtype = QDF_PROTO_ICMP_RES;
  1447. break;
  1448. default:
  1449. break;
  1450. }
  1451. return proto_subtype;
  1452. }
  1453. enum qdf_proto_subtype
  1454. __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
  1455. {
  1456. uint8_t subtype;
  1457. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1458. subtype = (uint8_t)(*(uint8_t *)
  1459. (data + ICMPV6_SUBTYPE_OFFSET));
  1460. switch (subtype) {
  1461. case ICMPV6_REQUEST:
  1462. proto_subtype = QDF_PROTO_ICMPV6_REQ;
  1463. break;
  1464. case ICMPV6_RESPONSE:
  1465. proto_subtype = QDF_PROTO_ICMPV6_RES;
  1466. break;
  1467. case ICMPV6_RS:
  1468. proto_subtype = QDF_PROTO_ICMPV6_RS;
  1469. break;
  1470. case ICMPV6_RA:
  1471. proto_subtype = QDF_PROTO_ICMPV6_RA;
  1472. break;
  1473. case ICMPV6_NS:
  1474. proto_subtype = QDF_PROTO_ICMPV6_NS;
  1475. break;
  1476. case ICMPV6_NA:
  1477. proto_subtype = QDF_PROTO_ICMPV6_NA;
  1478. break;
  1479. default:
  1480. break;
  1481. }
  1482. return proto_subtype;
  1483. }
  1484. bool
  1485. __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
  1486. {
  1487. if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
  1488. return true;
  1489. return false;
  1490. }
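/*
 * Illustration of the More-Fragments check above (assuming the usual Linux
 * values IP_MF = 0x2000 and IP_OFFSET = 0x1fff): a frag_off field of 0x2000
 * (MF set, offset 0) or 0x2003 (MF set, non-zero offset) is not the last
 * fragment, while 0x00b9 (MF clear, non-zero offset) and 0x0000
 * (unfragmented) both report true.
 */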
  1491. void
  1492. __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
  1493. {
  1494. *(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
  1495. }
  1496. uint8_t
  1497. __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
  1498. {
  1499. uint8_t tos;
  1500. tos = (uint8_t)(*(uint8_t *)(data +
  1501. QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
  1502. return tos;
  1503. }
  1504. uint8_t
  1505. __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
  1506. {
  1507. uint8_t proto_type;
  1508. proto_type = (uint8_t)(*(uint8_t *)(data +
  1509. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  1510. return proto_type;
  1511. }
  1512. uint8_t
  1513. __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
  1514. {
  1515. struct ipv6hdr *hdr;
  1516. hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
  1517. return ip6_tclass(ip6_flowinfo(hdr));
  1518. }
  1519. void
  1520. __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
  1521. {
  1522. struct ipv6hdr *hdr;
  1523. hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
  1524. ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
  1525. }
  1526. uint8_t
  1527. __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
  1528. {
  1529. uint8_t proto_type;
  1530. proto_type = (uint8_t)(*(uint8_t *)(data +
  1531. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  1532. return proto_type;
  1533. }
  1534. bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
  1535. {
  1536. uint16_t ether_type;
  1537. ether_type = (uint16_t)(*(uint16_t *)(data +
  1538. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  1539. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1540. return true;
  1541. else
  1542. return false;
  1543. }
  1544. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
  1545. bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
  1546. {
  1547. uint16_t sport;
  1548. uint16_t dport;
  1549. uint8_t ipv4_offset;
  1550. uint8_t ipv4_hdr_len;
  1551. struct iphdr *iphdr;
  1552. if (__qdf_nbuf_get_ether_type(data) !=
  1553. QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1554. return false;
  1555. ipv4_offset = __qdf_nbuf_get_ip_offset(data);
  1556. iphdr = (struct iphdr *)(data + ipv4_offset);
  1557. ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
  1558. sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
  1559. dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
  1560. sizeof(uint16_t));
  1561. if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
  1562. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
  1563. ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
  1564. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
  1565. return true;
  1566. else
  1567. return false;
  1568. }
  1569. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
/**
 * qdf_is_eapol_type() - check whether the ether type is EAPOL
 * @type: ether type of the packet
 *
 * This API checks whether the frame is an EAPOL packet.
 *
 * Return: true if it is an EAPOL frame, false otherwise.
 */
  1579. #ifdef BIG_ENDIAN_HOST
  1580. static inline bool qdf_is_eapol_type(uint16_t type)
  1581. {
  1582. return (type == QDF_NBUF_TRAC_EAPOL_ETH_TYPE);
  1583. }
  1584. #else
  1585. static inline bool qdf_is_eapol_type(uint16_t type)
  1586. {
  1587. return (type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE));
  1588. }
  1589. #endif
  1590. bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
  1591. {
  1592. uint16_t ether_type;
  1593. ether_type = __qdf_nbuf_get_ether_type(data);
  1594. return qdf_is_eapol_type(ether_type);
  1595. }
  1596. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
  1597. bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
  1598. {
  1599. uint16_t ether_type;
  1600. ether_type = (uint16_t)(*(uint16_t *)(skb->data +
  1601. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  1602. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
  1603. return true;
  1604. else
  1605. return false;
  1606. }
  1607. qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
/**
 * qdf_nbuf_is_ipv6_vlan_pkt() - check whether the packet is VLAN-tagged IPv6
 * @data: Pointer to network data buffer
 *
 * This API checks for an IPv6 packet carried behind an 802.1Q VLAN header.
 *
 * Return: true if the packet is a VLAN-tagged IPv6 packet, false otherwise.
 */
  1617. static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
  1618. {
  1619. uint16_t ether_type;
  1620. ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1621. if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
  1622. ether_type = *(uint16_t *)(data +
  1623. QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
  1624. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
  1625. return true;
  1626. }
  1627. return false;
  1628. }
/**
 * qdf_nbuf_is_ipv4_vlan_pkt() - check whether the packet is VLAN-tagged IPv4
 * @data: Pointer to network data buffer
 *
 * This API checks for an IPv4 packet carried behind an 802.1Q VLAN header.
 *
 * Return: true if the packet is a VLAN-tagged IPv4 packet, false otherwise.
 */
  1638. static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
  1639. {
  1640. uint16_t ether_type;
  1641. ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1642. if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
  1643. ether_type = *(uint16_t *)(data +
  1644. QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
  1645. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1646. return true;
  1647. }
  1648. return false;
  1649. }
  1650. bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
  1651. {
  1652. uint8_t pkt_type;
  1653. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  1654. pkt_type = (uint8_t)(*(uint8_t *)(data +
  1655. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  1656. goto is_igmp;
  1657. }
  1658. if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
  1659. pkt_type = (uint8_t)(*(uint8_t *)(
  1660. data +
  1661. QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
  1662. goto is_igmp;
  1663. }
  1664. return false;
  1665. is_igmp:
  1666. if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
  1667. return true;
  1668. return false;
  1669. }
  1670. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
  1671. bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
  1672. {
  1673. uint8_t pkt_type;
  1674. uint8_t next_hdr;
  1675. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  1676. pkt_type = (uint8_t)(*(uint8_t *)(data +
  1677. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  1678. next_hdr = (uint8_t)(*(uint8_t *)(
  1679. data +
  1680. QDF_NBUF_TRAC_IPV6_OFFSET +
  1681. QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
  1682. goto is_mld;
  1683. }
  1684. if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
  1685. pkt_type = (uint8_t)(*(uint8_t *)(
  1686. data +
  1687. QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
  1688. next_hdr = (uint8_t)(*(uint8_t *)(
  1689. data +
  1690. QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
  1691. QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
  1692. goto is_mld;
  1693. }
  1694. return false;
  1695. is_mld:
  1696. if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
  1697. return true;
  1698. if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
  1699. (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
  1700. return true;
  1701. return false;
  1702. }
  1703. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
  1704. bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
  1705. {
  1706. qdf_ether_header_t *eh = NULL;
  1707. uint16_t ether_type;
  1708. uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
  1709. eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
  1710. ether_type = eh->ether_type;
  1711. if (ether_type == htons(ETH_P_8021Q)) {
  1712. struct vlan_ethhdr *veth =
  1713. (struct vlan_ethhdr *)qdf_nbuf_data(buf);
  1714. ether_type = veth->h_vlan_encapsulated_proto;
  1715. eth_hdr_size = sizeof(struct vlan_ethhdr);
  1716. }
  1717. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
  1718. struct iphdr *iph = NULL;
  1719. struct igmphdr *ih = NULL;
  1720. iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
  1721. ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
  1722. switch (ih->type) {
  1723. case IGMP_HOST_LEAVE_MESSAGE:
  1724. return true;
  1725. case IGMPV3_HOST_MEMBERSHIP_REPORT:
  1726. {
  1727. struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
  1728. struct igmpv3_grec *grec = NULL;
  1729. int num = 0;
  1730. int i = 0;
  1731. int len = 0;
  1732. int type = 0;
  1733. num = ntohs(ihv3->ngrec);
  1734. for (i = 0; i < num; i++) {
  1735. grec = (void *)((uint8_t *)(ihv3->grec) + len);
  1736. type = grec->grec_type;
  1737. if ((type == IGMPV3_MODE_IS_INCLUDE) ||
  1738. (type == IGMPV3_CHANGE_TO_INCLUDE))
  1739. return true;
  1740. len += sizeof(struct igmpv3_grec);
  1741. len += ntohs(grec->grec_nsrcs) * 4;
  1742. }
  1743. break;
  1744. }
  1745. default:
  1746. break;
  1747. }
  1748. }
  1749. return false;
  1750. }
  1751. qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
  1752. bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
  1753. {
  1754. qdf_ether_header_t *eh = NULL;
  1755. uint16_t ether_type;
  1756. uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
  1757. eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
  1758. ether_type = eh->ether_type;
  1759. if (ether_type == htons(ETH_P_8021Q)) {
  1760. struct vlan_ethhdr *veth =
  1761. (struct vlan_ethhdr *)qdf_nbuf_data(buf);
  1762. ether_type = veth->h_vlan_encapsulated_proto;
  1763. eth_hdr_size = sizeof(struct vlan_ethhdr);
  1764. }
  1765. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
  1766. struct ipv6hdr *ip6h = NULL;
  1767. struct icmp6hdr *icmp6h = NULL;
  1768. uint8_t nexthdr;
  1769. uint16_t frag_off = 0;
  1770. int offset;
  1771. qdf_nbuf_t buf_copy = NULL;
  1772. ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
  1773. if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
  1774. ip6h->payload_len == 0)
  1775. return false;
  1776. buf_copy = qdf_nbuf_copy(buf);
  1777. if (qdf_likely(!buf_copy))
  1778. return false;
  1779. nexthdr = ip6h->nexthdr;
  1780. offset = ipv6_skip_exthdr(buf_copy,
  1781. eth_hdr_size + sizeof(*ip6h),
  1782. &nexthdr,
  1783. &frag_off);
  1784. qdf_nbuf_free(buf_copy);
  1785. if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
  1786. return false;
  1787. icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);
  1788. switch (icmp6h->icmp6_type) {
  1789. case ICMPV6_MGM_REDUCTION:
  1790. return true;
  1791. case ICMPV6_MLD2_REPORT:
  1792. {
  1793. struct mld2_report *mh = NULL;
  1794. struct mld2_grec *grec = NULL;
  1795. int num = 0;
  1796. int i = 0;
  1797. int len = 0;
  1798. int type = -1;
  1799. mh = (struct mld2_report *)icmp6h;
  1800. num = ntohs(mh->mld2r_ngrec);
  1801. for (i = 0; i < num; i++) {
  1802. grec = (void *)(((uint8_t *)mh->mld2r_grec) +
  1803. len);
  1804. type = grec->grec_type;
  1805. if ((type == MLD2_MODE_IS_INCLUDE) ||
  1806. (type == MLD2_CHANGE_TO_INCLUDE))
  1807. return true;
  1808. else if (type == MLD2_BLOCK_OLD_SOURCES)
  1809. return true;
  1810. len += sizeof(struct mld2_grec);
  1811. len += ntohs(grec->grec_nsrcs) *
  1812. sizeof(struct in6_addr);
  1813. }
  1814. break;
  1815. }
  1816. default:
  1817. break;
  1818. }
  1819. }
  1820. return false;
  1821. }
  1822. qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
  1823. bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
  1824. {
  1825. uint16_t ether_type;
  1826. ether_type = *(uint16_t *)(skb->data +
  1827. QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1828. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
  1829. return true;
  1830. else
  1831. return false;
  1832. }
  1833. qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
  1834. bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
  1835. {
  1836. uint16_t ether_type;
  1837. ether_type = __qdf_nbuf_get_ether_type(data);
  1838. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
  1839. return true;
  1840. else
  1841. return false;
  1842. }
  1843. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
  1844. bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
  1845. {
  1846. uint16_t op_code;
  1847. op_code = (uint16_t)(*(uint16_t *)(data +
  1848. QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
  1849. if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
  1850. return true;
  1851. return false;
  1852. }
  1853. bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
  1854. {
  1855. uint16_t op_code;
  1856. op_code = (uint16_t)(*(uint16_t *)(data +
  1857. QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
  1858. if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
  1859. return true;
  1860. return false;
  1861. }
  1862. uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
  1863. {
  1864. uint32_t src_ip;
  1865. src_ip = (uint32_t)(*(uint32_t *)(data +
  1866. QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
  1867. return src_ip;
  1868. }
  1869. uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
  1870. {
  1871. uint32_t tgt_ip;
  1872. tgt_ip = (uint32_t)(*(uint32_t *)(data +
  1873. QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
  1874. return tgt_ip;
  1875. }
  1876. uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
  1877. {
  1878. uint8_t *domain_name;
  1879. domain_name = (uint8_t *)
  1880. (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
  1881. return domain_name;
  1882. }
  1883. bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
  1884. {
  1885. uint16_t op_code;
  1886. uint16_t tgt_port;
  1887. tgt_port = (uint16_t)(*(uint16_t *)(data +
  1888. QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
/* A standard DNS query always goes to destination port 53. */
  1890. if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
  1891. op_code = (uint16_t)(*(uint16_t *)(data +
  1892. QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
  1893. if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
  1894. QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
  1895. return true;
  1896. }
  1897. return false;
  1898. }
  1899. bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
  1900. {
  1901. uint16_t op_code;
  1902. uint16_t src_port;
  1903. src_port = (uint16_t)(*(uint16_t *)(data +
  1904. QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
/* A standard DNS response always comes from source port 53. */
  1906. if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
  1907. op_code = (uint16_t)(*(uint16_t *)(data +
  1908. QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
  1909. if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
  1910. QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
  1911. return true;
  1912. }
  1913. return false;
  1914. }
  1915. bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
  1916. {
  1917. uint8_t op_code;
  1918. op_code = (uint8_t)(*(uint8_t *)(data +
  1919. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1920. if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
  1921. return true;
  1922. return false;
  1923. }
  1924. bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
  1925. {
  1926. uint8_t op_code;
  1927. op_code = (uint8_t)(*(uint8_t *)(data +
  1928. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1929. if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
  1930. return true;
  1931. return false;
  1932. }
  1933. bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
  1934. {
  1935. uint8_t op_code;
  1936. op_code = (uint8_t)(*(uint8_t *)(data +
  1937. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1938. if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
  1939. return true;
  1940. return false;
  1941. }
  1942. bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
  1943. {
  1944. uint8_t op_code;
  1945. op_code = (uint8_t)(*(uint8_t *)(data +
  1946. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1947. if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
  1948. return true;
  1949. return false;
  1950. }
  1951. bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
  1952. {
  1953. uint8_t op_code;
  1954. op_code = (uint8_t)(*(uint8_t *)(data +
  1955. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1956. if (op_code == QDF_NBUF_PKT_TCPOP_RST)
  1957. return true;
  1958. return false;
  1959. }
  1960. bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
  1961. {
  1962. uint8_t op_code;
  1963. op_code = (uint8_t)(*(uint8_t *)(data +
  1964. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1965. if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
  1966. return true;
  1967. return false;
  1968. }
  1969. uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
  1970. {
  1971. uint16_t src_port;
  1972. src_port = (uint16_t)(*(uint16_t *)(data +
  1973. QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
  1974. return src_port;
  1975. }
  1976. uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
  1977. {
  1978. uint16_t tgt_port;
  1979. tgt_port = (uint16_t)(*(uint16_t *)(data +
  1980. QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
  1981. return tgt_port;
  1982. }
  1983. bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
  1984. {
  1985. uint8_t op_code;
  1986. op_code = (uint8_t)(*(uint8_t *)(data +
  1987. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  1988. if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
  1989. return true;
  1990. return false;
  1991. }
  1992. bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
  1993. {
  1994. uint8_t op_code;
  1995. op_code = (uint8_t)(*(uint8_t *)(data +
  1996. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  1997. if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
  1998. return true;
  1999. return false;
  2000. }
  2001. bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
  2002. {
  2003. uint8_t op_code;
  2004. op_code = (uint8_t)(*(uint8_t *)(data +
  2005. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  2006. if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
  2007. return true;
  2008. return false;
  2009. }
  2010. qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
  2011. bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
  2012. {
  2013. uint8_t subtype;
  2014. subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
  2015. if (subtype == ICMPV6_REDIRECT)
  2016. return true;
  2017. return false;
  2018. }
  2019. qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
  2020. uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
  2021. {
  2022. uint32_t src_ip;
  2023. src_ip = (uint32_t)(*(uint32_t *)(data +
  2024. QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
  2025. return src_ip;
  2026. }
  2027. uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
  2028. {
  2029. uint32_t tgt_ip;
  2030. tgt_ip = (uint32_t)(*(uint32_t *)(data +
  2031. QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
  2032. return tgt_ip;
  2033. }
  2034. bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
  2035. {
  2036. uint16_t ether_type;
  2037. ether_type = (uint16_t)(*(uint16_t *)(data +
  2038. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  2039. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
  2040. return true;
  2041. else
  2042. return false;
  2043. }
  2044. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
  2045. bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
  2046. {
  2047. uint16_t sport;
  2048. uint16_t dport;
  2049. uint8_t ipv6_offset;
  2050. if (!__qdf_nbuf_data_is_ipv6_pkt(data))
  2051. return false;
  2052. ipv6_offset = __qdf_nbuf_get_ip_offset(data);
  2053. sport = *(uint16_t *)(data + ipv6_offset +
  2054. QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
  2055. dport = *(uint16_t *)(data + ipv6_offset +
  2056. QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
  2057. sizeof(uint16_t));
  2058. if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
  2059. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
  2060. ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
  2061. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
  2062. return true;
  2063. else
  2064. return false;
  2065. }
  2066. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
  2067. bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
  2068. {
  2069. uint16_t sport;
  2070. uint16_t dport;
  2071. sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
  2072. QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
  2073. dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
  2074. QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
  2075. sizeof(uint16_t));
  2076. if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
  2077. dport == sport)
  2078. return true;
  2079. else
  2080. return false;
  2081. }
  2082. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
  2083. bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
  2084. {
  2085. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2086. uint32_t *dst_addr =
  2087. (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
/*
 * Check the most significant nibble of the IPv4 destination
 * address; a value of 0xE (224.0.0.0/4) indicates a multicast
 * address.
 */
  2092. if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
  2093. QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
  2094. return true;
  2095. else
  2096. return false;
  2097. } else
  2098. return false;
  2099. }
  2100. bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
  2101. {
  2102. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2103. uint16_t *dst_addr;
  2104. dst_addr = (uint16_t *)
  2105. (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
/*
 * Check the first 16 bits of the IPv6 destination address;
 * a value of 0xFF00 marks the packet as IPv6 multicast.
 */
  2110. if (*dst_addr ==
  2111. QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
  2112. return true;
  2113. else
  2114. return false;
  2115. } else
  2116. return false;
  2117. }
  2118. bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
  2119. {
  2120. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2121. uint8_t pkt_type;
  2122. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2123. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2124. if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
  2125. return true;
  2126. else
  2127. return false;
  2128. } else
  2129. return false;
  2130. }
  2131. qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
  2132. bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
  2133. {
  2134. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2135. uint8_t pkt_type;
  2136. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2137. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2138. if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
  2139. return true;
  2140. else
  2141. return false;
  2142. } else
  2143. return false;
  2144. }
  2145. qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
  2146. bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
  2147. {
  2148. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2149. uint8_t pkt_type;
  2150. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2151. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2152. if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
  2153. return true;
  2154. else
  2155. return false;
  2156. } else
  2157. return false;
  2158. }
  2159. bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
  2160. {
  2161. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2162. uint8_t pkt_type;
  2163. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2164. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2165. if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
  2166. return true;
  2167. else
  2168. return false;
  2169. } else
  2170. return false;
  2171. }
  2172. bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
  2173. {
  2174. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2175. uint8_t pkt_type;
  2176. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2177. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2178. if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
  2179. return true;
  2180. else
  2181. return false;
  2182. } else
  2183. return false;
  2184. }
  2185. bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
  2186. {
  2187. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2188. uint8_t pkt_type;
  2189. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2190. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2191. if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
  2192. return true;
  2193. else
  2194. return false;
  2195. } else
  2196. return false;
  2197. }
  2198. bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
  2199. {
  2200. struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
  2201. return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
  2202. }
  2203. qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
  2204. bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
  2205. {
  2206. struct sk_buff *skb = (struct sk_buff *)nbuf;
  2207. struct ethhdr *eth = eth_hdr(skb);
  2208. if (qdf_likely(skb->pkt_type != PACKET_MULTICAST))
  2209. return false;
  2210. if (qdf_unlikely(ether_addr_equal(eth->h_source, skb->dev->dev_addr)))
  2211. return true;
  2212. return false;
  2213. }
  2214. bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
  2215. {
  2216. struct arphdr *arp;
  2217. struct in_ifaddr **ifap = NULL;
  2218. struct in_ifaddr *ifa = NULL;
  2219. struct in_device *in_dev;
  2220. unsigned char *arp_ptr;
  2221. __be32 tip;
  2222. arp = (struct arphdr *)skb->data;
  2223. if (arp->ar_op == htons(ARPOP_REQUEST)) {
  2224. /* if fail to acquire rtnl lock, assume it's local arp */
  2225. if (!rtnl_trylock())
  2226. return true;
  2227. in_dev = __in_dev_get_rtnl(skb->dev);
  2228. if (in_dev) {
  2229. for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
  2230. ifap = &ifa->ifa_next) {
  2231. if (!strcmp(skb->dev->name, ifa->ifa_label))
  2232. break;
  2233. }
  2234. }
  2235. if (ifa && ifa->ifa_local) {
  2236. arp_ptr = (unsigned char *)(arp + 1);
  2237. arp_ptr += (skb->dev->addr_len + 4 +
  2238. skb->dev->addr_len);
  2239. memcpy(&tip, arp_ptr, 4);
  2240. qdf_debug("ARP packet: local IP: %x dest IP: %x",
  2241. ifa->ifa_local, tip);
  2242. if (ifa->ifa_local == tip) {
  2243. rtnl_unlock();
  2244. return true;
  2245. }
  2246. }
  2247. rtnl_unlock();
  2248. }
  2249. return false;
  2250. }
/**
 * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
 * @data: pointer to data of network buffer
 * @tcp_hdr_len_offset: byte offset of the TCP header-length field within the
 *                      ethernet frame
 *
 * Return: TCP header length in bytes
 */
  2258. static inline
  2259. uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
  2260. uint8_t tcp_hdr_len_offset)
  2261. {
  2262. uint8_t tcp_hdr_len;
  2263. tcp_hdr_len =
  2264. *((uint8_t *)(data + tcp_hdr_len_offset));
  2265. tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
  2266. QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
  2267. QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
  2268. return tcp_hdr_len;
  2269. }
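/*
 * Worked example for the helper above, assuming the usual definitions
 * QDF_NBUF_PKT_TCP_HDR_LEN_MASK = 0xf0, _LSB = 4 and _UNIT = 4: the TCP
 * data-offset nibble sits in the upper half of the byte, so a raw value of
 * 0x50 yields ((0x50 & 0xf0) >> 4) * 4 = 20 bytes (no TCP options), while
 * 0x80 yields 8 * 4 = 32 bytes (12 bytes of options).
 */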
  2270. bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
  2271. {
  2272. bool is_tcp_ack = false;
  2273. uint8_t op_code, tcp_hdr_len;
  2274. uint16_t ip_payload_len;
  2275. uint8_t *data = skb->data;
/*
 * If the packet is longer than the maximum pure TCP ACK length, or the
 * skb is nonlinear, it cannot be a pure TCP ACK.
 */
  2280. if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
  2281. qdf_nbuf_is_nonlinear(skb))
  2282. return false;
  2283. if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
  2284. ip_payload_len =
  2285. QDF_SWAP_U16(*((uint16_t *)(data +
  2286. QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
  2287. - QDF_NBUF_TRAC_IPV4_HEADER_SIZE;
  2288. tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
  2289. data,
  2290. QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);
  2291. op_code = (uint8_t)(*(uint8_t *)(data +
  2292. QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));
  2293. if (ip_payload_len == tcp_hdr_len &&
  2294. op_code == QDF_NBUF_PKT_TCPOP_ACK)
  2295. is_tcp_ack = true;
  2296. } else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
  2297. ip_payload_len =
  2298. QDF_SWAP_U16(*((uint16_t *)(data +
  2299. QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));
  2300. tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
  2301. data,
  2302. QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
  2303. op_code = (uint8_t)(*(uint8_t *)(data +
  2304. QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));
  2305. if (ip_payload_len == tcp_hdr_len &&
  2306. op_code == QDF_NBUF_PKT_TCPOP_ACK)
  2307. is_tcp_ack = true;
  2308. }
  2309. return is_tcp_ack;
  2310. }
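/*
 * Worked example for the IPv4 branch above: a pure ACK with no TCP options
 * carries an IPv4 total length of 40, so ip_payload_len = 40 - 20 = 20,
 * which equals the 20-byte TCP header length; with the flags byte equal to
 * QDF_NBUF_PKT_TCPOP_ACK the frame is classified as a pure TCP ACK. If the
 * segment carried even one byte of payload, ip_payload_len would be 21 and
 * the check would fail.
 */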
  2311. #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
  2312. bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
  2313. {
  2314. return nbuf->fast_xmit;
  2315. }
  2316. qdf_export_symbol(qdf_nbuf_fast_xmit);
  2317. void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
  2318. {
  2319. nbuf->fast_xmit = value;
  2320. }
  2321. qdf_export_symbol(qdf_nbuf_set_fast_xmit);
  2322. #else
  2323. bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
  2324. {
  2325. return false;
  2326. }
  2327. qdf_export_symbol(qdf_nbuf_fast_xmit);
  2328. void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
  2329. {
  2330. }
  2331. qdf_export_symbol(qdf_nbuf_set_fast_xmit);
  2332. #endif
  2333. #ifdef NBUF_MEMORY_DEBUG
  2334. static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
  2335. static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
  2336. static struct kmem_cache *nbuf_tracking_cache;
  2337. static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
  2338. static spinlock_t qdf_net_buf_track_free_list_lock;
  2339. static uint32_t qdf_net_buf_track_free_list_count;
  2340. static uint32_t qdf_net_buf_track_used_list_count;
  2341. static uint32_t qdf_net_buf_track_max_used;
  2342. static uint32_t qdf_net_buf_track_max_free;
  2343. static uint32_t qdf_net_buf_track_max_allocated;
  2344. static uint32_t qdf_net_buf_track_fail_count;
/**
 * update_max_used() - update qdf_net_buf_track_max_used tracking variable
 *
 * Tracks the maximum number of network buffers that the wlan driver was
 * tracking at any one time, as well as the maximum number allocated overall
 * (buffers in use plus buffers on the freelist).
 *
 * Return: none
 */
  2353. static inline void update_max_used(void)
  2354. {
  2355. int sum;
  2356. if (qdf_net_buf_track_max_used <
  2357. qdf_net_buf_track_used_list_count)
  2358. qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
  2359. sum = qdf_net_buf_track_free_list_count +
  2360. qdf_net_buf_track_used_list_count;
  2361. if (qdf_net_buf_track_max_allocated < sum)
  2362. qdf_net_buf_track_max_allocated = sum;
  2363. }
/**
 * update_max_free() - update qdf_net_buf_track_max_free
 *
 * Tracks the maximum number of tracking buffers kept in the freelist.
 *
 * Return: none
 */
  2371. static inline void update_max_free(void)
  2372. {
  2373. if (qdf_net_buf_track_max_free <
  2374. qdf_net_buf_track_free_list_count)
  2375. qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
  2376. }
/**
 * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
 *
 * This function pulls from a freelist if possible and otherwise falls back to
 * kmem_cache_alloc. This adds flexibility to adjust the allocation and
 * freelist schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure; the memory is not
 * guaranteed to be zeroed.
 */
  2386. static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
  2387. {
  2388. int flags = GFP_KERNEL;
  2389. unsigned long irq_flag;
  2390. QDF_NBUF_TRACK *new_node = NULL;
  2391. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2392. qdf_net_buf_track_used_list_count++;
  2393. if (qdf_net_buf_track_free_list) {
  2394. new_node = qdf_net_buf_track_free_list;
  2395. qdf_net_buf_track_free_list =
  2396. qdf_net_buf_track_free_list->p_next;
  2397. qdf_net_buf_track_free_list_count--;
  2398. }
  2399. update_max_used();
  2400. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2401. if (new_node)
  2402. return new_node;
  2403. if (in_interrupt() || irqs_disabled() || in_atomic())
  2404. flags = GFP_ATOMIC;
  2405. return kmem_cache_alloc(nbuf_tracking_cache, flags);
  2406. }
  2407. /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
  2408. #define FREEQ_POOLSIZE 2048
/**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: nbuf tracking node
 *
 * Matches calls to qdf_nbuf_track_alloc.
 * Either returns the tracking cookie to the kernel or places it on an
 * internal freelist, depending on the current size of the freelist.
 *
 * Return: none
 */
  2419. static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
  2420. {
  2421. unsigned long irq_flag;
  2422. if (!node)
  2423. return;
/* Only try to shrink the freelist when free_list_count exceeds
 * FREEQ_POOLSIZE and the freelist is more than twice the number of
 * nbufs in use. If the driver stalls in a consistently bursty
 * fashion, this keeps roughly 3/4 of the allocations on the free list
 * while still allowing the system to recover memory as traffic
 * becomes less frantic.
 */
  2431. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2432. qdf_net_buf_track_used_list_count--;
  2433. if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
  2434. (qdf_net_buf_track_free_list_count >
  2435. qdf_net_buf_track_used_list_count << 1)) {
  2436. kmem_cache_free(nbuf_tracking_cache, node);
  2437. } else {
  2438. node->p_next = qdf_net_buf_track_free_list;
  2439. qdf_net_buf_track_free_list = node;
  2440. qdf_net_buf_track_free_list_count++;
  2441. }
  2442. update_max_free();
  2443. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2444. }
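/*
 * Example of the shrink heuristic above with FREEQ_POOLSIZE = 2048: if
 * 5000 trackers are on the freelist while 2000 are in use, a freed node is
 * returned to the kmem_cache (5000 > 2048 and 5000 > 2 * 2000); if only
 * 3000 are free against the same 2000 in use, the node is kept on the
 * freelist (3000 is not greater than 4000).
 */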
  2445. /**
  2446. * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
  2447. *
  2448. * Removes a 'warmup time' characteristic of the freelist. Prefilling
  2449. * the freelist first makes it performant for the first iperf udp burst
  2450. * as well as steady state.
  2451. *
  2452. * Return: None
  2453. */
  2454. static void qdf_nbuf_track_prefill(void)
  2455. {
  2456. int i;
  2457. QDF_NBUF_TRACK *node, *head;
  2458. /* prepopulate the freelist */
  2459. head = NULL;
  2460. for (i = 0; i < FREEQ_POOLSIZE; i++) {
  2461. node = qdf_nbuf_track_alloc();
  2462. if (!node)
  2463. continue;
  2464. node->p_next = head;
  2465. head = node;
  2466. }
  2467. while (head) {
  2468. node = head->p_next;
  2469. qdf_nbuf_track_free(head);
  2470. head = node;
  2471. }
  2472. /* prefilled buffers should not count as used */
  2473. qdf_net_buf_track_max_used = 0;
  2474. }
  2475. /**
  2476. * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
  2477. *
  2478. * This initializes the memory manager for the nbuf tracking cookies. Because
  2479. * these cookies are all the same size and only used in this feature, we can
  2480. * use a kmem_cache to provide tracking as well as to speed up allocations.
  2481. * To avoid the overhead of allocating and freeing the buffers (including SLUB
  2482. * features) a freelist is prepopulated here.
  2483. *
  2484. * Return: None
  2485. */
  2486. static void qdf_nbuf_track_memory_manager_create(void)
  2487. {
  2488. spin_lock_init(&qdf_net_buf_track_free_list_lock);
  2489. nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
  2490. sizeof(QDF_NBUF_TRACK),
  2491. 0, 0, NULL);
  2492. qdf_nbuf_track_prefill();
  2493. }
/**
 * qdf_nbuf_track_memory_manager_destroy() - tear down the manager for nbuf
 * tracking cookies
 *
 * Empty the freelist and print usage statistics when the manager is no
 * longer needed. The kmem_cache is also destroyed here so that it can warn
 * if any nbuf tracking cookies were leaked.
 *
 * Return: None
 */
  2503. static void qdf_nbuf_track_memory_manager_destroy(void)
  2504. {
  2505. QDF_NBUF_TRACK *node, *tmp;
  2506. unsigned long irq_flag;
  2507. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2508. node = qdf_net_buf_track_free_list;
  2509. if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
  2510. qdf_print("%s: unexpectedly large max_used count %d",
  2511. __func__, qdf_net_buf_track_max_used);
  2512. if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
  2513. qdf_print("%s: %d unused trackers were allocated",
  2514. __func__,
  2515. qdf_net_buf_track_max_allocated -
  2516. qdf_net_buf_track_max_used);
  2517. if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
  2518. qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
  2519. qdf_print("%s: check freelist shrinking functionality",
  2520. __func__);
  2521. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2522. "%s: %d residual freelist size",
  2523. __func__, qdf_net_buf_track_free_list_count);
  2524. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2525. "%s: %d max freelist size observed",
  2526. __func__, qdf_net_buf_track_max_free);
  2527. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2528. "%s: %d max buffers used observed",
  2529. __func__, qdf_net_buf_track_max_used);
  2530. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2531. "%s: %d max buffers allocated observed",
  2532. __func__, qdf_net_buf_track_max_allocated);
  2533. while (node) {
  2534. tmp = node;
  2535. node = node->p_next;
  2536. kmem_cache_free(nbuf_tracking_cache, tmp);
  2537. qdf_net_buf_track_free_list_count--;
  2538. }
  2539. if (qdf_net_buf_track_free_list_count != 0)
  2540. qdf_info("%d unfreed tracking memory lost in freelist",
  2541. qdf_net_buf_track_free_list_count);
  2542. if (qdf_net_buf_track_used_list_count != 0)
  2543. qdf_info("%d unfreed tracking memory still in use",
  2544. qdf_net_buf_track_used_list_count);
  2545. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2546. kmem_cache_destroy(nbuf_tracking_cache);
  2547. qdf_net_buf_track_free_list = NULL;
  2548. }
  2549. void qdf_net_buf_debug_init(void)
  2550. {
  2551. uint32_t i;
  2552. is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
  2553. if (is_initial_mem_debug_disabled)
  2554. return;
  2555. qdf_atomic_set(&qdf_nbuf_history_index, -1);
  2556. qdf_nbuf_map_tracking_init();
  2557. qdf_nbuf_smmu_map_tracking_init();
  2558. qdf_nbuf_track_memory_manager_create();
  2559. for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  2560. gp_qdf_net_buf_track_tbl[i] = NULL;
  2561. spin_lock_init(&g_qdf_net_buf_track_lock[i]);
  2562. }
  2563. }
  2564. qdf_export_symbol(qdf_net_buf_debug_init);
  2565. void qdf_net_buf_debug_exit(void)
  2566. {
  2567. uint32_t i;
  2568. uint32_t count = 0;
  2569. unsigned long irq_flag;
  2570. QDF_NBUF_TRACK *p_node;
  2571. QDF_NBUF_TRACK *p_prev;
  2572. if (is_initial_mem_debug_disabled)
  2573. return;
  2574. for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  2575. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2576. p_node = gp_qdf_net_buf_track_tbl[i];
  2577. while (p_node) {
  2578. p_prev = p_node;
  2579. p_node = p_node->p_next;
  2580. count++;
  2581. qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
  2582. p_prev->func_name, p_prev->line_num,
  2583. p_prev->size, p_prev->net_buf);
  2584. qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
  2585. p_prev->map_func_name,
  2586. p_prev->map_line_num,
  2587. p_prev->unmap_func_name,
  2588. p_prev->unmap_line_num,
  2589. p_prev->is_nbuf_mapped);
  2590. qdf_nbuf_track_free(p_prev);
  2591. }
  2592. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2593. }
  2594. qdf_nbuf_track_memory_manager_destroy();
  2595. qdf_nbuf_map_tracking_deinit();
  2596. qdf_nbuf_smmu_map_tracking_deinit();
  2597. #ifdef CONFIG_HALT_KMEMLEAK
  2598. if (count) {
  2599. qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
  2600. QDF_BUG(0);
  2601. }
  2602. #endif
  2603. }
  2604. qdf_export_symbol(qdf_net_buf_debug_exit);
  2605. /**
  2606. * qdf_net_buf_debug_hash() - hash network buffer pointer
  2607. * @net_buf: network buffer
  2608. *
  2609. * Return: hash value
  2610. */
  2611. static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
  2612. {
  2613. uint32_t i;
  2614. i = (uint32_t) (((uintptr_t) net_buf) >> 4);
  2615. i += (uint32_t) (((uintptr_t) net_buf) >> 14);
  2616. i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
  2617. return i;
  2618. }
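/*
 * Worked example for the hash above, assuming for illustration a table of
 * 1024 buckets (QDF_NET_BUF_TRACK_MAX_SIZE must be a power of two for the
 * mask to work):
 *
 *	net_buf           = 0x4000   (hypothetical pointer value)
 *	net_buf >> 4      = 0x0400
 *	net_buf >> 14     = 0x0001
 *	sum               = 0x0401
 *	sum & (1024 - 1)  = 0x0001   -> bucket 1
 *
 * The >> 4 drops alignment bits that carry no entropy, and the second,
 * wider shift mixes in higher-order bits so buffers allocated close
 * together do not all collide in one bucket.
 */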
  2619. /**
  2620. * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
  2621. * @net_buf: network buffer
  2622. *
2623. * Return: pointer to the tracking node if the skb is found in the hash table,
2624. * else %NULL
  2625. */
  2626. static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
  2627. {
  2628. uint32_t i;
  2629. QDF_NBUF_TRACK *p_node;
  2630. i = qdf_net_buf_debug_hash(net_buf);
  2631. p_node = gp_qdf_net_buf_track_tbl[i];
  2632. while (p_node) {
  2633. if (p_node->net_buf == net_buf)
  2634. return p_node;
  2635. p_node = p_node->p_next;
  2636. }
  2637. return NULL;
  2638. }
  2639. void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
  2640. const char *func_name, uint32_t line_num)
  2641. {
  2642. uint32_t i;
  2643. unsigned long irq_flag;
  2644. QDF_NBUF_TRACK *p_node;
  2645. QDF_NBUF_TRACK *new_node;
  2646. if (is_initial_mem_debug_disabled)
  2647. return;
  2648. new_node = qdf_nbuf_track_alloc();
  2649. i = qdf_net_buf_debug_hash(net_buf);
  2650. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2651. p_node = qdf_net_buf_debug_look_up(net_buf);
  2652. if (p_node) {
  2653. qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
  2654. p_node->net_buf, p_node->func_name, p_node->line_num,
  2655. net_buf, func_name, line_num);
  2656. qdf_nbuf_track_free(new_node);
  2657. } else {
  2658. p_node = new_node;
  2659. if (p_node) {
  2660. p_node->net_buf = net_buf;
  2661. qdf_str_lcopy(p_node->func_name, func_name,
  2662. QDF_MEM_FUNC_NAME_SIZE);
  2663. p_node->line_num = line_num;
  2664. p_node->is_nbuf_mapped = false;
  2665. p_node->map_line_num = 0;
  2666. p_node->unmap_line_num = 0;
  2667. p_node->map_func_name[0] = '\0';
  2668. p_node->unmap_func_name[0] = '\0';
  2669. p_node->size = size;
  2670. p_node->time = qdf_get_log_timestamp();
  2671. qdf_net_buf_update_smmu_params(p_node);
  2672. qdf_mem_skb_inc(size);
  2673. p_node->p_next = gp_qdf_net_buf_track_tbl[i];
  2674. gp_qdf_net_buf_track_tbl[i] = p_node;
  2675. } else {
  2676. qdf_net_buf_track_fail_count++;
  2677. qdf_print(
  2678. "Mem alloc failed ! Could not track skb from %s %d of size %zu",
  2679. func_name, line_num, size);
  2680. }
  2681. }
  2682. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2683. }
  2684. qdf_export_symbol(qdf_net_buf_debug_add_node);
  2685. void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
  2686. uint32_t line_num)
  2687. {
  2688. uint32_t i;
  2689. unsigned long irq_flag;
  2690. QDF_NBUF_TRACK *p_node;
  2691. if (is_initial_mem_debug_disabled)
  2692. return;
  2693. i = qdf_net_buf_debug_hash(net_buf);
  2694. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2695. p_node = qdf_net_buf_debug_look_up(net_buf);
  2696. if (p_node) {
  2697. qdf_str_lcopy(p_node->func_name, kbasename(func_name),
  2698. QDF_MEM_FUNC_NAME_SIZE);
  2699. p_node->line_num = line_num;
  2700. }
  2701. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2702. }
  2703. qdf_export_symbol(qdf_net_buf_debug_update_node);
  2704. void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
  2705. const char *func_name,
  2706. uint32_t line_num)
  2707. {
  2708. uint32_t i;
  2709. unsigned long irq_flag;
  2710. QDF_NBUF_TRACK *p_node;
  2711. if (is_initial_mem_debug_disabled)
  2712. return;
  2713. i = qdf_net_buf_debug_hash(net_buf);
  2714. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2715. p_node = qdf_net_buf_debug_look_up(net_buf);
  2716. if (p_node) {
  2717. qdf_str_lcopy(p_node->map_func_name, func_name,
  2718. QDF_MEM_FUNC_NAME_SIZE);
  2719. p_node->map_line_num = line_num;
  2720. p_node->is_nbuf_mapped = true;
  2721. }
  2722. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2723. }
  2724. #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
  2725. void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
  2726. unsigned long iova,
  2727. unsigned long pa,
  2728. const char *func,
  2729. uint32_t line)
  2730. {
  2731. uint32_t i;
  2732. unsigned long irq_flag;
  2733. QDF_NBUF_TRACK *p_node;
  2734. if (is_initial_mem_debug_disabled)
  2735. return;
  2736. i = qdf_net_buf_debug_hash(nbuf);
  2737. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2738. p_node = qdf_net_buf_debug_look_up(nbuf);
  2739. if (p_node) {
  2740. qdf_str_lcopy(p_node->smmu_map_func_name, func,
  2741. QDF_MEM_FUNC_NAME_SIZE);
  2742. p_node->smmu_map_line_num = line;
  2743. p_node->is_nbuf_smmu_mapped = true;
  2744. p_node->smmu_map_iova_addr = iova;
  2745. p_node->smmu_map_pa_addr = pa;
  2746. }
  2747. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2748. }
  2749. void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
  2750. unsigned long iova,
  2751. unsigned long pa,
  2752. const char *func,
  2753. uint32_t line)
  2754. {
  2755. uint32_t i;
  2756. unsigned long irq_flag;
  2757. QDF_NBUF_TRACK *p_node;
  2758. if (is_initial_mem_debug_disabled)
  2759. return;
  2760. i = qdf_net_buf_debug_hash(nbuf);
  2761. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2762. p_node = qdf_net_buf_debug_look_up(nbuf);
  2763. if (p_node) {
  2764. qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
  2765. QDF_MEM_FUNC_NAME_SIZE);
  2766. p_node->smmu_unmap_line_num = line;
  2767. p_node->is_nbuf_smmu_mapped = false;
  2768. p_node->smmu_unmap_iova_addr = iova;
  2769. p_node->smmu_unmap_pa_addr = pa;
  2770. }
  2771. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2772. }
  2773. #endif
  2774. void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
  2775. const char *func_name,
  2776. uint32_t line_num)
  2777. {
  2778. uint32_t i;
  2779. unsigned long irq_flag;
  2780. QDF_NBUF_TRACK *p_node;
  2781. if (is_initial_mem_debug_disabled)
  2782. return;
  2783. i = qdf_net_buf_debug_hash(net_buf);
  2784. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2785. p_node = qdf_net_buf_debug_look_up(net_buf);
  2786. if (p_node) {
  2787. qdf_str_lcopy(p_node->unmap_func_name, func_name,
  2788. QDF_MEM_FUNC_NAME_SIZE);
  2789. p_node->unmap_line_num = line_num;
  2790. p_node->is_nbuf_mapped = false;
  2791. }
  2792. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2793. }
  2794. void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
  2795. {
  2796. uint32_t i;
  2797. QDF_NBUF_TRACK *p_head;
  2798. QDF_NBUF_TRACK *p_node = NULL;
  2799. unsigned long irq_flag;
  2800. QDF_NBUF_TRACK *p_prev;
  2801. if (is_initial_mem_debug_disabled)
  2802. return;
  2803. i = qdf_net_buf_debug_hash(net_buf);
  2804. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2805. p_head = gp_qdf_net_buf_track_tbl[i];
  2806. /* Unallocated SKB */
  2807. if (!p_head)
  2808. goto done;
  2809. p_node = p_head;
  2810. /* Found at head of the table */
  2811. if (p_head->net_buf == net_buf) {
  2812. gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
  2813. goto done;
  2814. }
  2815. /* Search in collision list */
  2816. while (p_node) {
  2817. p_prev = p_node;
  2818. p_node = p_node->p_next;
  2819. if ((p_node) && (p_node->net_buf == net_buf)) {
  2820. p_prev->p_next = p_node->p_next;
  2821. break;
  2822. }
  2823. }
  2824. done:
  2825. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2826. if (p_node) {
  2827. qdf_mem_skb_dec(p_node->size);
  2828. qdf_nbuf_track_free(p_node);
  2829. } else {
  2830. if (qdf_net_buf_track_fail_count) {
  2831. qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
  2832. net_buf, qdf_net_buf_track_fail_count);
  2833. } else
  2834. QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
  2835. net_buf);
  2836. }
  2837. }
  2838. qdf_export_symbol(qdf_net_buf_debug_delete_node);
  2839. void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
  2840. const char *func_name, uint32_t line_num)
  2841. {
  2842. qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
  2843. if (is_initial_mem_debug_disabled)
  2844. return;
  2845. while (ext_list) {
2846. /*
2847. * Take care to add a node for each nbuf chained via frag_list
2848. * in a jumbo packet
2849. */
  2850. qdf_nbuf_t next;
  2851. next = qdf_nbuf_queue_next(ext_list);
  2852. qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
  2853. ext_list = next;
  2854. }
  2855. qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
  2856. }
  2857. qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
  2858. void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
  2859. {
  2860. qdf_nbuf_t ext_list;
  2861. if (is_initial_mem_debug_disabled)
  2862. return;
  2863. ext_list = qdf_nbuf_get_ext_list(net_buf);
  2864. while (ext_list) {
2865. /*
2866. * Take care to free the node for each nbuf chained via frag_list
2867. * in a jumbo packet
2868. */
  2869. qdf_nbuf_t next;
  2870. next = qdf_nbuf_queue_next(ext_list);
  2871. if (qdf_nbuf_get_users(ext_list) > 1) {
  2872. ext_list = next;
  2873. continue;
  2874. }
  2875. qdf_net_buf_debug_delete_node(ext_list);
  2876. ext_list = next;
  2877. }
  2878. if (qdf_nbuf_get_users(net_buf) > 1)
  2879. return;
  2880. qdf_net_buf_debug_delete_node(net_buf);
  2881. }
  2882. qdf_export_symbol(qdf_net_buf_debug_release_skb);
  2883. qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
  2884. int reserve, int align, int prio,
  2885. const char *func, uint32_t line)
  2886. {
  2887. qdf_nbuf_t nbuf;
  2888. if (is_initial_mem_debug_disabled)
  2889. return __qdf_nbuf_alloc(osdev, size,
  2890. reserve, align,
  2891. prio, func, line);
  2892. nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
  2893. /* Store SKB in internal QDF tracking table */
  2894. if (qdf_likely(nbuf)) {
  2895. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2896. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2897. } else {
  2898. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2899. }
  2900. return nbuf;
  2901. }
  2902. qdf_export_symbol(qdf_nbuf_alloc_debug);
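/*
 * Illustrative usage, assuming the usual qdf_nbuf wrapper macros that pass
 * the caller's __func__ and __LINE__ into the _debug variants (the macros
 * themselves live in the qdf_nbuf headers, not in this file):
 *
 *	nbuf = qdf_nbuf_alloc(osdev, len, 0, 4, 0);
 *	...
 *	qdf_nbuf_free(nbuf);    (drops the tracking node added above)
 *
 * Leak reports printed by qdf_net_buf_debug_exit() then point at the real
 * allocation site rather than at this wrapper.
 */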
  2903. qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
  2904. int reserve, int align, int prio,
  2905. const char *func, uint32_t line)
  2906. {
  2907. qdf_nbuf_t nbuf;
  2908. if (is_initial_mem_debug_disabled)
  2909. return __qdf_nbuf_frag_alloc(osdev, size,
  2910. reserve, align,
  2911. prio, func, line);
  2912. nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
  2913. func, line);
  2914. /* Store SKB in internal QDF tracking table */
  2915. if (qdf_likely(nbuf)) {
  2916. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2917. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2918. } else {
  2919. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2920. }
  2921. return nbuf;
  2922. }
  2923. qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
  2924. qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
  2925. const char *func, uint32_t line)
  2926. {
  2927. qdf_nbuf_t nbuf;
  2928. if (is_initial_mem_debug_disabled)
  2929. return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
  2930. line);
  2931. nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
  2932. /* Store SKB in internal QDF tracking table */
  2933. if (qdf_likely(nbuf)) {
  2934. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2935. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2936. } else {
  2937. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2938. }
  2939. return nbuf;
  2940. }
  2941. qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
  2942. void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
  2943. {
  2944. qdf_nbuf_t ext_list;
  2945. qdf_frag_t p_frag;
  2946. uint32_t num_nr_frags;
  2947. uint32_t idx = 0;
  2948. if (qdf_unlikely(!nbuf))
  2949. return;
  2950. if (is_initial_mem_debug_disabled)
  2951. goto free_buf;
  2952. if (qdf_nbuf_get_users(nbuf) > 1)
  2953. goto free_buf;
  2954. /* Remove SKB from internal QDF tracking table */
  2955. qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
  2956. qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
  2957. qdf_net_buf_debug_delete_node(nbuf);
  2958. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
  2959. /* Take care to delete the debug entries for frags */
  2960. num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2961. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  2962. while (idx < num_nr_frags) {
  2963. p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
  2964. if (qdf_likely(p_frag))
  2965. qdf_frag_debug_refcount_dec(p_frag, func, line);
  2966. idx++;
  2967. }
  2968. /*
  2969. * Take care to update the debug entries for frag_list and also
  2970. * for the frags attached to frag_list
  2971. */
  2972. ext_list = qdf_nbuf_get_ext_list(nbuf);
  2973. while (ext_list) {
  2974. if (qdf_nbuf_get_users(ext_list) == 1) {
  2975. qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
  2976. line);
  2977. qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
  2978. idx = 0;
  2979. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  2980. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  2981. while (idx < num_nr_frags) {
  2982. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  2983. if (qdf_likely(p_frag))
  2984. qdf_frag_debug_refcount_dec(p_frag,
  2985. func, line);
  2986. idx++;
  2987. }
  2988. qdf_net_buf_debug_delete_node(ext_list);
  2989. }
  2990. ext_list = qdf_nbuf_queue_next(ext_list);
  2991. }
  2992. free_buf:
  2993. __qdf_nbuf_free(nbuf);
  2994. }
  2995. qdf_export_symbol(qdf_nbuf_free_debug);
  2996. struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
  2997. const char *func, uint32_t line)
  2998. {
  2999. struct sk_buff *skb;
  3000. int flags = GFP_KERNEL;
  3001. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3002. flags = GFP_ATOMIC;
  3003. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3004. /*
3005. * Observed that kcompactd burns CPU trying to assemble order-3 pages.
3006. * __netdev_alloc_skb() falls back to 4k pages when a high-order
3007. * allocation fails, so there is no need to push that hard.
3008. * Let kcompactd rest in peace.
  3009. */
  3010. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3011. #endif
  3012. }
  3013. skb = __netdev_alloc_skb(NULL, size, flags);
  3014. if (qdf_likely(is_initial_mem_debug_disabled)) {
  3015. if (qdf_likely(skb))
  3016. qdf_nbuf_count_inc(skb);
  3017. } else {
  3018. if (qdf_likely(skb)) {
  3019. qdf_nbuf_count_inc(skb);
  3020. qdf_net_buf_debug_add_node(skb, size, func, line);
  3021. qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
  3022. } else {
  3023. qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
  3024. }
  3025. }
  3026. return skb;
  3027. }
  3028. qdf_export_symbol(__qdf_nbuf_alloc_simple);
  3029. void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
  3030. uint32_t line)
  3031. {
  3032. if (qdf_likely(nbuf)) {
  3033. if (is_initial_mem_debug_disabled) {
  3034. dev_kfree_skb_any(nbuf);
  3035. } else {
  3036. qdf_nbuf_free_debug(nbuf, func, line);
  3037. }
  3038. }
  3039. }
  3040. qdf_export_symbol(qdf_nbuf_free_debug_simple);
  3041. qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
  3042. {
  3043. uint32_t num_nr_frags;
  3044. uint32_t idx = 0;
  3045. qdf_nbuf_t ext_list;
  3046. qdf_frag_t p_frag;
  3047. qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
  3048. if (is_initial_mem_debug_disabled)
  3049. return cloned_buf;
  3050. if (qdf_unlikely(!cloned_buf))
  3051. return NULL;
  3052. /* Take care to update the debug entries for frags */
  3053. num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
  3054. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  3055. while (idx < num_nr_frags) {
  3056. p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
  3057. if (qdf_likely(p_frag))
  3058. qdf_frag_debug_refcount_inc(p_frag, func, line);
  3059. idx++;
  3060. }
  3061. /* Take care to update debug entries for frags attached to frag_list */
  3062. ext_list = qdf_nbuf_get_ext_list(cloned_buf);
  3063. while (ext_list) {
  3064. idx = 0;
  3065. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  3066. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  3067. while (idx < num_nr_frags) {
  3068. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  3069. if (qdf_likely(p_frag))
  3070. qdf_frag_debug_refcount_inc(p_frag, func, line);
  3071. idx++;
  3072. }
  3073. ext_list = qdf_nbuf_queue_next(ext_list);
  3074. }
  3075. /* Store SKB in internal QDF tracking table */
  3076. qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
  3077. qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
  3078. return cloned_buf;
  3079. }
  3080. qdf_export_symbol(qdf_nbuf_clone_debug);
  3081. qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
  3082. {
  3083. qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
  3084. if (is_initial_mem_debug_disabled)
  3085. return copied_buf;
  3086. if (qdf_unlikely(!copied_buf))
  3087. return NULL;
  3088. /* Store SKB in internal QDF tracking table */
  3089. qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
  3090. qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
  3091. return copied_buf;
  3092. }
  3093. qdf_export_symbol(qdf_nbuf_copy_debug);
  3094. qdf_nbuf_t
  3095. qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
  3096. const char *func, uint32_t line)
  3097. {
  3098. qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
  3099. if (qdf_unlikely(!copied_buf))
  3100. return NULL;
  3101. if (is_initial_mem_debug_disabled)
  3102. return copied_buf;
  3103. /* Store SKB in internal QDF tracking table */
  3104. qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
  3105. qdf_nbuf_history_add(copied_buf, func, line,
  3106. QDF_NBUF_ALLOC_COPY_EXPAND);
  3107. return copied_buf;
  3108. }
  3109. qdf_export_symbol(qdf_nbuf_copy_expand_debug);
  3110. qdf_nbuf_t
  3111. qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
  3112. uint32_t line_num)
  3113. {
  3114. qdf_nbuf_t unshared_buf;
  3115. qdf_frag_t p_frag;
  3116. uint32_t num_nr_frags;
  3117. uint32_t idx = 0;
  3118. qdf_nbuf_t ext_list, next;
  3119. if (is_initial_mem_debug_disabled)
  3120. return __qdf_nbuf_unshare(buf);
3121. /* Not a cloned buffer, nothing to do */
  3122. if (!qdf_nbuf_is_cloned(buf))
  3123. return buf;
  3124. if (qdf_nbuf_get_users(buf) > 1)
  3125. goto unshare_buf;
  3126. /* Take care to delete the debug entries for frags */
  3127. num_nr_frags = qdf_nbuf_get_nr_frags(buf);
  3128. while (idx < num_nr_frags) {
  3129. p_frag = qdf_nbuf_get_frag_addr(buf, idx);
  3130. if (qdf_likely(p_frag))
  3131. qdf_frag_debug_refcount_dec(p_frag, func_name,
  3132. line_num);
  3133. idx++;
  3134. }
  3135. qdf_net_buf_debug_delete_node(buf);
  3136. /* Take care of jumbo packet connected using frag_list and frags */
  3137. ext_list = qdf_nbuf_get_ext_list(buf);
  3138. while (ext_list) {
  3139. idx = 0;
  3140. next = qdf_nbuf_queue_next(ext_list);
  3141. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  3142. if (qdf_nbuf_get_users(ext_list) > 1) {
  3143. ext_list = next;
  3144. continue;
  3145. }
  3146. while (idx < num_nr_frags) {
  3147. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  3148. if (qdf_likely(p_frag))
  3149. qdf_frag_debug_refcount_dec(p_frag, func_name,
  3150. line_num);
  3151. idx++;
  3152. }
  3153. qdf_net_buf_debug_delete_node(ext_list);
  3154. ext_list = next;
  3155. }
  3156. unshare_buf:
  3157. unshared_buf = __qdf_nbuf_unshare(buf);
  3158. if (qdf_likely(unshared_buf))
  3159. qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
  3160. line_num);
  3161. return unshared_buf;
  3162. }
  3163. qdf_export_symbol(qdf_nbuf_unshare_debug);
  3164. void
  3165. qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
  3166. const char *func, uint32_t line)
  3167. {
  3168. qdf_nbuf_t buf;
  3169. if (qdf_nbuf_queue_empty(nbuf_queue_head))
  3170. return;
  3171. if (is_initial_mem_debug_disabled)
  3172. return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
  3173. while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
  3174. qdf_nbuf_free_debug(buf, func, line);
  3175. }
  3176. qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
  3177. #endif /* NBUF_MEMORY_DEBUG */
  3178. #if defined(QCA_DP_NBUF_FAST_PPEDS)
  3179. #if defined(NBUF_MEMORY_DEBUG)
  3180. struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
  3181. const char *func, uint32_t line)
  3182. {
  3183. struct sk_buff *skb;
  3184. int flags = GFP_KERNEL;
  3185. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3186. flags = GFP_ATOMIC;
  3187. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3188. /*
3189. * Observed that kcompactd burns CPU trying to assemble order-3
3190. * pages. __netdev_alloc_skb() falls back to 4k pages when a
3191. * high-order allocation fails, so there is no need to push
3192. * that hard.
3193. * Let kcompactd rest in peace.
  3194. */
  3195. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3196. #endif
  3197. }
  3198. skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
  3199. if (qdf_likely(is_initial_mem_debug_disabled)) {
  3200. if (qdf_likely(skb))
  3201. qdf_nbuf_count_inc(skb);
  3202. } else {
  3203. if (qdf_likely(skb)) {
  3204. qdf_nbuf_count_inc(skb);
  3205. qdf_net_buf_debug_add_node(skb, size, func, line);
  3206. qdf_nbuf_history_add(skb, func, line,
  3207. QDF_NBUF_ALLOC);
  3208. } else {
  3209. qdf_nbuf_history_add(skb, func, line,
  3210. QDF_NBUF_ALLOC_FAILURE);
  3211. }
  3212. }
  3213. return skb;
  3214. }
  3215. #else
  3216. struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
  3217. const char *func, uint32_t line)
  3218. {
  3219. struct sk_buff *skb;
  3220. int flags = GFP_KERNEL;
  3221. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3222. flags = GFP_ATOMIC;
  3223. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3224. /*
3225. * Observed that kcompactd burns CPU trying to assemble order-3
3226. * pages. __netdev_alloc_skb() falls back to 4k pages when a
3227. * high-order allocation fails, so there is no need to push
3228. * that hard.
3229. * Let kcompactd rest in peace.
  3230. */
  3231. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3232. #endif
  3233. }
  3234. skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
  3235. if (qdf_likely(skb))
  3236. qdf_nbuf_count_inc(skb);
  3237. return skb;
  3238. }
  3239. #endif
  3240. qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
  3241. #endif
  3242. #if defined(FEATURE_TSO)
  3243. /**
  3244. * struct qdf_tso_cmn_seg_info_t - TSO common info structure
  3245. *
  3246. * @ethproto: ethernet type of the msdu
  3247. * @ip_tcp_hdr_len: ip + tcp length for the msdu
  3248. * @l2_len: L2 length for the msdu
  3249. * @eit_hdr: pointer to EIT header
  3250. * @eit_hdr_len: EIT header length for the msdu
  3251. * @eit_hdr_dma_map_addr: dma addr for EIT header
  3252. * @tcphdr: pointer to tcp header
  3253. * @ipv4_csum_en: ipv4 checksum enable
  3254. * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
  3255. * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
  3256. * @ip_id: IP id
  3257. * @tcp_seq_num: TCP sequence number
  3258. *
  3259. * This structure holds the TSO common info that is common
  3260. * across all the TCP segments of the jumbo packet.
  3261. */
  3262. struct qdf_tso_cmn_seg_info_t {
  3263. uint16_t ethproto;
  3264. uint16_t ip_tcp_hdr_len;
  3265. uint16_t l2_len;
  3266. uint8_t *eit_hdr;
  3267. uint32_t eit_hdr_len;
  3268. qdf_dma_addr_t eit_hdr_dma_map_addr;
  3269. struct tcphdr *tcphdr;
  3270. uint16_t ipv4_csum_en;
  3271. uint16_t tcp_ipv4_csum_en;
  3272. uint16_t tcp_ipv6_csum_en;
  3273. uint16_t ip_id;
  3274. uint32_t tcp_seq_num;
  3275. };
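/*
 * Worked example (illustrative values): for an untagged IPv4 TCP packet
 * with no IP or TCP options,
 *
 *	l2_len          = 14                  (Ethernet header)
 *	eit_hdr_len     = 14 + 20 + 20 = 54   (Ethernet + IP + TCP)
 *	ip_tcp_hdr_len  = 54 - 14 = 40        (IPv4, total-length style)
 *
 * For IPv6 only tcp_hdrlen() (20 here) goes into ip_tcp_hdr_len, since the
 * IPv6 payload-length field excludes the IPv6 header itself.
 */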
  3276. /**
3277. * qdf_nbuf_adj_tso_frag() - adjust skb so the TSO EIT header ends 8-byte aligned
3278. * @skb: network buffer
3279. *
3280. * Return: byte offset by which the headers were shifted (0 if none needed).
  3281. */
  3282. #ifdef FIX_TXDMA_LIMITATION
  3283. static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
  3284. {
  3285. uint32_t eit_hdr_len;
  3286. uint8_t *eit_hdr;
  3287. uint8_t byte_8_align_offset;
  3288. eit_hdr = skb->data;
  3289. eit_hdr_len = (skb_transport_header(skb)
  3290. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3291. byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
  3292. if (qdf_unlikely(byte_8_align_offset)) {
  3293. TSO_DEBUG("%pK,Len %d %d",
  3294. eit_hdr, eit_hdr_len, byte_8_align_offset);
  3295. if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
  3296. TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
  3297. __LINE__, skb->head, skb->data,
  3298. byte_8_align_offset);
  3299. return 0;
  3300. }
  3301. qdf_nbuf_push_head(skb, byte_8_align_offset);
  3302. qdf_mem_move(skb->data,
  3303. skb->data + byte_8_align_offset,
  3304. eit_hdr_len);
  3305. skb->len -= byte_8_align_offset;
  3306. skb->mac_header -= byte_8_align_offset;
  3307. skb->network_header -= byte_8_align_offset;
  3308. skb->transport_header -= byte_8_align_offset;
  3309. }
  3310. return byte_8_align_offset;
  3311. }
  3312. #else
  3313. static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
  3314. {
  3315. return 0;
  3316. }
  3317. #endif
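/*
 * Alignment example for qdf_nbuf_adj_tso_frag() using hypothetical
 * addresses: if skb->data = 0x1003 and eit_hdr_len = 54 (0x36), the EIT
 * header would end at 0x1039, so byte_8_align_offset = 0x1039 & 0x7 = 1.
 * The Ethernet/IP/TCP headers are then copied one byte lower (new
 * skb->data = 0x1002) so that they end at the 8-byte-aligned address
 * 0x1038, while the TCP payload itself is left where it is.
 */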
  3318. #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
  3319. void qdf_record_nbuf_nbytes(
  3320. uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
  3321. {
  3322. __qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
  3323. }
  3324. qdf_export_symbol(qdf_record_nbuf_nbytes);
  3325. #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
  3326. /**
  3327. * qdf_nbuf_tso_map_frag() - Map TSO segment
  3328. * @osdev: qdf device handle
  3329. * @tso_frag_vaddr: addr of tso fragment
  3330. * @nbytes: number of bytes
  3331. * @dir: direction
  3332. *
  3333. * Map TSO segment and for MCL record the amount of memory mapped
  3334. *
  3335. * Return: DMA address of mapped TSO fragment in success and
  3336. * NULL in case of DMA mapping failure
  3337. */
  3338. static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
  3339. qdf_device_t osdev, void *tso_frag_vaddr,
  3340. uint32_t nbytes, qdf_dma_dir_t dir)
  3341. {
  3342. qdf_dma_addr_t tso_frag_paddr = 0;
  3343. tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
  3344. nbytes, __qdf_dma_dir_to_os(dir));
  3345. if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
  3346. qdf_err("DMA mapping error!");
  3347. qdf_assert_always(0);
  3348. return 0;
  3349. }
  3350. qdf_record_nbuf_nbytes(nbytes, dir, true);
  3351. return tso_frag_paddr;
  3352. }
  3353. /**
  3354. * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
  3355. * @osdev: qdf device handle
  3356. * @tso_frag_paddr: DMA addr of tso fragment
  3357. * @dir: direction
  3358. * @nbytes: number of bytes
  3359. *
  3360. * Unmap TSO segment and for MCL record the amount of memory mapped
  3361. *
  3362. * Return: None
  3363. */
  3364. static inline void qdf_nbuf_tso_unmap_frag(
  3365. qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
  3366. uint32_t nbytes, qdf_dma_dir_t dir)
  3367. {
  3368. qdf_record_nbuf_nbytes(nbytes, dir, false);
  3369. dma_unmap_single(osdev->dev, tso_frag_paddr,
  3370. nbytes, __qdf_dma_dir_to_os(dir));
  3371. }
  3372. /**
  3373. * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
  3374. * information
  3375. * @osdev: qdf device handle
  3376. * @skb: skb buffer
  3377. * @tso_info: Parameters common to all segments
  3378. *
  3379. * Get the TSO information that is common across all the TCP
  3380. * segments of the jumbo packet
  3381. *
  3382. * Return: 0 - success 1 - failure
  3383. */
  3384. static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
  3385. struct sk_buff *skb,
  3386. struct qdf_tso_cmn_seg_info_t *tso_info)
  3387. {
  3388. /* Get ethernet type and ethernet header length */
  3389. tso_info->ethproto = vlan_get_protocol(skb);
  3390. /* Determine whether this is an IPv4 or IPv6 packet */
  3391. if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
  3392. /* for IPv4, get the IP ID and enable TCP and IP csum */
  3393. struct iphdr *ipv4_hdr = ip_hdr(skb);
  3394. tso_info->ip_id = ntohs(ipv4_hdr->id);
  3395. tso_info->ipv4_csum_en = 1;
  3396. tso_info->tcp_ipv4_csum_en = 1;
  3397. if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
  3398. qdf_err("TSO IPV4 proto 0x%x not TCP",
  3399. ipv4_hdr->protocol);
  3400. return 1;
  3401. }
  3402. } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
  3403. /* for IPv6, enable TCP csum. No IP ID or IP csum */
  3404. tso_info->tcp_ipv6_csum_en = 1;
  3405. } else {
  3406. qdf_err("TSO: ethertype 0x%x is not supported!",
  3407. tso_info->ethproto);
  3408. return 1;
  3409. }
  3410. tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
  3411. tso_info->tcphdr = tcp_hdr(skb);
  3412. tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
  3413. /* get pointer to the ethernet + IP + TCP header and their length */
  3414. tso_info->eit_hdr = skb->data;
  3415. tso_info->eit_hdr_len = (skb_transport_header(skb)
  3416. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3417. tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
  3418. osdev, tso_info->eit_hdr,
  3419. tso_info->eit_hdr_len,
  3420. QDF_DMA_TO_DEVICE);
  3421. if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
  3422. return 1;
  3423. if (tso_info->ethproto == htons(ETH_P_IP)) {
  3424. /* include IPv4 header length for IPV4 (total length) */
  3425. tso_info->ip_tcp_hdr_len =
  3426. tso_info->eit_hdr_len - tso_info->l2_len;
  3427. } else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
  3428. /* exclude IPv6 header length for IPv6 (payload length) */
  3429. tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
  3430. }
  3431. /*
  3432. * The length of the payload (application layer data) is added to
  3433. * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
  3434. * descriptor.
  3435. */
  3436. TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__,
  3437. tso_info->tcp_seq_num,
  3438. tso_info->eit_hdr_len,
  3439. tso_info->l2_len,
  3440. skb->len);
  3441. return 0;
  3442. }
  3443. /**
  3444. * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
  3445. *
  3446. * @curr_seg: Segment whose contents are initialized
  3447. * @tso_cmn_info: Parameters common to all segments
  3448. *
  3449. * Return: None
  3450. */
  3451. static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
  3452. struct qdf_tso_seg_elem_t *curr_seg,
  3453. struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
  3454. {
  3455. /* Initialize the flags to 0 */
  3456. memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
  3457. /*
  3458. * The following fields remain the same across all segments of
  3459. * a jumbo packet
  3460. */
  3461. curr_seg->seg.tso_flags.tso_enable = 1;
  3462. curr_seg->seg.tso_flags.ipv4_checksum_en =
  3463. tso_cmn_info->ipv4_csum_en;
  3464. curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
  3465. tso_cmn_info->tcp_ipv6_csum_en;
  3466. curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
  3467. tso_cmn_info->tcp_ipv4_csum_en;
  3468. curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
  3469. /* The following fields change for the segments */
  3470. curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
  3471. tso_cmn_info->ip_id++;
  3472. curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
  3473. curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
  3474. curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
  3475. curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
  3476. curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
  3477. curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
  3478. curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
  3479. /*
  3480. * First fragment for each segment always contains the ethernet,
  3481. * IP and TCP header
  3482. */
  3483. curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
  3484. curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
  3485. curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
  3486. curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
  3487. TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
  3488. __func__, __LINE__, tso_cmn_info->eit_hdr,
  3489. tso_cmn_info->eit_hdr_len,
  3490. curr_seg->seg.tso_flags.tcp_seq_num,
  3491. curr_seg->seg.total_len);
  3492. qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
  3493. }
  3494. uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
  3495. struct qdf_tso_info_t *tso_info)
  3496. {
  3497. /* common across all segments */
  3498. struct qdf_tso_cmn_seg_info_t tso_cmn_info;
  3499. /* segment specific */
  3500. void *tso_frag_vaddr;
  3501. qdf_dma_addr_t tso_frag_paddr = 0;
  3502. uint32_t num_seg = 0;
  3503. struct qdf_tso_seg_elem_t *curr_seg;
  3504. struct qdf_tso_num_seg_elem_t *total_num_seg;
  3505. skb_frag_t *frag = NULL;
3506. uint32_t tso_frag_len = 0; /* tso segment's fragment length */
3507. uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
  3508. uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
  3509. uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
  3510. int j = 0; /* skb fragment index */
  3511. uint8_t byte_8_align_offset;
  3512. memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
  3513. total_num_seg = tso_info->tso_num_seg_list;
  3514. curr_seg = tso_info->tso_seg_list;
  3515. total_num_seg->num_seg.tso_cmn_num_seg = 0;
  3516. byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
  3517. if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
  3518. skb, &tso_cmn_info))) {
  3519. qdf_warn("TSO: error getting common segment info");
  3520. return 0;
  3521. }
  3522. /* length of the first chunk of data in the skb */
  3523. skb_frag_len = skb_headlen(skb);
  3524. /* the 0th tso segment's 0th fragment always contains the EIT header */
  3525. /* update the remaining skb fragment length and TSO segment length */
  3526. skb_frag_len -= tso_cmn_info.eit_hdr_len;
  3527. skb_proc -= tso_cmn_info.eit_hdr_len;
  3528. /* get the address to the next tso fragment */
  3529. tso_frag_vaddr = skb->data +
  3530. tso_cmn_info.eit_hdr_len +
  3531. byte_8_align_offset;
  3532. /* get the length of the next tso fragment */
  3533. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3534. if (tso_frag_len != 0) {
  3535. tso_frag_paddr = qdf_nbuf_tso_map_frag(
  3536. osdev, tso_frag_vaddr, tso_frag_len,
  3537. QDF_DMA_TO_DEVICE);
  3538. if (qdf_unlikely(!tso_frag_paddr))
  3539. return 0;
  3540. }
  3541. TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
  3542. __LINE__, skb_frag_len, tso_frag_len);
  3543. num_seg = tso_info->num_segs;
  3544. tso_info->num_segs = 0;
  3545. tso_info->is_tso = 1;
  3546. while (num_seg && curr_seg) {
  3547. int i = 1; /* tso fragment index */
  3548. uint8_t more_tso_frags = 1;
  3549. curr_seg->seg.num_frags = 0;
  3550. tso_info->num_segs++;
  3551. total_num_seg->num_seg.tso_cmn_num_seg++;
  3552. __qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
  3553. &tso_cmn_info);
  3554. /* If TCP PSH flag is set, set it in the last or only segment */
  3555. if (num_seg == 1)
  3556. curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
  3557. if (unlikely(skb_proc == 0))
  3558. return tso_info->num_segs;
  3559. curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
  3560. curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3561. /* frag len is added to ip_len in the while loop below */
  3562. curr_seg->seg.num_frags++;
  3563. while (more_tso_frags) {
  3564. if (tso_frag_len != 0) {
  3565. curr_seg->seg.tso_frags[i].vaddr =
  3566. tso_frag_vaddr;
  3567. curr_seg->seg.tso_frags[i].length =
  3568. tso_frag_len;
  3569. curr_seg->seg.total_len += tso_frag_len;
  3570. curr_seg->seg.tso_flags.ip_len += tso_frag_len;
  3571. curr_seg->seg.num_frags++;
  3572. skb_proc = skb_proc - tso_frag_len;
  3573. /* increment the TCP sequence number */
  3574. tso_cmn_info.tcp_seq_num += tso_frag_len;
  3575. curr_seg->seg.tso_frags[i].paddr =
  3576. tso_frag_paddr;
  3577. qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
  3578. }
  3579. TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
  3580. __func__, __LINE__,
  3581. i,
  3582. tso_frag_len,
  3583. curr_seg->seg.total_len,
  3584. curr_seg->seg.tso_frags[i].vaddr);
  3585. /* if there is no more data left in the skb */
  3586. if (!skb_proc)
  3587. return tso_info->num_segs;
  3588. /* get the next payload fragment information */
  3589. /* check if there are more fragments in this segment */
  3590. if (tso_frag_len < tso_seg_size) {
  3591. more_tso_frags = 1;
  3592. if (tso_frag_len != 0) {
  3593. tso_seg_size = tso_seg_size -
  3594. tso_frag_len;
  3595. i++;
  3596. if (curr_seg->seg.num_frags ==
  3597. FRAG_NUM_MAX) {
  3598. more_tso_frags = 0;
  3599. /*
  3600. * reset i and the tso
  3601. * payload size
  3602. */
  3603. i = 1;
  3604. tso_seg_size =
  3605. skb_shinfo(skb)->
  3606. gso_size;
  3607. }
  3608. }
  3609. } else {
  3610. more_tso_frags = 0;
  3611. /* reset i and the tso payload size */
  3612. i = 1;
  3613. tso_seg_size = skb_shinfo(skb)->gso_size;
  3614. }
  3615. /* if the next fragment is contiguous */
  3616. if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) {
  3617. tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
  3618. skb_frag_len = skb_frag_len - tso_frag_len;
  3619. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3620. } else { /* the next fragment is not contiguous */
  3621. if (skb_shinfo(skb)->nr_frags == 0) {
  3622. qdf_info("TSO: nr_frags == 0!");
  3623. qdf_assert(0);
  3624. return 0;
  3625. }
  3626. if (j >= skb_shinfo(skb)->nr_frags) {
  3627. qdf_info("TSO: nr_frags %d j %d",
  3628. skb_shinfo(skb)->nr_frags, j);
  3629. qdf_assert(0);
  3630. return 0;
  3631. }
  3632. frag = &skb_shinfo(skb)->frags[j];
  3633. skb_frag_len = skb_frag_size(frag);
  3634. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3635. tso_frag_vaddr = skb_frag_address_safe(frag);
  3636. j++;
  3637. }
  3638. TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
  3639. __func__, __LINE__, skb_frag_len, tso_frag_len,
  3640. tso_seg_size);
  3641. if (!(tso_frag_vaddr)) {
  3642. TSO_DEBUG("%s: Fragment virtual addr is NULL",
  3643. __func__);
  3644. return 0;
  3645. }
  3646. tso_frag_paddr = qdf_nbuf_tso_map_frag(
  3647. osdev, tso_frag_vaddr,
  3648. tso_frag_len,
  3649. QDF_DMA_TO_DEVICE);
  3650. if (qdf_unlikely(!tso_frag_paddr))
  3651. return 0;
  3652. }
  3653. TSO_DEBUG("%s tcp_seq_num: %u", __func__,
  3654. curr_seg->seg.tso_flags.tcp_seq_num);
  3655. num_seg--;
  3656. /* if TCP FIN flag was set, set it in the last segment */
  3657. if (!num_seg)
  3658. curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
  3659. qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
  3660. curr_seg = curr_seg->next;
  3661. }
  3662. return tso_info->num_segs;
  3663. }
  3664. qdf_export_symbol(__qdf_nbuf_get_tso_info);
  3665. void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
  3666. struct qdf_tso_seg_elem_t *tso_seg,
  3667. bool is_last_seg)
  3668. {
  3669. uint32_t num_frags = 0;
  3670. if (tso_seg->seg.num_frags > 0)
  3671. num_frags = tso_seg->seg.num_frags - 1;
3672. /* Num of frags in a tso seg cannot be less than 2 */
  3673. if (num_frags < 1) {
  3674. /*
3675. * If the number of frags in a tso seg is 1 but is_last_seg is true
3676. * (this may happen when qdf_nbuf_get_tso_info failed),
3677. * do dma unmap for the 0th frag in this seg.
  3678. */
  3679. if (is_last_seg && tso_seg->seg.num_frags == 1)
  3680. goto last_seg_free_first_frag;
  3681. qdf_assert(0);
  3682. qdf_err("ERROR: num of frags in a tso segment is %d",
  3683. (num_frags + 1));
  3684. return;
  3685. }
  3686. while (num_frags) {
3687. /* dma unmap each frag of the tso seg except the 0th frag */
  3688. if (0 == tso_seg->seg.tso_frags[num_frags].paddr) {
  3689. qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
  3690. num_frags);
  3691. qdf_assert(0);
  3692. return;
  3693. }
  3694. qdf_nbuf_tso_unmap_frag(
  3695. osdev,
  3696. tso_seg->seg.tso_frags[num_frags].paddr,
  3697. tso_seg->seg.tso_frags[num_frags].length,
  3698. QDF_DMA_TO_DEVICE);
  3699. tso_seg->seg.tso_frags[num_frags].paddr = 0;
  3700. num_frags--;
  3701. qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
  3702. }
  3703. last_seg_free_first_frag:
  3704. if (is_last_seg) {
3705. /* dma unmap the 0th frag of the tso seg */
  3706. if (0 == tso_seg->seg.tso_frags[0].paddr) {
  3707. qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
  3708. qdf_assert(0);
  3709. return;
  3710. }
  3711. qdf_nbuf_tso_unmap_frag(osdev,
  3712. tso_seg->seg.tso_frags[0].paddr,
  3713. tso_seg->seg.tso_frags[0].length,
  3714. QDF_DMA_TO_DEVICE);
  3715. tso_seg->seg.tso_frags[0].paddr = 0;
  3716. qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
  3717. }
  3718. }
  3719. qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
  3720. size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
  3721. {
  3722. size_t packet_len;
  3723. packet_len = skb->len -
  3724. ((skb_transport_header(skb) - skb_mac_header(skb)) +
  3725. tcp_hdrlen(skb));
  3726. return packet_len;
  3727. }
  3728. qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
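/*
 * Example: for a 1514-byte Ethernet frame carrying IPv4 TCP with no
 * options, skb_transport_header() - skb_mac_header() = 14 + 20 = 34 and
 * tcp_hdrlen() = 20, so the TCP payload length is
 * 1514 - (34 + 20) = 1460 bytes (one full MSS).
 */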
  3729. #ifndef BUILD_X86
  3730. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3731. {
  3732. uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
  3733. uint32_t remainder, num_segs = 0;
  3734. uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
  3735. uint8_t frags_per_tso = 0;
  3736. uint32_t skb_frag_len = 0;
  3737. uint32_t eit_hdr_len = (skb_transport_header(skb)
  3738. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3739. skb_frag_t *frag = NULL;
  3740. int j = 0;
  3741. uint32_t temp_num_seg = 0;
3742. /* length of the first chunk of data in the skb minus the eit header */
  3743. skb_frag_len = skb_headlen(skb) - eit_hdr_len;
  3744. /* Calculate num of segs for skb's first chunk of data*/
  3745. remainder = skb_frag_len % tso_seg_size;
  3746. num_segs = skb_frag_len / tso_seg_size;
  3747. /*
  3748. * Remainder non-zero and nr_frags zero implies end of skb data.
  3749. * In that case, one more tso seg is required to accommodate
  3750. * remaining data, hence num_segs++. If nr_frags is non-zero,
  3751. * then remaining data will be accommodated while doing the calculation
  3752. * for nr_frags data. Hence, frags_per_tso++.
  3753. */
  3754. if (remainder) {
  3755. if (!skb_nr_frags)
  3756. num_segs++;
  3757. else
  3758. frags_per_tso++;
  3759. }
  3760. while (skb_nr_frags) {
  3761. if (j >= skb_shinfo(skb)->nr_frags) {
  3762. qdf_info("TSO: nr_frags %d j %d",
  3763. skb_shinfo(skb)->nr_frags, j);
  3764. qdf_assert(0);
  3765. return 0;
  3766. }
  3767. /*
3768. * Calculate the number of tso segs for the nr_frags data:
3769. * get the length of each frag in skb_frag_len, add it to the
3770. * remainder, get the number of segments by dividing by
3771. * tso_seg_size and compute the new remainder.
3772. * Decrement the nr_frags value and keep
3773. * looping over all the skb fragments.
  3774. */
  3775. frag = &skb_shinfo(skb)->frags[j];
  3776. skb_frag_len = skb_frag_size(frag);
  3777. temp_num_seg = num_segs;
  3778. remainder += skb_frag_len;
  3779. num_segs += remainder / tso_seg_size;
  3780. remainder = remainder % tso_seg_size;
  3781. skb_nr_frags--;
  3782. if (remainder) {
  3783. if (num_segs > temp_num_seg)
  3784. frags_per_tso = 0;
  3785. /*
3786. * Increment frags_per_tso whenever the remainder is
3787. * positive. If frags_per_tso reaches (max - 1)
3788. * [the first frag always carries the EIT header, hence max - 1],
3789. * increment num_segs since no more data can be
3790. * accommodated in the current tso seg. Reset the remainder
3791. * and frags_per_tso and keep looping.
  3792. */
  3793. frags_per_tso++;
  3794. if (frags_per_tso == FRAG_NUM_MAX - 1) {
  3795. num_segs++;
  3796. frags_per_tso = 0;
  3797. remainder = 0;
  3798. }
  3799. /*
  3800. * If this is the last skb frag and still remainder is
  3801. * non-zero(frags_per_tso is not reached to the max-1)
  3802. * then increment the num_segs to take care of the
  3803. * remaining length.
  3804. */
  3805. if (!skb_nr_frags && remainder) {
  3806. num_segs++;
  3807. frags_per_tso = 0;
  3808. }
  3809. } else {
  3810. /* Whenever remainder is 0, reset the frags_per_tso. */
  3811. frags_per_tso = 0;
  3812. }
  3813. j++;
  3814. }
  3815. return num_segs;
  3816. }
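/*
 * Worked example for the counting above (hypothetical skb): 1454 bytes of
 * linear data after the 54-byte EIT header, one 2000-byte page frag, and
 * gso_size = 1400:
 *
 *	linear chunk:  1454 / 1400 = 1 seg, remainder 54, frags_per_tso = 1
 *	page frag:     remainder 54 + 2000 = 2054 -> +1 seg (2054 / 1400),
 *	               new remainder 654; last frag with remainder -> +1 seg
 *
 * num_segs = 3, which matches ceil((1454 + 2000) / 1400) = 3.
 */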
  3817. #elif !defined(QCA_WIFI_QCN9000)
  3818. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3819. {
  3820. uint32_t i, gso_size, tmp_len, num_segs = 0;
  3821. skb_frag_t *frag = NULL;
  3822. /*
3823. * Check if the head SKB or any of the frags are allocated in the
3824. * < 0x50000000 region, which cannot be accessed by the target
  3825. */
  3826. if (virt_to_phys(skb->data) < 0x50000040) {
  3827. TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
  3828. __func__, __LINE__, skb_shinfo(skb)->nr_frags,
  3829. virt_to_phys(skb->data));
  3830. goto fail;
  3831. }
  3832. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3833. frag = &skb_shinfo(skb)->frags[i];
  3834. if (!frag)
  3835. goto fail;
  3836. if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
  3837. goto fail;
  3838. }
  3839. gso_size = skb_shinfo(skb)->gso_size;
  3840. tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
  3841. + tcp_hdrlen(skb));
  3842. while (tmp_len) {
  3843. num_segs++;
  3844. if (tmp_len > gso_size)
  3845. tmp_len -= gso_size;
  3846. else
  3847. break;
  3848. }
  3849. return num_segs;
  3850. /*
  3851. * Do not free this frame, just do socket level accounting
  3852. * so that this is not reused.
  3853. */
  3854. fail:
  3855. if (skb->sk)
  3856. atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
  3857. return 0;
  3858. }
  3859. #else
  3860. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3861. {
  3862. uint32_t i, gso_size, tmp_len, num_segs = 0;
  3863. skb_frag_t *frag = NULL;
  3864. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3865. frag = &skb_shinfo(skb)->frags[i];
  3866. if (!frag)
  3867. goto fail;
  3868. }
  3869. gso_size = skb_shinfo(skb)->gso_size;
  3870. tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
  3871. + tcp_hdrlen(skb));
  3872. while (tmp_len) {
  3873. num_segs++;
  3874. if (tmp_len > gso_size)
  3875. tmp_len -= gso_size;
  3876. else
  3877. break;
  3878. }
  3879. return num_segs;
  3880. /*
  3881. * Do not free this frame, just do socket level accounting
  3882. * so that this is not reused.
  3883. */
  3884. fail:
  3885. if (skb->sk)
  3886. atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
  3887. return 0;
  3888. }
  3889. #endif
  3890. qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
  3891. #endif /* FEATURE_TSO */
  3892. void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
  3893. uint32_t *lo, uint32_t *hi)
  3894. {
  3895. if (sizeof(dmaaddr) > sizeof(uint32_t)) {
  3896. *lo = lower_32_bits(dmaaddr);
  3897. *hi = upper_32_bits(dmaaddr);
  3898. } else {
  3899. *lo = dmaaddr;
  3900. *hi = 0;
  3901. }
  3902. }
  3903. qdf_export_symbol(__qdf_dmaaddr_to_32s);
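/*
 * Example: with a 64-bit qdf_dma_addr_t of 0x0000000123456789,
 * __qdf_dmaaddr_to_32s() returns lo = 0x23456789 and hi = 0x00000001;
 * on a build where the DMA address is only 32 bits wide the whole value
 * goes into lo and hi is 0.
 */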
  3904. struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
  3905. {
  3906. qdf_nbuf_users_inc(&skb->users);
  3907. return skb;
  3908. }
  3909. qdf_export_symbol(__qdf_nbuf_inc_users);
  3910. int __qdf_nbuf_get_users(struct sk_buff *skb)
  3911. {
  3912. return qdf_nbuf_users_read(&skb->users);
  3913. }
  3914. qdf_export_symbol(__qdf_nbuf_get_users);
  3915. void __qdf_nbuf_ref(struct sk_buff *skb)
  3916. {
  3917. skb_get(skb);
  3918. }
  3919. qdf_export_symbol(__qdf_nbuf_ref);
  3920. int __qdf_nbuf_shared(struct sk_buff *skb)
  3921. {
  3922. return skb_shared(skb);
  3923. }
  3924. qdf_export_symbol(__qdf_nbuf_shared);
  3925. QDF_STATUS
  3926. __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
  3927. {
  3928. QDF_STATUS error = QDF_STATUS_SUCCESS;
  3929. /*
3930. * The driver can advertise its SG capability; that must be handled
3931. * here, along with bounce buffers if they are present.
  3932. */
  3933. (*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
  3934. if (!(*dmap))
  3935. error = QDF_STATUS_E_NOMEM;
  3936. return error;
  3937. }
  3938. qdf_export_symbol(__qdf_nbuf_dmamap_create);
  3939. void
  3940. __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
  3941. {
  3942. kfree(dmap);
  3943. }
  3944. qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
  3945. #ifdef QDF_OS_DEBUG
  3946. QDF_STATUS
  3947. __qdf_nbuf_map_nbytes(
  3948. qdf_device_t osdev,
  3949. struct sk_buff *skb,
  3950. qdf_dma_dir_t dir,
  3951. int nbytes)
  3952. {
  3953. struct skb_shared_info *sh = skb_shinfo(skb);
  3954. qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
  3955. /*
  3956. * Assume there's only a single fragment.
  3957. * To support multiple fragments, it would be necessary to change
  3958. * adf_nbuf_t to be a separate object that stores meta-info
  3959. * (including the bus address for each fragment) and a pointer
  3960. * to the underlying sk_buff.
  3961. */
  3962. qdf_assert(sh->nr_frags == 0);
  3963. return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
  3964. }
  3965. qdf_export_symbol(__qdf_nbuf_map_nbytes);
  3966. #else
  3967. QDF_STATUS
  3968. __qdf_nbuf_map_nbytes(
  3969. qdf_device_t osdev,
  3970. struct sk_buff *skb,
  3971. qdf_dma_dir_t dir,
  3972. int nbytes)
  3973. {
  3974. return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
  3975. }
  3976. qdf_export_symbol(__qdf_nbuf_map_nbytes);
  3977. #endif
  3978. void
  3979. __qdf_nbuf_unmap_nbytes(
  3980. qdf_device_t osdev,
  3981. struct sk_buff *skb,
  3982. qdf_dma_dir_t dir,
  3983. int nbytes)
  3984. {
  3985. qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
  3986. /*
  3987. * Assume there's a single fragment.
  3988. * If this is not true, the assertion in __adf_nbuf_map will catch it.
  3989. */
  3990. __qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
  3991. }
  3992. qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
  3993. void
  3994. __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
  3995. {
  3996. qdf_assert(bmap->mapped);
  3997. qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
  3998. memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
  3999. sizeof(struct __qdf_segment));
  4000. sg->nsegs = bmap->nsegs;
  4001. }
  4002. qdf_export_symbol(__qdf_nbuf_dma_map_info);
  4003. #if defined(__QDF_SUPPORT_FRAG_MEM)
  4004. void
  4005. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4006. {
4007. struct skb_shared_info *sh = skb_shinfo(skb);
4008. qdf_assert(skb);
4009. sg->sg_segs[0].vaddr = skb->data;
4010. sg->sg_segs[0].len = skb->len;
4011. sg->nsegs = 1;
4012. for (int i = 1; i <= sh->nr_frags; i++) {
4013. skb_frag_t *f = &sh->frags[i - 1];
4014. sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) + f->page_offset);
4015. sg->sg_segs[i].len = f->size;
4016. qdf_assert(i < QDF_MAX_SGLIST);
4017. }
4018. sg->nsegs += sh->nr_frags; /* head segment plus one per page frag */
  4019. }
  4020. qdf_export_symbol(__qdf_nbuf_frag_info);
  4021. #else
  4022. #ifdef QDF_OS_DEBUG
  4023. void
  4024. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4025. {
  4026. struct skb_shared_info *sh = skb_shinfo(skb);
  4027. qdf_assert(skb);
  4028. sg->sg_segs[0].vaddr = skb->data;
  4029. sg->sg_segs[0].len = skb->len;
  4030. sg->nsegs = 1;
  4031. qdf_assert(sh->nr_frags == 0);
  4032. }
  4033. qdf_export_symbol(__qdf_nbuf_frag_info);
  4034. #else
  4035. void
  4036. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4037. {
  4038. sg->sg_segs[0].vaddr = skb->data;
  4039. sg->sg_segs[0].len = skb->len;
  4040. sg->nsegs = 1;
  4041. }
  4042. qdf_export_symbol(__qdf_nbuf_frag_info);
  4043. #endif
  4044. #endif
  4045. uint32_t
  4046. __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
  4047. {
  4048. struct skb_shared_info *sh = skb_shinfo(nbuf);
  4049. const skb_frag_t *frag = sh->frags + cur_frag;
  4050. return skb_frag_size(frag);
  4051. }
  4052. qdf_export_symbol(__qdf_nbuf_get_frag_size);
  4053. #ifdef A_SIMOS_DEVHOST
  4054. QDF_STATUS __qdf_nbuf_frag_map(
  4055. qdf_device_t osdev, __qdf_nbuf_t nbuf,
  4056. int offset, qdf_dma_dir_t dir, int cur_frag)
  4057. {
  4058. int32_t paddr, frag_len;
  4059. QDF_NBUF_CB_PADDR(nbuf) = paddr = nbuf->data;
  4060. return QDF_STATUS_SUCCESS;
  4061. }
  4062. qdf_export_symbol(__qdf_nbuf_frag_map);
  4063. #else
  4064. QDF_STATUS __qdf_nbuf_frag_map(
  4065. qdf_device_t osdev, __qdf_nbuf_t nbuf,
  4066. int offset, qdf_dma_dir_t dir, int cur_frag)
  4067. {
  4068. dma_addr_t paddr, frag_len;
  4069. struct skb_shared_info *sh = skb_shinfo(nbuf);
  4070. const skb_frag_t *frag = sh->frags + cur_frag;
  4071. frag_len = skb_frag_size(frag);
  4072. QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
  4073. skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
  4074. __qdf_dma_dir_to_os(dir));
  4075. return dma_mapping_error(osdev->dev, paddr) ?
  4076. QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
  4077. }
  4078. qdf_export_symbol(__qdf_nbuf_frag_map);
  4079. #endif
  4080. void
  4081. __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
  4082. {
  4083. return;
  4084. }
  4085. qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
  4086. /**
  4087. * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
  4088. * @osdev: os device
  4089. * @buf: sk buff
  4090. * @dir: direction
  4091. *
  4092. * Return: none
  4093. */
  4094. #if defined(A_SIMOS_DEVHOST)
  4095. static void __qdf_nbuf_sync_single_for_cpu(
  4096. qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  4097. {
  4098. return;
  4099. }
  4100. #else
  4101. static void __qdf_nbuf_sync_single_for_cpu(
  4102. qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  4103. {
  4104. if (0 == QDF_NBUF_CB_PADDR(buf)) {
  4105. qdf_err("ERROR: NBUF mapped physical address is NULL");
  4106. return;
  4107. }
  4108. dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
  4109. skb_end_offset(buf) - skb_headroom(buf),
  4110. __qdf_dma_dir_to_os(dir));
  4111. }
  4112. #endif
  4113. void
  4114. __qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
  4115. struct sk_buff *skb, qdf_dma_dir_t dir)
  4116. {
  4117. qdf_assert(
  4118. (dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
  4119. /*
  4120. * Assume there's a single fragment.
4121. * If this is not true, the assertion in __qdf_nbuf_map will catch it.
  4122. */
  4123. __qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
  4124. }
  4125. qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
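/*
 * Illustrative usage sketch (an assumption, not code from this driver):
 * an RX completion handler that inspects a DMA-mapped buffer on the host
 * syncs it for the CPU first; inspect_rx_header() is a hypothetical
 * consumer of the now CPU-coherent data.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	inspect_rx_header(skb->data);
 */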
  4126. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
  4127. /**
  4128. * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
  4129. * @rx_status: Pointer to rx_status.
  4130. * @rtap_buf: Buf to which VHT info has to be updated.
  4131. * @rtap_len: Current length of radiotap buffer
  4132. *
  4133. * Return: Length of radiotap after VHT flags updated.
  4134. */
  4135. static unsigned int qdf_nbuf_update_radiotap_vht_flags(
  4136. struct mon_rx_status *rx_status,
  4137. int8_t *rtap_buf,
  4138. uint32_t rtap_len)
  4139. {
  4140. uint16_t vht_flags = 0;
  4141. struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
  4142. rtap_len = qdf_align(rtap_len, 2);
  4143. /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
  4144. vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
  4145. IEEE80211_RADIOTAP_VHT_KNOWN_GI |
  4146. IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
  4147. IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
  4148. IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
  4149. IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
  4150. put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
  4151. rtap_len += 2;
  4152. rtap_buf[rtap_len] |=
  4153. (rx_status->is_stbc ?
  4154. IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
  4155. (rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
  4156. (rx_status->ldpc ?
  4157. IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
  4158. (rx_status->beamformed ?
  4159. IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
  4160. rtap_len += 1;
  4161. if (!rx_user_status) {
  4162. switch (rx_status->vht_flag_values2) {
  4163. case IEEE80211_RADIOTAP_VHT_BW_20:
  4164. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
  4165. break;
  4166. case IEEE80211_RADIOTAP_VHT_BW_40:
  4167. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
  4168. break;
  4169. case IEEE80211_RADIOTAP_VHT_BW_80:
  4170. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
  4171. break;
  4172. case IEEE80211_RADIOTAP_VHT_BW_160:
  4173. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
  4174. break;
  4175. }
  4176. rtap_len += 1;
  4177. rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
  4178. rtap_len += 1;
  4179. rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
  4180. rtap_len += 1;
  4181. rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
  4182. rtap_len += 1;
  4183. rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
  4184. rtap_len += 1;
  4185. rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
  4186. rtap_len += 1;
  4187. rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
  4188. rtap_len += 1;
  4189. put_unaligned_le16(rx_status->vht_flag_values6,
  4190. &rtap_buf[rtap_len]);
  4191. rtap_len += 2;
  4192. } else {
  4193. switch (rx_user_status->vht_flag_values2) {
  4194. case IEEE80211_RADIOTAP_VHT_BW_20:
  4195. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
  4196. break;
  4197. case IEEE80211_RADIOTAP_VHT_BW_40:
  4198. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
  4199. break;
  4200. case IEEE80211_RADIOTAP_VHT_BW_80:
  4201. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
  4202. break;
  4203. case IEEE80211_RADIOTAP_VHT_BW_160:
  4204. rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
  4205. break;
  4206. }
  4207. rtap_len += 1;
  4208. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
  4209. rtap_len += 1;
  4210. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
  4211. rtap_len += 1;
  4212. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
  4213. rtap_len += 1;
  4214. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
  4215. rtap_len += 1;
  4216. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
  4217. rtap_len += 1;
  4218. rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
  4219. rtap_len += 1;
  4220. put_unaligned_le16(rx_user_status->vht_flag_values6,
  4221. &rtap_buf[rtap_len]);
  4222. rtap_len += 2;
  4223. }
  4224. return rtap_len;
  4225. }
  4226. /**
  4227. * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
  4228. * @rx_status: Pointer to rx_status.
  4229. * @rtap_buf: buffer to which radiotap has to be updated
  4230. * @rtap_len: radiotap length
  4231. *
4232. * API to update high-efficiency (11ax) fields in the radiotap header
  4233. *
  4234. * Return: length of rtap_len updated.
  4235. */
  4236. static unsigned int
  4237. qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
  4238. int8_t *rtap_buf, uint32_t rtap_len)
  4239. {
  4240. /*
  4241. * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
  4242. * Enable all "known" HE radiotap flags for now
  4243. */
  4244. rtap_len = qdf_align(rtap_len, 2);
  4245. put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
  4246. rtap_len += 2;
  4247. put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
  4248. rtap_len += 2;
  4249. put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
  4250. rtap_len += 2;
  4251. put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
  4252. rtap_len += 2;
  4253. put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
  4254. rtap_len += 2;
  4255. put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
  4256. rtap_len += 2;
  4257. qdf_rl_debug("he data %x %x %x %x %x %x",
  4258. rx_status->he_data1,
  4259. rx_status->he_data2, rx_status->he_data3,
  4260. rx_status->he_data4, rx_status->he_data5,
  4261. rx_status->he_data6);
  4262. return rtap_len;
  4263. }
  4264. /**
  4265. * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
  4266. * @rx_status: Pointer to rx_status.
  4267. * @rtap_buf: buffer to which radiotap has to be updated
  4268. * @rtap_len: radiotap length
  4269. *
4270. * API to update HE-MU fields in the radiotap header
  4271. *
  4272. * Return: length of rtap_len updated.
  4273. */
  4274. static unsigned int
  4275. qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
  4276. int8_t *rtap_buf, uint32_t rtap_len)
  4277. {
  4278. struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
  4279. rtap_len = qdf_align(rtap_len, 2);
  4280. /*
  4281. * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
  4282. * Enable all "known" he-mu radiotap flags for now
  4283. */
  4284. if (!rx_user_status) {
  4285. put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
  4286. rtap_len += 2;
  4287. put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
  4288. rtap_len += 2;
  4289. rtap_buf[rtap_len] = rx_status->he_RU[0];
  4290. rtap_len += 1;
  4291. rtap_buf[rtap_len] = rx_status->he_RU[1];
  4292. rtap_len += 1;
  4293. rtap_buf[rtap_len] = rx_status->he_RU[2];
  4294. rtap_len += 1;
  4295. rtap_buf[rtap_len] = rx_status->he_RU[3];
  4296. rtap_len += 1;
  4297. qdf_debug("he_flags %x %x he-RU %x %x %x %x",
  4298. rx_status->he_flags1,
  4299. rx_status->he_flags2, rx_status->he_RU[0],
  4300. rx_status->he_RU[1], rx_status->he_RU[2],
  4301. rx_status->he_RU[3]);
  4302. } else {
  4303. put_unaligned_le16(rx_user_status->he_flags1,
  4304. &rtap_buf[rtap_len]);
  4305. rtap_len += 2;
  4306. put_unaligned_le16(rx_user_status->he_flags2,
  4307. &rtap_buf[rtap_len]);
  4308. rtap_len += 2;
  4309. rtap_buf[rtap_len] = rx_user_status->he_RU[0];
  4310. rtap_len += 1;
  4311. rtap_buf[rtap_len] = rx_user_status->he_RU[1];
  4312. rtap_len += 1;
  4313. rtap_buf[rtap_len] = rx_user_status->he_RU[2];
  4314. rtap_len += 1;
  4315. rtap_buf[rtap_len] = rx_user_status->he_RU[3];
  4316. rtap_len += 1;
  4317. qdf_debug("he_flags %x %x he-RU %x %x %x %x",
  4318. rx_user_status->he_flags1,
  4319. rx_user_status->he_flags2, rx_user_status->he_RU[0],
  4320. rx_user_status->he_RU[1], rx_user_status->he_RU[2],
  4321. rx_user_status->he_RU[3]);
  4322. }
  4323. return rtap_len;
  4324. }
  4325. /**
  4326. * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
  4327. * @rx_status: Pointer to rx_status.
  4328. * @rtap_buf: buffer to which radiotap has to be updated
  4329. * @rtap_len: radiotap length
  4330. *
4331. * API to update HE-MU-other fields in the radiotap header
  4332. *
  4333. * Return: length of rtap_len updated.
  4334. */
  4335. static unsigned int
  4336. qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
  4337. int8_t *rtap_buf, uint32_t rtap_len)
  4338. {
  4339. struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
  4340. rtap_len = qdf_align(rtap_len, 2);
  4341. /*
  4342. * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
  4343. * Enable all "known" he-mu-other radiotap flags for now
  4344. */
  4345. if (!rx_user_status) {
  4346. put_unaligned_le16(rx_status->he_per_user_1,
  4347. &rtap_buf[rtap_len]);
  4348. rtap_len += 2;
  4349. put_unaligned_le16(rx_status->he_per_user_2,
  4350. &rtap_buf[rtap_len]);
  4351. rtap_len += 2;
  4352. rtap_buf[rtap_len] = rx_status->he_per_user_position;
  4353. rtap_len += 1;
  4354. rtap_buf[rtap_len] = rx_status->he_per_user_known;
  4355. rtap_len += 1;
  4356. qdf_debug("he_per_user %x %x pos %x knwn %x",
  4357. rx_status->he_per_user_1,
  4358. rx_status->he_per_user_2,
  4359. rx_status->he_per_user_position,
  4360. rx_status->he_per_user_known);
  4361. } else {
  4362. put_unaligned_le16(rx_user_status->he_per_user_1,
  4363. &rtap_buf[rtap_len]);
  4364. rtap_len += 2;
  4365. put_unaligned_le16(rx_user_status->he_per_user_2,
  4366. &rtap_buf[rtap_len]);
  4367. rtap_len += 2;
  4368. rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
  4369. rtap_len += 1;
  4370. rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
  4371. rtap_len += 1;
  4372. qdf_debug("he_per_user %x %x pos %x knwn %x",
  4373. rx_user_status->he_per_user_1,
  4374. rx_user_status->he_per_user_2,
  4375. rx_user_status->he_per_user_position,
  4376. rx_user_status->he_per_user_known);
  4377. }
  4378. return rtap_len;
  4379. }
  4380. /**
  4381. * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
  4382. * from rx_status
  4383. * @rx_status: Pointer to rx_status.
  4384. * @rtap_buf: buffer to which radiotap has to be updated
  4385. * @rtap_len: radiotap length
  4386. *
4387. * API to update U-SIG (802.11be preamble) fields in the radiotap header
  4388. *
  4389. * Return: length of rtap_len updated.
  4390. */
  4391. static unsigned int
  4392. qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
  4393. int8_t *rtap_buf, uint32_t rtap_len)
  4394. {
  4395. /*
  4396. * IEEE80211_RADIOTAP_USIG:
  4397. * u32, u32, u32
  4398. */
  4399. rtap_len = qdf_align(rtap_len, 4);
  4400. put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
  4401. rtap_len += 4;
  4402. put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
  4403. rtap_len += 4;
  4404. put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
  4405. rtap_len += 4;
  4406. qdf_rl_debug("U-SIG data %x %x %x",
  4407. rx_status->usig_common, rx_status->usig_value,
  4408. rx_status->usig_mask);
  4409. return rtap_len;
  4410. }
  4411. /**
  4412. * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
  4413. * from rx_status
  4414. * @rx_status: Pointer to rx_status.
  4415. * @rtap_buf: buffer to which radiotap has to be updated
  4416. * @rtap_len: radiotap length
  4417. *
4418. * API to update Extremely High Throughput (11be) fields in the radiotap header
  4419. *
  4420. * Return: length of rtap_len updated.
  4421. */
  4422. static unsigned int
  4423. qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
  4424. int8_t *rtap_buf, uint32_t rtap_len)
  4425. {
  4426. uint32_t user;
  4427. /*
  4428. * IEEE80211_RADIOTAP_EHT:
  4429. * u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
  4430. */
  4431. rtap_len = qdf_align(rtap_len, 4);
  4432. put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
  4433. rtap_len += 4;
  4434. put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
  4435. rtap_len += 4;
  4436. put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
  4437. rtap_len += 4;
  4438. put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
  4439. rtap_len += 4;
  4440. put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
  4441. rtap_len += 4;
  4442. put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
  4443. rtap_len += 4;
  4444. put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
  4445. rtap_len += 4;
  4446. for (user = 0; user < EHT_USER_INFO_LEN &&
  4447. rx_status->num_eht_user_info_valid &&
  4448. user < rx_status->num_eht_user_info_valid; user++) {
  4449. put_unaligned_le32(rx_status->eht_user_info[user],
  4450. &rtap_buf[rtap_len]);
  4451. rtap_len += 4;
  4452. }
  4453. qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
  4454. rx_status->eht_known, rx_status->eht_data[0],
  4455. rx_status->eht_data[1], rx_status->eht_data[2],
  4456. rx_status->eht_data[3], rx_status->eht_data[4],
  4457. rx_status->eht_data[5]);
  4458. return rtap_len;
  4459. }
  4460. #define IEEE80211_RADIOTAP_TX_STATUS 0
  4461. #define IEEE80211_RADIOTAP_RETRY_COUNT 1
  4462. #define IEEE80211_RADIOTAP_EXTENSION2 2
  4463. uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */
  4464. /**
  4465. * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
  4466. * @rx_status: Pointer to rx_status.
  4467. * @rtap_buf: Buf to which AMPDU info has to be updated.
  4468. * @rtap_len: Current length of radiotap buffer
  4469. *
  4470. * Return: Length of radiotap after AMPDU flags updated.
  4471. */
  4472. static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
  4473. struct mon_rx_status *rx_status,
  4474. uint8_t *rtap_buf,
  4475. uint32_t rtap_len)
  4476. {
  4477. /*
  4478. * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
  4479. * First 32 bits of AMPDU represents the reference number
  4480. */
  4481. uint32_t ampdu_reference_num = rx_status->ppdu_id;
  4482. uint16_t ampdu_flags = 0;
  4483. uint16_t ampdu_reserved_flags = 0;
  4484. rtap_len = qdf_align(rtap_len, 4);
  4485. put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
  4486. rtap_len += 4;
  4487. put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
  4488. rtap_len += 2;
  4489. put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
  4490. rtap_len += 2;
  4491. return rtap_len;
  4492. }
  4493. #ifdef DP_MON_RSSI_IN_DBM
  4494. #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
  4495. (rx_status->rssi_comb)
  4496. #else
  4497. #ifdef QCA_RSSI_DB2DBM
  4498. #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
  4499. (((rx_status)->rssi_dbm_conv_support) ? \
  4500. ((rx_status)->rssi_comb + (rx_status)->rssi_offset) :\
  4501. ((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
  4502. #else
  4503. #define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
  4504. (rx_status->rssi_comb + rx_status->chan_noise_floor)
  4505. #endif
  4506. #endif
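/*
 * Worked example for the fallback conversion above (values are
 * illustrative): with rx_status->rssi_comb = 40 (dB relative to the noise
 * floor) and rx_status->chan_noise_floor = -96 dBm, the macro yields
 * 40 + (-96) = -56 dBm for the radiotap DBM_ANTSIGNAL field. When
 * QCA_RSSI_DB2DBM conversion support is available, rssi_offset is applied
 * instead of the channel noise floor.
 */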
  4507. /**
  4508. * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
  4509. * @rx_status: Pointer to rx_status.
  4510. * @rtap_buf: Buf to which tx info has to be updated.
  4511. * @rtap_len: Current length of radiotap buffer
  4512. *
  4513. * Return: Length of radiotap after tx flags updated.
  4514. */
  4515. static unsigned int qdf_nbuf_update_radiotap_tx_flags(
  4516. struct mon_rx_status *rx_status,
  4517. uint8_t *rtap_buf,
  4518. uint32_t rtap_len)
  4519. {
  4520. /*
  4521. * IEEE80211_RADIOTAP_TX_FLAGS u16
  4522. */
  4523. uint16_t tx_flags = 0;
  4524. rtap_len = qdf_align(rtap_len, 2);
  4525. switch (rx_status->tx_status) {
  4526. case RADIOTAP_TX_STATUS_FAIL:
  4527. tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
  4528. break;
  4529. case RADIOTAP_TX_STATUS_NOACK:
  4530. tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
  4531. break;
  4532. }
  4533. put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
  4534. rtap_len += 2;
  4535. return rtap_len;
  4536. }
  4537. unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
  4538. qdf_nbuf_t nbuf, uint32_t headroom_sz)
  4539. {
  4540. uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
  4541. struct ieee80211_radiotap_header *rthdr =
  4542. (struct ieee80211_radiotap_header *)rtap_buf;
  4543. uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
  4544. uint32_t rtap_len = rtap_hdr_len;
  4545. uint8_t length = rtap_len;
  4546. struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
  4547. struct qdf_radiotap_ext2 *rtap_ext2;
  4548. struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;
  4549. /* per user info */
  4550. qdf_le32_t *it_present;
  4551. uint32_t it_present_val;
  4552. bool radiotap_ext1_hdr_present = false;
  4553. it_present = &rthdr->it_present;
  4554. /* Adding Extended Header space */
  4555. if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
  4556. rx_status->usig_flags || rx_status->eht_flags) {
  4557. rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
  4558. rtap_len = rtap_hdr_len;
  4559. radiotap_ext1_hdr_present = true;
  4560. }
  4561. length = rtap_len;
  4562. /* IEEE80211_RADIOTAP_TSFT __le64 microseconds*/
  4563. it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
  4564. put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
  4565. rtap_len += 8;
  4566. /* IEEE80211_RADIOTAP_FLAGS u8 */
  4567. it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);
  4568. if (rx_status->rs_fcs_err)
  4569. rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;
  4570. rtap_buf[rtap_len] = rx_status->rtap_flags;
  4571. rtap_len += 1;
  4572. /* IEEE80211_RADIOTAP_RATE u8 500kb/s */
  4573. if (!rx_status->ht_flags && !rx_status->vht_flags &&
  4574. !rx_status->he_flags && !rx_status->eht_flags) {
  4575. it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
  4576. rtap_buf[rtap_len] = rx_status->rate;
  4577. } else
  4578. rtap_buf[rtap_len] = 0;
  4579. rtap_len += 1;
  4580. /* IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap */
  4581. it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
  4582. put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
  4583. rtap_len += 2;
  4584. /* Channel flags. */
  4585. if (rx_status->chan_freq > CHANNEL_FREQ_5150)
  4586. rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
  4587. else
  4588. rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
  4589. if (rx_status->cck_flag)
  4590. rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
  4591. if (rx_status->ofdm_flag)
  4592. rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
  4593. put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
  4594. rtap_len += 2;
  4595. /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt
  4596. * (dBm)
  4597. */
  4598. it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
  4599. /*
  4600. * rssi_comb is int dB, need to convert it to dBm.
  4601. * normalize value to noise floor of -96 dBm
  4602. */
  4603. rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
  4604. rtap_len += 1;
  4605. /* RX signal noise floor */
  4606. it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
  4607. rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
  4608. rtap_len += 1;
  4609. /* IEEE80211_RADIOTAP_ANTENNA u8 antenna index */
  4610. it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
  4611. rtap_buf[rtap_len] = rx_status->nr_ant;
  4612. rtap_len += 1;
  4613. if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
  4614. qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
  4615. return 0;
  4616. }
  4617. /* update tx flags for pkt capture*/
  4618. if (rx_status->add_rtap_ext) {
  4619. it_present_val |=
  4620. cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
  4621. rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
  4622. rtap_buf,
  4623. rtap_len);
  4624. if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
  4625. qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
  4626. return 0;
  4627. }
  4628. }
  4629. if (rx_status->ht_flags) {
  4630. length = rtap_len;
4631. /* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
  4632. it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
  4633. rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
  4634. IEEE80211_RADIOTAP_MCS_HAVE_MCS |
  4635. IEEE80211_RADIOTAP_MCS_HAVE_GI;
  4636. rtap_len += 1;
  4637. if (rx_status->sgi)
  4638. rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
  4639. if (rx_status->bw)
  4640. rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
  4641. else
  4642. rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
  4643. rtap_len += 1;
  4644. rtap_buf[rtap_len] = rx_status->ht_mcs;
  4645. rtap_len += 1;
  4646. if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
  4647. qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
  4648. return 0;
  4649. }
  4650. }
  4651. if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
  4652. /* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
  4653. it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
  4654. rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
  4655. rtap_buf,
  4656. rtap_len);
  4657. }
  4658. if (rx_status->vht_flags) {
  4659. length = rtap_len;
  4660. /* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
  4661. it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
  4662. rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
  4663. rtap_buf,
  4664. rtap_len);
  4665. if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
  4666. qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
  4667. return 0;
  4668. }
  4669. }
  4670. if (rx_status->he_flags) {
  4671. length = rtap_len;
  4672. /* IEEE80211_RADIOTAP_HE */
  4673. it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
  4674. rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
  4675. rtap_buf,
  4676. rtap_len);
  4677. if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
  4678. qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
  4679. return 0;
  4680. }
  4681. }
  4682. if (rx_status->he_mu_flags) {
  4683. length = rtap_len;
  4684. /* IEEE80211_RADIOTAP_HE-MU */
  4685. it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
  4686. rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
  4687. rtap_buf,
  4688. rtap_len);
  4689. if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
  4690. qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
  4691. return 0;
  4692. }
  4693. }
  4694. if (rx_status->he_mu_other_flags) {
  4695. length = rtap_len;
  4696. /* IEEE80211_RADIOTAP_HE-MU-OTHER */
  4697. it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
  4698. rtap_len =
  4699. qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
  4700. rtap_buf,
  4701. rtap_len);
  4702. if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
  4703. qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
  4704. return 0;
  4705. }
  4706. }
  4707. rtap_len = qdf_align(rtap_len, 2);
  4708. /*
  4709. * Radiotap Vendor Namespace
  4710. */
  4711. it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
  4712. radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
  4713. (rtap_buf + rtap_len);
  4714. /*
  4715. * Copy Atheros OUI - 3 bytes (4th byte is 0)
  4716. */
  4717. qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
  4718. /*
  4719. * Name space selector = 0
  4720. * We only will have one namespace for now
  4721. */
  4722. radiotap_vendor_ns_ath->hdr.selector = 0;
  4723. radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
  4724. sizeof(*radiotap_vendor_ns_ath) -
  4725. sizeof(radiotap_vendor_ns_ath->hdr));
  4726. radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
  4727. radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
  4728. radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
  4729. radiotap_vendor_ns_ath->ppdu_start_timestamp =
  4730. cpu_to_le32(rx_status->ppdu_timestamp);
  4731. rtap_len += sizeof(*radiotap_vendor_ns_ath);
  4732. /* Move to next it_present */
  4733. if (radiotap_ext1_hdr_present) {
  4734. it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
  4735. put_unaligned_le32(it_present_val, it_present);
  4736. it_present_val = 0;
  4737. it_present++;
  4738. }
  4739. /* Add Extension to Radiotap Header & corresponding data */
  4740. if (rx_status->add_rtap_ext) {
  4741. it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
  4742. it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);
  4743. rtap_buf[rtap_len] = rx_status->tx_status;
  4744. rtap_len += 1;
  4745. rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
  4746. rtap_len += 1;
  4747. }
  4748. /* Add Extension2 to Radiotap Header */
  4749. if (rx_status->add_rtap_ext2) {
  4750. it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);
  4751. rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
  4752. rtap_ext2->ppdu_id = rx_status->ppdu_id;
  4753. rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
  4754. if (!rx_user_status) {
  4755. rtap_ext2->tid = rx_status->tid;
  4756. rtap_ext2->start_seq = rx_status->start_seq;
  4757. qdf_mem_copy(rtap_ext2->ba_bitmap,
  4758. rx_status->ba_bitmap,
  4759. 8 * (sizeof(uint32_t)));
  4760. } else {
  4761. uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;
  4762. /* set default bitmap sz if not set */
  4763. ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
  4764. rtap_ext2->tid = rx_user_status->tid;
  4765. rtap_ext2->start_seq = rx_user_status->start_seq;
  4766. qdf_mem_copy(rtap_ext2->ba_bitmap,
  4767. rx_user_status->ba_bitmap,
  4768. ba_bitmap_sz * (sizeof(uint32_t)));
  4769. }
  4770. rtap_len += sizeof(*rtap_ext2);
  4771. }
  4772. if (rx_status->usig_flags) {
  4773. length = rtap_len;
  4774. /* IEEE80211_RADIOTAP_USIG */
  4775. it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
  4776. rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
  4777. rtap_buf,
  4778. rtap_len);
  4779. if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
  4780. qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
  4781. return 0;
  4782. }
  4783. }
  4784. if (rx_status->eht_flags) {
  4785. length = rtap_len;
  4786. /* IEEE80211_RADIOTAP_EHT */
  4787. it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
  4788. rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
  4789. rtap_buf,
  4790. rtap_len);
  4791. if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
  4792. qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
  4793. return 0;
  4794. }
  4795. }
  4796. put_unaligned_le32(it_present_val, it_present);
  4797. rthdr->it_len = cpu_to_le16(rtap_len);
  4798. if (headroom_sz < rtap_len) {
  4799. qdf_debug("DEBUG: Not enough space to update radiotap");
  4800. return 0;
  4801. }
  4802. qdf_nbuf_push_head(nbuf, rtap_len);
  4803. qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
  4804. return rtap_len;
  4805. }
  4806. #else
  4807. static unsigned int qdf_nbuf_update_radiotap_vht_flags(
  4808. struct mon_rx_status *rx_status,
  4809. int8_t *rtap_buf,
  4810. uint32_t rtap_len)
  4811. {
  4812. qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
  4813. return 0;
  4814. }
  4815. unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
  4816. int8_t *rtap_buf, uint32_t rtap_len)
  4817. {
  4818. qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
  4819. return 0;
  4820. }
  4821. static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
  4822. struct mon_rx_status *rx_status,
  4823. uint8_t *rtap_buf,
  4824. uint32_t rtap_len)
  4825. {
  4826. qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
  4827. return 0;
  4828. }
  4829. unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
  4830. qdf_nbuf_t nbuf, uint32_t headroom_sz)
  4831. {
  4832. qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
  4833. return 0;
  4834. }
  4835. #endif
  4836. qdf_export_symbol(qdf_nbuf_update_radiotap);
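/*
 * Illustrative monitor-mode sketch (an assumption, not code from this
 * driver): the caller is expected to reserve enough headroom for the
 * radiotap header and treat a zero return as failure; deliver_to_mon() and
 * drop_frame() are hypothetical.
 *
 *	unsigned int rtap_len;
 *
 *	rtap_len = qdf_nbuf_update_radiotap(&rx_status, mon_nbuf,
 *					    qdf_nbuf_headroom(mon_nbuf));
 *	if (rtap_len)
 *		deliver_to_mon(mon_nbuf);
 *	else
 *		drop_frame(mon_nbuf);
 */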
  4837. void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
  4838. {
  4839. nbuf_free_cb = cb_func_ptr;
  4840. }
  4841. qdf_export_symbol(__qdf_nbuf_reg_free_cb);
  4842. void qdf_nbuf_classify_pkt(struct sk_buff *skb)
  4843. {
  4844. struct ethhdr *eh = (struct ethhdr *)skb->data;
4845. /* check whether the destination MAC address is broadcast/multicast */
  4846. if (is_broadcast_ether_addr((uint8_t *)eh))
  4847. QDF_NBUF_CB_SET_BCAST(skb);
  4848. else if (is_multicast_ether_addr((uint8_t *)eh))
  4849. QDF_NBUF_CB_SET_MCAST(skb);
  4850. if (qdf_nbuf_is_ipv4_arp_pkt(skb))
  4851. QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
  4852. QDF_NBUF_CB_PACKET_TYPE_ARP;
  4853. else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
  4854. QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
  4855. QDF_NBUF_CB_PACKET_TYPE_DHCP;
  4856. else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
  4857. QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
  4858. QDF_NBUF_CB_PACKET_TYPE_EAPOL;
  4859. else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
  4860. QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
  4861. QDF_NBUF_CB_PACKET_TYPE_WAPI;
  4862. }
  4863. qdf_export_symbol(qdf_nbuf_classify_pkt);
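/*
 * Illustrative sketch (an assumption, not code from this driver): once a
 * packet has been classified, later stages can branch on the cached packet
 * type instead of re-parsing the frame; prioritize_tx() is hypothetical.
 *
 *	qdf_nbuf_classify_pkt(skb);
 *	if (QDF_NBUF_CB_GET_PACKET_TYPE(skb) ==
 *	    QDF_NBUF_CB_PACKET_TYPE_EAPOL)
 *		prioritize_tx(skb);
 */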
  4864. void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
  4865. {
  4866. qdf_nbuf_users_set(&nbuf->users, 1);
  4867. nbuf->data = nbuf->head + NET_SKB_PAD;
  4868. skb_reset_tail_pointer(nbuf);
  4869. }
  4870. qdf_export_symbol(__qdf_nbuf_init);
  4871. #ifdef WLAN_FEATURE_FASTPATH
  4872. void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
  4873. {
  4874. qdf_nbuf_users_set(&nbuf->users, 1);
  4875. nbuf->data = nbuf->head + NET_SKB_PAD;
  4876. skb_reset_tail_pointer(nbuf);
  4877. }
  4878. qdf_export_symbol(qdf_nbuf_init_fast);
  4879. #endif /* WLAN_FEATURE_FASTPATH */
  4880. #ifdef QDF_NBUF_GLOBAL_COUNT
  4881. void __qdf_nbuf_mod_init(void)
  4882. {
  4883. is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
  4884. qdf_atomic_init(&nbuf_count);
  4885. qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
  4886. }
  4887. void __qdf_nbuf_mod_exit(void)
  4888. {
  4889. }
  4890. #endif
  4891. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
  4892. QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
  4893. int offset)
  4894. {
  4895. unsigned int frag_offset;
  4896. skb_frag_t *frag;
  4897. if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
  4898. return QDF_STATUS_E_FAILURE;
  4899. frag = &skb_shinfo(nbuf)->frags[idx];
  4900. frag_offset = skb_frag_off(frag);
  4901. frag_offset += offset;
  4902. skb_frag_off_set(frag, frag_offset);
  4903. __qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
  4904. return QDF_STATUS_SUCCESS;
  4905. }
  4906. #else
  4907. QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
  4908. int offset)
  4909. {
  4910. uint16_t frag_offset;
  4911. skb_frag_t *frag;
  4912. if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
  4913. return QDF_STATUS_E_FAILURE;
  4914. frag = &skb_shinfo(nbuf)->frags[idx];
  4915. frag_offset = frag->page_offset;
  4916. frag_offset += offset;
  4917. frag->page_offset = frag_offset;
  4918. __qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);
  4919. return QDF_STATUS_SUCCESS;
  4920. }
  4921. #endif
  4922. qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
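/*
 * Illustrative sketch (an assumption, not code from this driver): stripping
 * an hdr_len-byte header from the start of fragment 0 by advancing its page
 * offset; the helper also trims the fragment length by the same amount.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, hdr_len) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */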
  4923. void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
  4924. uint16_t idx,
  4925. uint16_t truesize)
  4926. {
  4927. struct page *page;
  4928. uint16_t frag_len;
  4929. page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);
  4930. if (qdf_unlikely(!page))
  4931. return;
  4932. frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
  4933. put_page(page);
  4934. nbuf->len -= frag_len;
  4935. nbuf->data_len -= frag_len;
  4936. nbuf->truesize -= truesize;
  4937. skb_shinfo(nbuf)->nr_frags--;
  4938. }
  4939. qdf_export_symbol(__qdf_nbuf_remove_frag);
  4940. void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
  4941. int offset, int frag_len,
  4942. unsigned int truesize, bool take_frag_ref)
  4943. {
  4944. struct page *page;
  4945. int frag_offset;
  4946. uint8_t nr_frag;
  4947. nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
  4948. qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);
  4949. page = virt_to_head_page(buf);
  4950. frag_offset = buf - page_address(page);
  4951. skb_add_rx_frag(nbuf, nr_frag, page,
  4952. (frag_offset + offset),
  4953. frag_len, truesize);
  4954. if (unlikely(take_frag_ref)) {
  4955. qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
  4956. skb_frag_ref(nbuf, nr_frag);
  4957. }
  4958. }
  4959. qdf_export_symbol(__qdf_nbuf_add_rx_frag);
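/*
 * Illustrative usage sketch (an assumption, not code from this driver):
 * attaching an RX ring buffer (rx_buf_virt, rx_frag_len bytes,
 * rx_buf_truesize accounted) to an nbuf as a page fragment, taking an extra
 * page reference so the ring keeps its own hold on the buffer.
 *
 *	__qdf_nbuf_add_rx_frag(rx_buf_virt, nbuf, 0, rx_frag_len,
 *			       rx_buf_truesize, true);
 */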
  4960. void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
  4961. {
  4962. struct page *page;
  4963. skb_frag_t frag = {0};
  4964. page = virt_to_head_page(buf);
  4965. __skb_frag_set_page(&frag, page);
  4966. /*
4967. * Since __skb_frag_ref() only uses the page to take a reference,
4968. * recovering the head page from the virtual address is sufficient.
  4969. */
  4970. qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
  4971. __skb_frag_ref(&frag);
  4972. }
  4973. qdf_export_symbol(__qdf_nbuf_ref_frag);
  4974. #ifdef NBUF_FRAG_MEMORY_DEBUG
  4975. QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
  4976. int offset, const char *func,
  4977. uint32_t line)
  4978. {
  4979. QDF_STATUS result;
  4980. qdf_frag_t p_fragp, n_fragp;
  4981. p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
  4982. result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);
  4983. if (qdf_likely(is_initial_mem_debug_disabled))
  4984. return result;
  4985. n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
  4986. /*
  4987. * Update frag address in frag debug tracker
  4988. * when frag offset is successfully changed in skb
  4989. */
  4990. if (result == QDF_STATUS_SUCCESS)
  4991. qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);
  4992. return result;
  4993. }
  4994. qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);
  4995. void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
  4996. int offset, int frag_len,
  4997. unsigned int truesize, bool take_frag_ref,
  4998. const char *func, uint32_t line)
  4999. {
  5000. qdf_frag_t fragp;
  5001. uint32_t num_nr_frags;
  5002. __qdf_nbuf_add_rx_frag(buf, nbuf, offset,
  5003. frag_len, truesize, take_frag_ref);
  5004. if (qdf_likely(is_initial_mem_debug_disabled))
  5005. return;
  5006. num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  5007. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  5008. fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);
  5009. /* Update frag address in frag debug tracking table */
  5010. if (fragp != buf && !take_frag_ref)
  5011. qdf_frag_debug_update_addr(buf, fragp, func, line);
  5012. /* Update frag refcount in frag debug tracking table */
  5013. qdf_frag_debug_refcount_inc(fragp, func, line);
  5014. }
  5015. qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
  5016. void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
  5017. {
  5018. __qdf_nbuf_ref_frag(buf);
  5019. if (qdf_likely(is_initial_mem_debug_disabled))
  5020. return;
  5021. /* Update frag refcount in frag debug tracking table */
  5022. qdf_frag_debug_refcount_inc(buf, func, line);
  5023. }
  5024. qdf_export_symbol(qdf_nbuf_ref_frag_debug);
  5025. void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
  5026. uint32_t line)
  5027. {
  5028. uint32_t num_nr_frags;
  5029. uint32_t idx = 0;
  5030. qdf_nbuf_t ext_list;
  5031. qdf_frag_t p_frag;
  5032. if (qdf_likely(is_initial_mem_debug_disabled))
  5033. return;
  5034. if (qdf_unlikely(!buf))
  5035. return;
  5036. /* Take care to update the refcount in the debug entries for frags */
  5037. num_nr_frags = qdf_nbuf_get_nr_frags(buf);
  5038. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  5039. while (idx < num_nr_frags) {
  5040. p_frag = qdf_nbuf_get_frag_addr(buf, idx);
  5041. if (qdf_likely(p_frag))
  5042. qdf_frag_debug_refcount_inc(p_frag, func, line);
  5043. idx++;
  5044. }
  5045. /*
  5046. * Take care to update the refcount in the debug entries for the
  5047. * frags attached to frag_list
  5048. */
  5049. ext_list = qdf_nbuf_get_ext_list(buf);
  5050. while (ext_list) {
  5051. idx = 0;
  5052. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  5053. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  5054. while (idx < num_nr_frags) {
  5055. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  5056. if (qdf_likely(p_frag))
  5057. qdf_frag_debug_refcount_inc(p_frag, func, line);
  5058. idx++;
  5059. }
  5060. ext_list = qdf_nbuf_queue_next(ext_list);
  5061. }
  5062. }
  5063. qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
  5064. void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
  5065. uint32_t line)
  5066. {
  5067. uint32_t num_nr_frags;
  5068. qdf_nbuf_t ext_list;
  5069. uint32_t idx = 0;
  5070. qdf_frag_t p_frag;
  5071. if (qdf_likely(is_initial_mem_debug_disabled))
  5072. return;
  5073. if (qdf_unlikely(!buf))
  5074. return;
  5075. /*
5076. * Decrement the refcount for frag debug nodes only when the last user
5077. * of the nbuf calls this API, so as to avoid decrementing the refcount
5078. * on every call except the last one when the nbuf has multiple
5079. * users.
  5080. */
  5081. if (qdf_nbuf_get_users(buf) > 1)
  5082. return;
  5083. /* Take care to update the refcount in the debug entries for frags */
  5084. num_nr_frags = qdf_nbuf_get_nr_frags(buf);
  5085. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  5086. while (idx < num_nr_frags) {
  5087. p_frag = qdf_nbuf_get_frag_addr(buf, idx);
  5088. if (qdf_likely(p_frag))
  5089. qdf_frag_debug_refcount_dec(p_frag, func, line);
  5090. idx++;
  5091. }
  5092. /* Take care to update debug entries for frags attached to frag_list */
  5093. ext_list = qdf_nbuf_get_ext_list(buf);
  5094. while (ext_list) {
  5095. if (qdf_nbuf_get_users(ext_list) == 1) {
  5096. idx = 0;
  5097. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  5098. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  5099. while (idx < num_nr_frags) {
  5100. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  5101. if (qdf_likely(p_frag))
  5102. qdf_frag_debug_refcount_dec(p_frag,
  5103. func, line);
  5104. idx++;
  5105. }
  5106. }
  5107. ext_list = qdf_nbuf_queue_next(ext_list);
  5108. }
  5109. }
  5110. qdf_export_symbol(qdf_net_buf_debug_release_frag);
  5111. QDF_STATUS
  5112. qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
  5113. uint16_t idx,
  5114. uint16_t truesize,
  5115. const char *func,
  5116. uint32_t line)
  5117. {
  5118. uint16_t num_frags;
  5119. qdf_frag_t frag;
  5120. if (qdf_unlikely(!nbuf))
  5121. return QDF_STATUS_E_INVAL;
  5122. num_frags = qdf_nbuf_get_nr_frags(nbuf);
  5123. if (idx >= num_frags)
  5124. return QDF_STATUS_E_INVAL;
  5125. if (qdf_likely(is_initial_mem_debug_disabled)) {
  5126. __qdf_nbuf_remove_frag(nbuf, idx, truesize);
  5127. return QDF_STATUS_SUCCESS;
  5128. }
  5129. frag = qdf_nbuf_get_frag_addr(nbuf, idx);
  5130. if (qdf_likely(frag))
  5131. qdf_frag_debug_refcount_dec(frag, func, line);
  5132. __qdf_nbuf_remove_frag(nbuf, idx, truesize);
  5133. return QDF_STATUS_SUCCESS;
  5134. }
  5135. qdf_export_symbol(qdf_nbuf_remove_frag_debug);
  5136. #endif /* NBUF_FRAG_MEMORY_DEBUG */
  5137. qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
  5138. {
  5139. qdf_nbuf_t last_nbuf;
  5140. uint32_t num_frags;
  5141. if (qdf_unlikely(!nbuf))
  5142. return NULL;
  5143. num_frags = qdf_nbuf_get_nr_frags(nbuf);
5144. /* Check whether the nbuf can still hold another frag */
  5145. if (num_frags < QDF_NBUF_MAX_FRAGS)
  5146. return nbuf;
  5147. if (!__qdf_nbuf_has_fraglist(nbuf))
  5148. return NULL;
  5149. last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
  5150. if (qdf_unlikely(!last_nbuf))
  5151. return NULL;
  5152. num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
  5153. if (num_frags < QDF_NBUF_MAX_FRAGS)
  5154. return last_nbuf;
  5155. return NULL;
  5156. }
  5157. qdf_export_symbol(qdf_get_nbuf_valid_frag);
  5158. QDF_STATUS
  5159. qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
  5160. qdf_nbuf_t nbuf, int offset,
  5161. int frag_len, unsigned int truesize,
  5162. bool take_frag_ref, unsigned int minsize,
  5163. const char *func, uint32_t line)
  5164. {
  5165. qdf_nbuf_t cur_nbuf;
  5166. qdf_nbuf_t this_nbuf;
  5167. cur_nbuf = nbuf;
  5168. this_nbuf = nbuf;
  5169. if (qdf_unlikely(!frag_len || !buf)) {
  5170. qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
  5171. func, line,
  5172. buf, frag_len);
  5173. return QDF_STATUS_E_INVAL;
  5174. }
  5175. this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);
  5176. if (this_nbuf) {
  5177. cur_nbuf = this_nbuf;
  5178. } else {
5179. /* allocate a dummy parent nbuf with minsize bytes of data and headroom */
  5180. this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
  5181. if (qdf_unlikely(!this_nbuf)) {
  5182. qdf_nofl_err("%s : %d no memory to allocate\n",
  5183. func, line);
  5184. return QDF_STATUS_E_NOMEM;
  5185. }
  5186. }
  5187. qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
  5188. take_frag_ref);
  5189. if (this_nbuf != cur_nbuf) {
  5190. /* add new skb to frag list */
  5191. qdf_nbuf_append_ext_list(nbuf, this_nbuf,
  5192. qdf_nbuf_len(this_nbuf));
  5193. }
  5194. return QDF_STATUS_SUCCESS;
  5195. }
  5196. qdf_export_symbol(qdf_nbuf_add_frag_debug);
  5197. #ifdef MEMORY_DEBUG
  5198. void qdf_nbuf_acquire_track_lock(uint32_t index,
  5199. unsigned long irq_flag)
  5200. {
  5201. spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
  5202. irq_flag);
  5203. }
  5204. void qdf_nbuf_release_track_lock(uint32_t index,
  5205. unsigned long irq_flag)
  5206. {
  5207. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
  5208. irq_flag);
  5209. }
  5210. QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
  5211. {
  5212. return gp_qdf_net_buf_track_tbl[index];
  5213. }
  5214. #endif /* MEMORY_DEBUG */
  5215. #ifdef ENHANCED_OS_ABSTRACTION
  5216. void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
  5217. {
  5218. __qdf_nbuf_set_timestamp(buf);
  5219. }
  5220. qdf_export_symbol(qdf_nbuf_set_timestamp);
  5221. uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
  5222. {
  5223. return __qdf_nbuf_get_timestamp(buf);
  5224. }
  5225. qdf_export_symbol(qdf_nbuf_get_timestamp);
  5226. uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
  5227. {
  5228. return __qdf_nbuf_get_timestamp_us(buf);
  5229. }
  5230. qdf_export_symbol(qdf_nbuf_get_timestamp_us);
  5231. uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
  5232. {
  5233. return __qdf_nbuf_get_timedelta_us(buf);
  5234. }
  5235. qdf_export_symbol(qdf_nbuf_get_timedelta_us);
  5236. uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
  5237. {
  5238. return __qdf_nbuf_get_timedelta_ms(buf);
  5239. }
  5240. qdf_export_symbol(qdf_nbuf_get_timedelta_ms);
  5241. qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
  5242. {
  5243. return __qdf_nbuf_net_timedelta(t);
  5244. }
  5245. qdf_export_symbol(qdf_nbuf_net_timedelta);
  5246. #endif