qdf_nbuf.c
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_nbuf.c
 * QCA driver framework (QDF) network buffer management APIs
 */

#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/inetdevice.h>
#include <qdf_atomic.h>
#include <qdf_debugfs.h>
#include <qdf_lock.h>
#include <qdf_mem.h>
#include <qdf_module.h>
#include <qdf_nbuf.h>
#include <qdf_status.h>
#include "qdf_str.h"
#include <qdf_trace.h>
#include "qdf_tracker.h"
#include <qdf_types.h>
#include <net/ieee80211_radiotap.h>
#include <pld_common.h>
#include <qdf_crypto.h>
#include <linux/igmp.h>
#include <net/mld.h>

#if defined(FEATURE_TSO)
#include <net/ipv6.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#endif /* FEATURE_TSO */

#ifdef IPA_OFFLOAD
#include <i_qdf_ipa_wdi3.h>
#endif /* IPA_OFFLOAD */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)
#define qdf_nbuf_users_inc atomic_inc
#define qdf_nbuf_users_dec atomic_dec
#define qdf_nbuf_users_set atomic_set
#define qdf_nbuf_users_read atomic_read
#else
#define qdf_nbuf_users_inc refcount_inc
#define qdf_nbuf_users_dec refcount_dec
#define qdf_nbuf_users_set refcount_set
#define qdf_nbuf_users_read refcount_read
#endif /* KERNEL_VERSION(4, 13, 0) */

#define IEEE80211_RADIOTAP_VHT_BW_20 0
#define IEEE80211_RADIOTAP_VHT_BW_40 1
#define IEEE80211_RADIOTAP_VHT_BW_80 2
#define IEEE80211_RADIOTAP_VHT_BW_160 3

#define RADIOTAP_VHT_BW_20 0
#define RADIOTAP_VHT_BW_40 1
#define RADIOTAP_VHT_BW_80 4
#define RADIOTAP_VHT_BW_160 11

/* tx status */
#define RADIOTAP_TX_STATUS_FAIL 1
#define RADIOTAP_TX_STATUS_NOACK 2

/* channel number to freq conversion */
#define CHANNEL_NUM_14 14
#define CHANNEL_NUM_15 15
#define CHANNEL_NUM_27 27
#define CHANNEL_NUM_35 35
#define CHANNEL_NUM_182 182
#define CHANNEL_NUM_197 197
#define CHANNEL_FREQ_2484 2484
#define CHANNEL_FREQ_2407 2407
#define CHANNEL_FREQ_2512 2512
#define CHANNEL_FREQ_5000 5000
#define CHANNEL_FREQ_4000 4000
#define CHANNEL_FREQ_5150 5150
#define FREQ_MULTIPLIER_CONST_5MHZ 5
#define FREQ_MULTIPLIER_CONST_20MHZ 20
#define RADIOTAP_5G_SPECTRUM_CHANNEL 0x0100
#define RADIOTAP_2G_SPECTRUM_CHANNEL 0x0080
#define RADIOTAP_CCK_CHANNEL 0x0020
#define RADIOTAP_OFDM_CHANNEL 0x0040

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
#include <qdf_mc_timer.h>

struct qdf_track_timer {
	qdf_mc_timer_t track_timer;
	qdf_atomic_t alloc_fail_cnt;
};

static struct qdf_track_timer alloc_track_timer;

#define QDF_NBUF_ALLOC_EXPIRE_TIMER_MS 5000
#define QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD 50
#endif

#ifdef NBUF_MEMORY_DEBUG
/* SMMU crash indication */
static qdf_atomic_t smmu_crashed;
/* Number of nbuf not added to history */
unsigned long g_histroy_add_drop;
#endif

/* Packet Counter */
static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];

#ifdef QDF_NBUF_GLOBAL_COUNT
#define NBUF_DEBUGFS_NAME "nbuf_counters"
static qdf_atomic_t nbuf_count;
#endif

#if defined(NBUF_MEMORY_DEBUG) || defined(QDF_NBUF_GLOBAL_COUNT)
static bool is_initial_mem_debug_disabled;
#endif

/**
 * __qdf_nbuf_get_ip_offset() - Get IPV4/V6 header offset
 * @data: Pointer to network data buffer
 *
 * Get the IP header offset when an 802.1Q or 802.1AD VLAN
 * tag is present in the L2 header.
 *
 * Return: IP header offset
 */
static inline uint8_t __qdf_nbuf_get_ip_offset(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(data +
				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
		return QDF_NBUF_TRAC_VLAN_IP_OFFSET;
	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
		return QDF_NBUF_TRAC_DOUBLE_VLAN_IP_OFFSET;

	return QDF_NBUF_TRAC_IP_OFFSET;
}

/**
 * __qdf_nbuf_get_ether_type() - Get the ether type
 * @data: Pointer to network data buffer
 *
 * Get the ether type when an 802.1Q or 802.1AD VLAN tag is
 * present in the L2 header. The value is returned in network
 * byte order, e.g. for IPv4 data (ether type 0x0800) the
 * returned value is 0x0008.
 *
 * Return: ether type
 */
static inline uint16_t __qdf_nbuf_get_ether_type(uint8_t *data)
{
	uint16_t ether_type;

	ether_type = *(uint16_t *)(data +
				   QDF_NBUF_TRAC_ETH_TYPE_OFFSET);

	if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q)))
		ether_type = *(uint16_t *)(data +
				QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
	else if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021AD)))
		ether_type = *(uint16_t *)(data +
				QDF_NBUF_TRAC_DOUBLE_VLAN_ETH_TYPE_OFFSET);

	return ether_type;
}
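
/*
 * Usage sketch (illustrative only, not compiled): the two helpers above are
 * meant to be called on the start of the Ethernet header. The wrapper below
 * is hypothetical; qdf_nbuf_data() is assumed to return the L2 header pointer.
 */
#if 0
static bool example_is_ipv4_pkt(qdf_nbuf_t nbuf)
{
	uint8_t *data = qdf_nbuf_data(nbuf);
	/* ether type comes back in network byte order, so compare swapped */
	uint16_t ether_type = __qdf_nbuf_get_ether_type(data);
	uint8_t ip_offset = __qdf_nbuf_get_ip_offset(data);

	(void)ip_offset; /* offset of the IP header, VLAN tags accounted for */
	return ether_type == QDF_SWAP_U16(0x0800);
}
#endif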

void qdf_nbuf_tx_desc_count_display(void)
{
	qdf_debug("Current Snapshot of the Driver:");
	qdf_debug("Data Packets:");
	qdf_debug("HDD %d TXRX_Q %d TXRX %d HTT %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
		  (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
		   nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		   nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
	qdf_debug(" HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
	qdf_debug("Mgmt Packets:");
	qdf_debug("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d",
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
		  nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
}

qdf_export_symbol(qdf_nbuf_tx_desc_count_display);

/**
 * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
 * @packet_type: packet type, either mgmt or data
 * @current_state: layer at which the packet is currently present
 *
 * Return: none
 */
static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
						 uint8_t current_state)
{
	switch (packet_type) {
	case QDF_NBUF_TX_PKT_MGMT_TRACK:
		nbuf_tx_mgmt[current_state]++;
		break;
	case QDF_NBUF_TX_PKT_DATA_TRACK:
		nbuf_tx_data[current_state]++;
		break;
	default:
		break;
	}
}

void qdf_nbuf_tx_desc_count_clear(void)
{
	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
}

qdf_export_symbol(qdf_nbuf_tx_desc_count_clear);

void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
{
	/*
	 * Only Mgmt, Data Packets are tracked. WMI messages
	 * such as scan commands are not tracked
	 */
	uint8_t packet_type;

	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);

	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
	    (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
		return;
	}

	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
	qdf_nbuf_tx_desc_count_update(packet_type,
				      current_state);
}

qdf_export_symbol(qdf_nbuf_set_state);
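
/*
 * Usage sketch (illustrative only, not compiled): a TX layer hands a tracked
 * data frame to the next layer and records the transition. The caller shown
 * here is hypothetical; the packet track is assumed to have been set to
 * QDF_NBUF_TX_PKT_DATA_TRACK earlier via QDF_NBUF_CB_TX_PACKET_TRACK().
 */
#if 0
static void example_hand_off_to_txrx(qdf_nbuf_t nbuf)
{
	/* bumps nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] through
	 * qdf_nbuf_tx_desc_count_update()
	 */
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX);
}
#endif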

#ifdef FEATURE_NBUFF_REPLENISH_TIMER
/**
 * __qdf_nbuf_start_replenish_timer() - Start alloc fail replenish timer
 *
 * This function starts the alloc fail replenish timer.
 *
 * Return: void
 */
static inline void __qdf_nbuf_start_replenish_timer(void)
{
	qdf_atomic_inc(&alloc_track_timer.alloc_fail_cnt);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) !=
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_start(&alloc_track_timer.track_timer,
				   QDF_NBUF_ALLOC_EXPIRE_TIMER_MS);
}

/**
 * __qdf_nbuf_stop_replenish_timer() - Stop alloc fail replenish timer
 *
 * This function stops the alloc fail replenish timer.
 *
 * Return: void
 */
static inline void __qdf_nbuf_stop_replenish_timer(void)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) == 0)
		return;

	qdf_atomic_set(&alloc_track_timer.alloc_fail_cnt, 0);
	if (qdf_mc_timer_get_current_state(&alloc_track_timer.track_timer) ==
	    QDF_TIMER_STATE_RUNNING)
		qdf_mc_timer_stop(&alloc_track_timer.track_timer);
}

/**
 * qdf_replenish_expire_handler() - Replenish expire handler
 * @arg: unused callback argument
 *
 * This function triggers when the alloc fail replenish timer expires.
 *
 * Return: void
 */
static void qdf_replenish_expire_handler(void *arg)
{
	if (qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt) >
	    QDF_NBUF_ALLOC_EXPIRE_CNT_THRESHOLD) {
		qdf_print("ERROR: NBUF allocation timer expired Fail count %d",
			  qdf_atomic_read(&alloc_track_timer.alloc_fail_cnt));

		/* Error handling here */
	}
}

void __qdf_nbuf_init_replenish_timer(void)
{
	qdf_mc_timer_init(&alloc_track_timer.track_timer, QDF_TIMER_TYPE_SW,
			  qdf_replenish_expire_handler, NULL);
}

void __qdf_nbuf_deinit_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
	qdf_mc_timer_destroy(&alloc_track_timer.track_timer);
}

void qdf_nbuf_stop_replenish_timer(void)
{
	__qdf_nbuf_stop_replenish_timer();
}
#else
static inline void __qdf_nbuf_start_replenish_timer(void) {}
static inline void __qdf_nbuf_stop_replenish_timer(void) {}

void qdf_nbuf_stop_replenish_timer(void)
{
}
#endif
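
/*
 * Lifecycle sketch (illustrative only, not compiled): with
 * FEATURE_NBUFF_REPLENISH_TIMER enabled, the timer is expected to be set up
 * once at driver load and torn down at unload; the allocation paths below
 * start/stop it on alloc failure/success. The init/deinit callers shown here
 * are hypothetical.
 */
#if 0
static void example_nbuf_module_init(void)
{
	__qdf_nbuf_init_replenish_timer();
}

static void example_nbuf_module_deinit(void)
{
	/* stops the timer (if running) and destroys it */
	__qdf_nbuf_deinit_replenish_timer();
}
#endif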

/* globals do not need to be initialized to NULL/0 */
qdf_nbuf_trace_update_t qdf_trace_update_cb;
qdf_nbuf_free_t nbuf_free_cb;

#ifdef QDF_NBUF_GLOBAL_COUNT
int __qdf_nbuf_count_get(void)
{
	return qdf_atomic_read(&nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_get);

void __qdf_nbuf_count_inc(qdf_nbuf_t nbuf)
{
	int num_nbuf = 1;
	qdf_nbuf_t ext_list;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	ext_list = qdf_nbuf_get_ext_list(nbuf);

	/* Take care to account for frag_list */
	while (ext_list) {
		++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_add(num_nbuf, &nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_inc);

void __qdf_nbuf_count_dec(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	int num_nbuf;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nbuf = 1;

	/* Take care to account for frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1)
			++num_nbuf;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_atomic_sub(num_nbuf, &nbuf_count);
}

qdf_export_symbol(__qdf_nbuf_count_dec);
#endif
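
/*
 * Usage sketch (illustrative only, not compiled): when QDF_NBUF_GLOBAL_COUNT
 * is enabled, the running allocation count can be sampled to flag suspected
 * nbuf leaks, e.g. at driver unload. The checker below is hypothetical.
 */
#if 0
static void example_check_for_nbuf_leaks(void)
{
	int outstanding = __qdf_nbuf_count_get();

	if (outstanding)
		qdf_debug("%d nbufs still outstanding", outstanding);
}
#endif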

#ifdef NBUF_FRAG_MEMORY_DEBUG
void qdf_nbuf_frag_count_inc(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	uint32_t num_nr_frags;
	uint32_t total_num_nr_frags;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	total_num_nr_frags = num_nr_frags;

	/* Take into account the frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
		total_num_nr_frags += num_nr_frags;
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_frag_count_inc(total_num_nr_frags);
}

qdf_export_symbol(qdf_nbuf_frag_count_inc);

void qdf_nbuf_frag_count_dec(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t ext_list;
	uint32_t num_nr_frags;
	uint32_t total_num_nr_frags;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_nbuf_get_users(nbuf) > 1)
		return;

	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	total_num_nr_frags = num_nr_frags;

	/* Take into account the frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(nbuf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1) {
			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
			total_num_nr_frags += num_nr_frags;
		}
		ext_list = qdf_nbuf_queue_next(ext_list);
	}

	qdf_frag_count_dec(total_num_nr_frags);
}

qdf_export_symbol(qdf_nbuf_frag_count_dec);
#endif

static inline void
qdf_nbuf_set_defaults(struct sk_buff *skb, int align, int reserve)
{
	unsigned long offset;

	memset(skb->cb, 0x0, sizeof(skb->cb));
	skb->dev = NULL;

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/*
	 * XXX:how about we reserve first then align
	 * Align & make sure that the tail & data are adjusted properly
	 */
	if (align) {
		offset = ((unsigned long)skb->data) % align;
		if (offset)
			skb_reserve(skb, align - offset);
	}

	/*
	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
	 * pointer
	 */
	skb_reserve(skb, reserve);
	qdf_nbuf_count_inc(skb);
}

#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86) && \
	!defined(QCA_WIFI_QCN9000)
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	uint32_t lowmem_alloc_tries = 0;

	if (align)
		size += (align - 1);

realloc:
	skb = dev_alloc_skb(size);
	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);
	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		return NULL;
	}

skb_alloc:
	/* Hawkeye M2M emulation cannot handle memory addresses below
	 * 0x50000040. Though we are trying to reserve low memory upfront
	 * to prevent this, we sometimes see SKBs allocated from low memory.
	 */
	if (virt_to_phys(qdf_nbuf_data(skb)) < 0x50000040) {
		lowmem_alloc_tries++;
		if (lowmem_alloc_tries > 100) {
			qdf_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				     size, func, line);
			return NULL;
		} else {
			/* Not freeing the skb, to make sure it
			 * will not get allocated again
			 */
			goto realloc;
		}
	}

	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}
#else
#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	return __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio, func,
				     line);
}
#else
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
				 int align, int prio, const char *func,
				 uint32_t line)
{
	struct sk_buff *skb;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU trying to make
		 * order-3 pages. __netdev_alloc_skb has a 4k page fallback
		 * in case the high-order page allocation fails, so we don't
		 * need to push hard here. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = alloc_skb(size, flags);
	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);
	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}
#endif
#endif

qdf_export_symbol(__qdf_nbuf_alloc);
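
/*
 * Usage sketch (illustrative only, not compiled): callers normally go through
 * the qdf_nbuf_alloc()/qdf_nbuf_free() wrappers rather than the __ variants;
 * the direct calls below only illustrate the alloc/free pairing and the
 * func/line bookkeeping arguments. The sizes and caller are hypothetical.
 */
#if 0
static void example_alloc_and_free(qdf_device_t osdev)
{
	/* 2000 byte buffer, no headroom reserve, 4-byte alignment, prio 0 */
	struct sk_buff *skb = __qdf_nbuf_alloc(osdev, 2000, 0, 4, 0,
					       __func__, __LINE__);

	if (!skb)
		return;

	__qdf_nbuf_free(skb);
}
#endif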

struct sk_buff *__qdf_nbuf_frag_alloc(qdf_device_t osdev, size_t size,
				      int reserve, int align, int prio,
				      const char *func, uint32_t line)
{
	struct sk_buff *skb;
	int flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
	bool atomic = false;

	if (align)
		size += (align - 1);

	if (in_interrupt() || irqs_disabled() || in_atomic()) {
		atomic = true;
		flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
		/*
		 * Observed that kcompactd burns out CPU trying to make
		 * order-3 pages. __netdev_alloc_skb has a 4k page fallback
		 * in case the high-order page allocation fails, so we don't
		 * need to push hard here. Let kcompactd rest in peace.
		 */
		flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
	}

	skb = __netdev_alloc_skb(NULL, size, flags);
	if (skb)
		goto skb_alloc;

	/* 32k page frag alloc failed, try page slab allocation */
	if (likely(!atomic))
		flags |= __GFP_DIRECT_RECLAIM;

	skb = alloc_skb(size, flags);
	if (skb)
		goto skb_alloc;

	skb = pld_nbuf_pre_alloc(size);
	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}

qdf_export_symbol(__qdf_nbuf_frag_alloc);

__qdf_nbuf_t __qdf_nbuf_alloc_no_recycler(size_t size, int reserve, int align,
					  const char *func, uint32_t line)
{
	qdf_nbuf_t nbuf;
	unsigned long offset;

	if (align)
		size += (align - 1);

	nbuf = alloc_skb(size, GFP_ATOMIC);
	if (!nbuf)
		goto ret_nbuf;

	memset(nbuf->cb, 0x0, sizeof(nbuf->cb));

	skb_reserve(nbuf, reserve);

	if (align) {
		offset = ((unsigned long)nbuf->data) % align;
		if (offset)
			skb_reserve(nbuf, align - offset);
	}

	qdf_nbuf_count_inc(nbuf);

ret_nbuf:
	return nbuf;
}

qdf_export_symbol(__qdf_nbuf_alloc_no_recycler);

void __qdf_nbuf_free(struct sk_buff *skb)
{
	if (pld_nbuf_pre_alloc_free(skb))
		return;

	qdf_nbuf_frag_count_dec(skb);

	qdf_nbuf_count_dec(skb);
	if (nbuf_free_cb)
		nbuf_free_cb(skb);
	else
		dev_kfree_skb_any(skb);
}

qdf_export_symbol(__qdf_nbuf_free);

__qdf_nbuf_t __qdf_nbuf_clone(__qdf_nbuf_t skb)
{
	qdf_nbuf_t skb_new = NULL;

	skb_new = skb_clone(skb, GFP_ATOMIC);
	if (skb_new) {
		qdf_nbuf_frag_count_inc(skb_new);
		qdf_nbuf_count_inc(skb_new);
	}

	return skb_new;
}

qdf_export_symbol(__qdf_nbuf_clone);
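
/*
 * Usage sketch (illustrative only, not compiled): __qdf_nbuf_clone() creates
 * a new skb header that shares the original data buffer, and the global/frag
 * debug counters are bumped inside the call. The caller below is hypothetical.
 */
#if 0
static qdf_nbuf_t example_clone_for_monitor(qdf_nbuf_t orig)
{
	qdf_nbuf_t copy = __qdf_nbuf_clone(orig);

	if (!copy)
		return NULL;

	/* both buffers must eventually be freed with __qdf_nbuf_free() */
	return copy;
}
#endif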

struct sk_buff *
__qdf_nbuf_page_frag_alloc(qdf_device_t osdev, size_t size, int reserve,
			   int align, __qdf_frag_cache_t *pf_cache,
			   const char *func, uint32_t line)
{
	struct sk_buff *skb;
	qdf_frag_t frag_data;
	size_t orig_size = size;
	int flags = GFP_KERNEL;

	if (align)
		size += (align - 1);

	size += NET_SKB_PAD;
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	size = SKB_DATA_ALIGN(size);

	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	frag_data = page_frag_alloc(pf_cache, size, flags);
	if (!frag_data) {
		qdf_rl_nofl_err("page frag alloc failed %zuB @ %s:%d",
				size, func, line);
		return __qdf_nbuf_alloc(osdev, orig_size, reserve, align, 0,
					func, line);
	}

	skb = build_skb(frag_data, size);
	if (skb) {
		skb_reserve(skb, NET_SKB_PAD);
		goto skb_alloc;
	}

	/* Free the data allocated from pf_cache */
	page_frag_free(frag_data);

	size = orig_size + align - 1;

	skb = pld_nbuf_pre_alloc(size);
	if (!skb) {
		qdf_rl_nofl_err("NBUF alloc failed %zuB @ %s:%d",
				size, func, line);
		__qdf_nbuf_start_replenish_timer();
		return NULL;
	}

	__qdf_nbuf_stop_replenish_timer();

skb_alloc:
	qdf_nbuf_set_defaults(skb, align, reserve);

	return skb;
}

qdf_export_symbol(__qdf_nbuf_page_frag_alloc);

#ifdef QCA_DP_TX_NBUF_LIST_FREE
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
	dev_kfree_skb_list_fast(nbuf_queue_head);
}
#else
void
__qdf_nbuf_dev_kfree_list(__qdf_nbuf_queue_head_t *nbuf_queue_head)
{
}
#endif

qdf_export_symbol(__qdf_nbuf_dev_kfree_list);

#ifdef NBUF_MEMORY_DEBUG
struct qdf_nbuf_event {
	qdf_nbuf_t nbuf;
	char func[QDF_MEM_FUNC_NAME_SIZE];
	uint32_t line;
	enum qdf_nbuf_event_type type;
	uint64_t timestamp;
	qdf_dma_addr_t iova;
};

#ifndef QDF_NBUF_HISTORY_SIZE
#define QDF_NBUF_HISTORY_SIZE 4096
#endif
static qdf_atomic_t qdf_nbuf_history_index;
static struct qdf_nbuf_event qdf_nbuf_history[QDF_NBUF_HISTORY_SIZE];

static int32_t qdf_nbuf_circular_index_next(qdf_atomic_t *index, int size)
{
	int32_t next = qdf_atomic_inc_return(index);

	if (next == size)
		qdf_atomic_sub(size, index);

	return next % size;
}

void
qdf_nbuf_history_add(qdf_nbuf_t nbuf, const char *func, uint32_t line,
		     enum qdf_nbuf_event_type type)
{
	int32_t idx = qdf_nbuf_circular_index_next(&qdf_nbuf_history_index,
						   QDF_NBUF_HISTORY_SIZE);
	struct qdf_nbuf_event *event = &qdf_nbuf_history[idx];

	if (qdf_atomic_read(&smmu_crashed)) {
		g_histroy_add_drop++;
		return;
	}

	event->nbuf = nbuf;
	qdf_str_lcopy(event->func, func, QDF_MEM_FUNC_NAME_SIZE);
	event->line = line;
	event->type = type;
	event->timestamp = qdf_get_log_timestamp();
	if (type == QDF_NBUF_MAP || type == QDF_NBUF_UNMAP ||
	    type == QDF_NBUF_SMMU_MAP || type == QDF_NBUF_SMMU_UNMAP)
		event->iova = QDF_NBUF_CB_PADDR(nbuf);
	else
		event->iova = 0;
}

void qdf_set_smmu_fault_state(bool smmu_fault_state)
{
	qdf_atomic_set(&smmu_crashed, smmu_fault_state);
	if (!smmu_fault_state)
		g_histroy_add_drop = 0;
}

qdf_export_symbol(qdf_set_smmu_fault_state);
#endif /* NBUF_MEMORY_DEBUG */
  726. #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
  727. #define qdf_nbuf_smmu_map_tracker_bits 11 /* 2048 buckets */
  728. qdf_tracker_declare(qdf_nbuf_smmu_map_tracker, qdf_nbuf_smmu_map_tracker_bits,
  729. "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");
  730. static void qdf_nbuf_smmu_map_tracking_init(void)
  731. {
  732. qdf_tracker_init(&qdf_nbuf_smmu_map_tracker);
  733. }
  734. static void qdf_nbuf_smmu_map_tracking_deinit(void)
  735. {
  736. qdf_tracker_deinit(&qdf_nbuf_smmu_map_tracker);
  737. }
  738. static QDF_STATUS
  739. qdf_nbuf_track_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
  740. {
  741. if (is_initial_mem_debug_disabled)
  742. return QDF_STATUS_SUCCESS;
  743. return qdf_tracker_track(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
  744. }
  745. static void
  746. qdf_nbuf_untrack_smmu_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
  747. {
  748. if (is_initial_mem_debug_disabled)
  749. return;
  750. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_SMMU_UNMAP);
  751. qdf_tracker_untrack(&qdf_nbuf_smmu_map_tracker, nbuf, func, line);
  752. }
  753. void qdf_nbuf_map_check_for_smmu_leaks(void)
  754. {
  755. qdf_tracker_check_for_leaks(&qdf_nbuf_smmu_map_tracker);
  756. }
  757. #ifdef IPA_OFFLOAD
  758. QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
  759. uint8_t hdl,
  760. uint8_t num_buffers,
  761. qdf_mem_info_t *info,
  762. const char *func,
  763. uint32_t line)
  764. {
  765. QDF_STATUS status;
  766. status = qdf_nbuf_track_smmu_map(nbuf, func, line);
  767. if (QDF_IS_STATUS_ERROR(status))
  768. return status;
  769. status = __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
  770. if (QDF_IS_STATUS_ERROR(status)) {
  771. qdf_nbuf_untrack_smmu_map(nbuf, func, line);
  772. } else {
  773. if (!is_initial_mem_debug_disabled)
  774. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_MAP);
  775. qdf_net_buf_debug_update_smmu_map_node(nbuf, info->iova,
  776. info->pa, func, line);
  777. }
  778. return status;
  779. }
  780. qdf_export_symbol(qdf_nbuf_smmu_map_debug);
  781. QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
  782. uint8_t hdl,
  783. uint8_t num_buffers,
  784. qdf_mem_info_t *info,
  785. const char *func,
  786. uint32_t line)
  787. {
  788. QDF_STATUS status;
  789. qdf_nbuf_untrack_smmu_map(nbuf, func, line);
  790. status = __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
  791. qdf_net_buf_debug_update_smmu_unmap_node(nbuf, info->iova,
  792. info->pa, func, line);
  793. return status;
  794. }
  795. qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
  796. #endif /* IPA_OFFLOAD */
  797. static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
  798. const char *func,
  799. uint32_t line)
  800. {
  801. char map_func[QDF_TRACKER_FUNC_SIZE];
  802. uint32_t map_line;
  803. if (!qdf_tracker_lookup(&qdf_nbuf_smmu_map_tracker, nbuf,
  804. &map_func, &map_line))
  805. return;
  806. QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
  807. func, line, map_func, map_line);
  808. }
  809. static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
  810. {
  811. p_node->smmu_unmap_line_num = 0;
  812. p_node->is_nbuf_smmu_mapped = false;
  813. p_node->smmu_map_line_num = 0;
  814. p_node->smmu_map_func_name[0] = '\0';
  815. p_node->smmu_unmap_func_name[0] = '\0';
  816. p_node->smmu_unmap_iova_addr = 0;
  817. p_node->smmu_unmap_pa_addr = 0;
  818. p_node->smmu_map_iova_addr = 0;
  819. p_node->smmu_map_pa_addr = 0;
  820. }
  821. #else /* !NBUF_SMMU_MAP_UNMAP_DEBUG */
  822. #ifdef NBUF_MEMORY_DEBUG
  823. static void qdf_nbuf_smmu_map_tracking_init(void)
  824. {
  825. }
  826. static void qdf_nbuf_smmu_map_tracking_deinit(void)
  827. {
  828. }
  829. static void qdf_nbuf_panic_on_free_if_smmu_mapped(qdf_nbuf_t nbuf,
  830. const char *func,
  831. uint32_t line)
  832. {
  833. }
  834. static inline void qdf_net_buf_update_smmu_params(QDF_NBUF_TRACK *p_node)
  835. {
  836. }
  837. #endif /* NBUF_MEMORY_DEBUG */
  838. #ifdef IPA_OFFLOAD
  839. QDF_STATUS qdf_nbuf_smmu_map_debug(qdf_nbuf_t nbuf,
  840. uint8_t hdl,
  841. uint8_t num_buffers,
  842. qdf_mem_info_t *info,
  843. const char *func,
  844. uint32_t line)
  845. {
  846. return __qdf_ipa_wdi_create_smmu_mapping(hdl, num_buffers, info);
  847. }
  848. qdf_export_symbol(qdf_nbuf_smmu_map_debug);
  849. QDF_STATUS qdf_nbuf_smmu_unmap_debug(qdf_nbuf_t nbuf,
  850. uint8_t hdl,
  851. uint8_t num_buffers,
  852. qdf_mem_info_t *info,
  853. const char *func,
  854. uint32_t line)
  855. {
  856. return __qdf_ipa_wdi_release_smmu_mapping(hdl, num_buffers, info);
  857. }
  858. qdf_export_symbol(qdf_nbuf_smmu_unmap_debug);
  859. #endif /* IPA_OFFLOAD */
  860. #endif /* NBUF_SMMU_MAP_UNMAP_DEBUG */
#ifdef NBUF_MAP_UNMAP_DEBUG
#define qdf_nbuf_map_tracker_bits 11 /* 2048 buckets */
qdf_tracker_declare(qdf_nbuf_map_tracker, qdf_nbuf_map_tracker_bits,
		    "nbuf map-no-unmap events", "nbuf map", "nbuf unmap");

static void qdf_nbuf_map_tracking_init(void)
{
	qdf_tracker_init(&qdf_nbuf_map_tracker);
}

static void qdf_nbuf_map_tracking_deinit(void)
{
	qdf_tracker_deinit(&qdf_nbuf_map_tracker);
}

static QDF_STATUS
qdf_nbuf_track_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return QDF_STATUS_SUCCESS;

	return qdf_tracker_track(&qdf_nbuf_map_tracker, nbuf, func, line);
}

static void
qdf_nbuf_untrack_map(qdf_nbuf_t nbuf, const char *func, uint32_t line)
{
	if (is_initial_mem_debug_disabled)
		return;

	qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_UNMAP);
	qdf_tracker_untrack(&qdf_nbuf_map_tracker, nbuf, func, line);
}

void qdf_nbuf_map_check_for_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_nbuf_map_tracker);
}

QDF_STATUS qdf_nbuf_map_debug(qdf_device_t osdev,
			      qdf_nbuf_t buf,
			      qdf_dma_dir_t dir,
			      const char *func,
			      uint32_t line)
{
	QDF_STATUS status;

	status = qdf_nbuf_track_map(buf, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = __qdf_nbuf_map(osdev, buf, dir);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_nbuf_untrack_map(buf, func, line);
	} else {
		if (!is_initial_mem_debug_disabled)
			qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
		qdf_net_buf_debug_update_map_node(buf, func, line);
	}

	return status;
}

qdf_export_symbol(qdf_nbuf_map_debug);

void qdf_nbuf_unmap_debug(qdf_device_t osdev,
			  qdf_nbuf_t buf,
			  qdf_dma_dir_t dir,
			  const char *func,
			  uint32_t line)
{
	qdf_nbuf_untrack_map(buf, func, line);
	__qdf_nbuf_unmap_single(osdev, buf, dir);
	qdf_net_buf_debug_update_unmap_node(buf, func, line);
}

qdf_export_symbol(qdf_nbuf_unmap_debug);
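/*
 * Illustrative sketch (not part of the driver): with NBUF_MAP_UNMAP_DEBUG
 * enabled, a caller is expected to pair the debug map/unmap wrappers so the
 * tracker records both events, and qdf_nbuf_map_check_for_leaks() then
 * reports any nbuf that was mapped but never unmapped. The osdev/nbuf
 * variables below are hypothetical.
 *
 *	QDF_STATUS status;
 *
 *	status = qdf_nbuf_map_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *				    __func__, __LINE__);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	(hand the buffer to hardware and wait for completion)
 *
 *	qdf_nbuf_unmap_debug(osdev, nbuf, QDF_DMA_TO_DEVICE,
 *			     __func__, __LINE__);
 *
 *	qdf_nbuf_map_check_for_leaks();	(typically run at driver unload)
 */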
  924. QDF_STATUS qdf_nbuf_map_single_debug(qdf_device_t osdev,
  925. qdf_nbuf_t buf,
  926. qdf_dma_dir_t dir,
  927. const char *func,
  928. uint32_t line)
  929. {
  930. QDF_STATUS status;
  931. status = qdf_nbuf_track_map(buf, func, line);
  932. if (QDF_IS_STATUS_ERROR(status))
  933. return status;
  934. status = __qdf_nbuf_map_single(osdev, buf, dir);
  935. if (QDF_IS_STATUS_ERROR(status)) {
  936. qdf_nbuf_untrack_map(buf, func, line);
  937. } else {
  938. if (!is_initial_mem_debug_disabled)
  939. qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
  940. qdf_net_buf_debug_update_map_node(buf, func, line);
  941. }
  942. return status;
  943. }
  944. qdf_export_symbol(qdf_nbuf_map_single_debug);
  945. void qdf_nbuf_unmap_single_debug(qdf_device_t osdev,
  946. qdf_nbuf_t buf,
  947. qdf_dma_dir_t dir,
  948. const char *func,
  949. uint32_t line)
  950. {
  951. qdf_nbuf_untrack_map(buf, func, line);
  952. __qdf_nbuf_unmap_single(osdev, buf, dir);
  953. qdf_net_buf_debug_update_unmap_node(buf, func, line);
  954. }
  955. qdf_export_symbol(qdf_nbuf_unmap_single_debug);
  956. QDF_STATUS qdf_nbuf_map_nbytes_debug(qdf_device_t osdev,
  957. qdf_nbuf_t buf,
  958. qdf_dma_dir_t dir,
  959. int nbytes,
  960. const char *func,
  961. uint32_t line)
  962. {
  963. QDF_STATUS status;
  964. status = qdf_nbuf_track_map(buf, func, line);
  965. if (QDF_IS_STATUS_ERROR(status))
  966. return status;
  967. status = __qdf_nbuf_map_nbytes(osdev, buf, dir, nbytes);
  968. if (QDF_IS_STATUS_ERROR(status)) {
  969. qdf_nbuf_untrack_map(buf, func, line);
  970. } else {
  971. if (!is_initial_mem_debug_disabled)
  972. qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
  973. qdf_net_buf_debug_update_map_node(buf, func, line);
  974. }
  975. return status;
  976. }
  977. qdf_export_symbol(qdf_nbuf_map_nbytes_debug);
  978. void qdf_nbuf_unmap_nbytes_debug(qdf_device_t osdev,
  979. qdf_nbuf_t buf,
  980. qdf_dma_dir_t dir,
  981. int nbytes,
  982. const char *func,
  983. uint32_t line)
  984. {
  985. qdf_nbuf_untrack_map(buf, func, line);
  986. __qdf_nbuf_unmap_nbytes(osdev, buf, dir, nbytes);
  987. qdf_net_buf_debug_update_unmap_node(buf, func, line);
  988. }
  989. qdf_export_symbol(qdf_nbuf_unmap_nbytes_debug);
  990. QDF_STATUS qdf_nbuf_map_nbytes_single_debug(qdf_device_t osdev,
  991. qdf_nbuf_t buf,
  992. qdf_dma_dir_t dir,
  993. int nbytes,
  994. const char *func,
  995. uint32_t line)
  996. {
  997. QDF_STATUS status;
  998. status = qdf_nbuf_track_map(buf, func, line);
  999. if (QDF_IS_STATUS_ERROR(status))
  1000. return status;
  1001. status = __qdf_nbuf_map_nbytes_single(osdev, buf, dir, nbytes);
  1002. if (QDF_IS_STATUS_ERROR(status)) {
  1003. qdf_nbuf_untrack_map(buf, func, line);
  1004. } else {
  1005. if (!is_initial_mem_debug_disabled)
  1006. qdf_nbuf_history_add(buf, func, line, QDF_NBUF_MAP);
  1007. qdf_net_buf_debug_update_map_node(buf, func, line);
  1008. }
  1009. return status;
  1010. }
  1011. qdf_export_symbol(qdf_nbuf_map_nbytes_single_debug);
  1012. void qdf_nbuf_unmap_nbytes_single_debug(qdf_device_t osdev,
  1013. qdf_nbuf_t buf,
  1014. qdf_dma_dir_t dir,
  1015. int nbytes,
  1016. const char *func,
  1017. uint32_t line)
  1018. {
  1019. qdf_nbuf_untrack_map(buf, func, line);
  1020. __qdf_nbuf_unmap_nbytes_single(osdev, buf, dir, nbytes);
  1021. qdf_net_buf_debug_update_unmap_node(buf, func, line);
  1022. }
  1023. qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_debug);
  1024. void qdf_nbuf_unmap_nbytes_single_paddr_debug(qdf_device_t osdev,
  1025. qdf_nbuf_t buf,
  1026. qdf_dma_addr_t phy_addr,
  1027. qdf_dma_dir_t dir, int nbytes,
  1028. const char *func, uint32_t line)
  1029. {
  1030. qdf_nbuf_untrack_map(buf, func, line);
  1031. __qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
  1032. __qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
  1033. qdf_net_buf_debug_update_unmap_node(buf, func, line);
  1034. }
  1035. qdf_export_symbol(qdf_nbuf_unmap_nbytes_single_paddr_debug);
  1036. static void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
  1037. const char *func,
  1038. uint32_t line)
  1039. {
  1040. char map_func[QDF_TRACKER_FUNC_SIZE];
  1041. uint32_t map_line;
  1042. if (!qdf_tracker_lookup(&qdf_nbuf_map_tracker, nbuf,
  1043. &map_func, &map_line))
  1044. return;
  1045. QDF_MEMDEBUG_PANIC("Nbuf freed @ %s:%u while mapped from %s:%u",
  1046. func, line, map_func, map_line);
  1047. }
  1048. #else
  1049. static inline void qdf_nbuf_map_tracking_init(void)
  1050. {
  1051. }
  1052. static inline void qdf_nbuf_map_tracking_deinit(void)
  1053. {
  1054. }
  1055. static inline void qdf_nbuf_panic_on_free_if_mapped(qdf_nbuf_t nbuf,
  1056. const char *func,
  1057. uint32_t line)
  1058. {
  1059. }
  1060. #endif /* NBUF_MAP_UNMAP_DEBUG */
  1061. #ifdef QDF_OS_DEBUG
  1062. QDF_STATUS
  1063. __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
  1064. {
  1065. struct skb_shared_info *sh = skb_shinfo(skb);
  1066. qdf_assert((dir == QDF_DMA_TO_DEVICE)
  1067. || (dir == QDF_DMA_FROM_DEVICE));
  1068. /*
  1069. * Assume there's only a single fragment.
  1070. * To support multiple fragments, it would be necessary to change
  1071. * qdf_nbuf_t to be a separate object that stores meta-info
  1072. * (including the bus address for each fragment) and a pointer
  1073. * to the underlying sk_buff.
  1074. */
  1075. qdf_assert(sh->nr_frags == 0);
  1076. return __qdf_nbuf_map_single(osdev, skb, dir);
  1077. }
  1078. qdf_export_symbol(__qdf_nbuf_map);
  1079. #else
  1080. QDF_STATUS
  1081. __qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
  1082. {
  1083. return __qdf_nbuf_map_single(osdev, skb, dir);
  1084. }
  1085. qdf_export_symbol(__qdf_nbuf_map);
  1086. #endif
  1087. void
  1088. __qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
  1089. qdf_dma_dir_t dir)
  1090. {
  1091. qdf_assert((dir == QDF_DMA_TO_DEVICE)
  1092. || (dir == QDF_DMA_FROM_DEVICE));
  1093. /*
  1094. * Assume there's a single fragment.
  1095. * If this is not true, the assertion in __qdf_nbuf_map will catch it.
  1096. */
  1097. __qdf_nbuf_unmap_single(osdev, skb, dir);
  1098. }
  1099. qdf_export_symbol(__qdf_nbuf_unmap);
  1100. #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
  1101. QDF_STATUS
  1102. __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  1103. {
  1104. qdf_dma_addr_t paddr;
  1105. QDF_NBUF_CB_PADDR(buf) = paddr = (uintptr_t)buf->data;
  1106. BUILD_BUG_ON(sizeof(paddr) < sizeof(buf->data));
  1107. BUILD_BUG_ON(sizeof(QDF_NBUF_CB_PADDR(buf)) < sizeof(buf->data));
  1108. return QDF_STATUS_SUCCESS;
  1109. }
  1110. qdf_export_symbol(__qdf_nbuf_map_single);
  1111. #else
  1112. QDF_STATUS
  1113. __qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
  1114. {
  1115. qdf_dma_addr_t paddr;
  1116. /* assume that the OS only provides a single fragment */
  1117. QDF_NBUF_CB_PADDR(buf) = paddr =
  1118. dma_map_single(osdev->dev, buf->data,
  1119. skb_end_pointer(buf) - buf->data,
  1120. __qdf_dma_dir_to_os(dir));
  1121. __qdf_record_nbuf_nbytes(
  1122. __qdf_nbuf_get_end_offset(buf), dir, true);
  1123. return dma_mapping_error(osdev->dev, paddr)
  1124. ? QDF_STATUS_E_FAILURE
  1125. : QDF_STATUS_SUCCESS;
  1126. }
  1127. qdf_export_symbol(__qdf_nbuf_map_single);
  1128. #endif
  1129. #if defined(A_SIMOS_DEVHOST) || defined(HIF_USB) || defined(HIF_SDIO)
  1130. void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
  1131. qdf_dma_dir_t dir)
  1132. {
  1133. }
  1134. #else
  1135. void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
  1136. qdf_dma_dir_t dir)
  1137. {
  1138. if (QDF_NBUF_CB_PADDR(buf)) {
  1139. __qdf_record_nbuf_nbytes(
  1140. __qdf_nbuf_get_end_offset(buf), dir, false);
  1141. dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
  1142. skb_end_pointer(buf) - buf->data,
  1143. __qdf_dma_dir_to_os(dir));
  1144. }
  1145. }
  1146. #endif
  1147. qdf_export_symbol(__qdf_nbuf_unmap_single);
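/*
 * Illustrative sketch (not part of the driver): on the dma_map_single() path
 * above, the bus address is cached in QDF_NBUF_CB_PADDR(buf), so a caller can
 * fetch it after a successful map and must treat a mapping error as fatal for
 * that buffer. The osdev/nbuf/paddr variables below are hypothetical.
 *
 *	if (QDF_IS_STATUS_ERROR(__qdf_nbuf_map_single(osdev, nbuf,
 *						      QDF_DMA_TO_DEVICE)))
 *		return QDF_STATUS_E_FAILURE;
 *
 *	paddr = QDF_NBUF_CB_PADDR(nbuf);	(program this into the HW ring)
 *	...
 *	__qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
 */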
  1148. QDF_STATUS
  1149. __qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
  1150. {
  1151. switch (cksum->l4_result) {
  1152. case QDF_NBUF_RX_CKSUM_NONE:
  1153. skb->ip_summed = CHECKSUM_NONE;
  1154. break;
  1155. case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
  1156. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1157. skb->csum_level = cksum->csum_level;
  1158. break;
  1159. case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
  1160. skb->ip_summed = CHECKSUM_PARTIAL;
  1161. skb->csum = cksum->val;
  1162. break;
  1163. default:
  1164. pr_err("Unknown checksum type\n");
  1165. qdf_assert(0);
  1166. return QDF_STATUS_E_NOSUPPORT;
  1167. }
  1168. return QDF_STATUS_SUCCESS;
  1169. }
  1170. qdf_export_symbol(__qdf_nbuf_set_rx_cksum);
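/*
 * Illustrative sketch (not part of the driver): an rx completion handler that
 * trusts the hardware L4 checksum result would fill qdf_nbuf_rx_cksum_t as
 * below before handing the skb to the stack. The cksum variable is
 * hypothetical.
 *
 *	qdf_nbuf_rx_cksum_t cksum = {0};
 *
 *	cksum.l4_result = QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
 *	cksum.csum_level = 0;			(outermost header verified)
 *	__qdf_nbuf_set_rx_cksum(skb, &cksum);	(sets CHECKSUM_UNNECESSARY)
 */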
  1171. qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
  1172. {
  1173. switch (skb->ip_summed) {
  1174. case CHECKSUM_NONE:
  1175. return QDF_NBUF_TX_CKSUM_NONE;
  1176. case CHECKSUM_PARTIAL:
  1177. return QDF_NBUF_TX_CKSUM_TCP_UDP;
  1178. case CHECKSUM_COMPLETE:
  1179. return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
  1180. default:
  1181. return QDF_NBUF_TX_CKSUM_NONE;
  1182. }
  1183. }
  1184. qdf_export_symbol(__qdf_nbuf_get_tx_cksum);
  1185. uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
  1186. {
  1187. return skb->priority;
  1188. }
  1189. qdf_export_symbol(__qdf_nbuf_get_tid);
  1190. void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
  1191. {
  1192. skb->priority = tid;
  1193. }
  1194. qdf_export_symbol(__qdf_nbuf_set_tid);
  1195. uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
  1196. {
  1197. return QDF_NBUF_EXEMPT_NO_EXEMPTION;
  1198. }
  1199. qdf_export_symbol(__qdf_nbuf_get_exemption_type);
  1200. void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
  1201. {
  1202. qdf_trace_update_cb = cb_func_ptr;
  1203. }
  1204. qdf_export_symbol(__qdf_nbuf_reg_trace_cb);
  1205. enum qdf_proto_subtype
  1206. __qdf_nbuf_data_get_dhcp_subtype(uint8_t *data)
  1207. {
  1208. enum qdf_proto_subtype subtype = QDF_PROTO_INVALID;
  1209. if ((data[QDF_DHCP_OPTION53_OFFSET] == QDF_DHCP_OPTION53) &&
  1210. (data[QDF_DHCP_OPTION53_LENGTH_OFFSET] ==
  1211. QDF_DHCP_OPTION53_LENGTH)) {
  1212. switch (data[QDF_DHCP_OPTION53_STATUS_OFFSET]) {
  1213. case QDF_DHCP_DISCOVER:
  1214. subtype = QDF_PROTO_DHCP_DISCOVER;
  1215. break;
  1216. case QDF_DHCP_REQUEST:
  1217. subtype = QDF_PROTO_DHCP_REQUEST;
  1218. break;
  1219. case QDF_DHCP_OFFER:
  1220. subtype = QDF_PROTO_DHCP_OFFER;
  1221. break;
  1222. case QDF_DHCP_ACK:
  1223. subtype = QDF_PROTO_DHCP_ACK;
  1224. break;
  1225. case QDF_DHCP_NAK:
  1226. subtype = QDF_PROTO_DHCP_NACK;
  1227. break;
  1228. case QDF_DHCP_RELEASE:
  1229. subtype = QDF_PROTO_DHCP_RELEASE;
  1230. break;
  1231. case QDF_DHCP_INFORM:
  1232. subtype = QDF_PROTO_DHCP_INFORM;
  1233. break;
  1234. case QDF_DHCP_DECLINE:
  1235. subtype = QDF_PROTO_DHCP_DECLINE;
  1236. break;
  1237. default:
  1238. break;
  1239. }
  1240. }
  1241. return subtype;
  1242. }
#define EAPOL_WPA_KEY_INFO_ACK BIT(7)
#define EAPOL_WPA_KEY_INFO_MIC BIT(8)
#define EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA BIT(12) /* IEEE 802.11i/RSN only */

/**
 * __qdf_nbuf_data_get_eapol_key() - Get EAPOL key
 * @data: Pointer to EAPOL packet data buffer
 *
 * We can distinguish M1/M3 from M2/M4 by the Ack bit in the key-info field.
 * The relationship between the Ack bit and the EAPOL type is as follows:
 *
 *   EAPOL type  |  M1   M2   M3   M4
 *   ------------------------------------
 *      Ack      |  1    0    1    0
 *   ------------------------------------
 *
 * M1 vs M3 and M2 vs M4 are then differentiated as follows:
 * M2/M4: M4 has a key-data length of 0 (or an all-zero key nonce).
 * M1/M3: M3 has the MIC and/or encrypted-key-data bit set in the key-info
 *        field.
 *
 * Return: subtype of the EAPOL packet.
 */
static inline enum qdf_proto_subtype
__qdf_nbuf_data_get_eapol_key(uint8_t *data)
{
	uint16_t key_info, key_data_length;
	enum qdf_proto_subtype subtype;
	uint64_t *key_nonce;

	key_info = qdf_ntohs((uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_INFO_OFFSET)));
	key_data_length = qdf_ntohs((uint16_t)(*(uint16_t *)
			(data + EAPOL_KEY_DATA_LENGTH_OFFSET)));
	key_nonce = (uint64_t *)(data + EAPOL_WPA_KEY_NONCE_OFFSET);

	if (key_info & EAPOL_WPA_KEY_INFO_ACK)
		if (key_info &
		    (EAPOL_WPA_KEY_INFO_MIC | EAPOL_WPA_KEY_INFO_ENCR_KEY_DATA))
			subtype = QDF_PROTO_EAPOL_M3;
		else
			subtype = QDF_PROTO_EAPOL_M1;
	else
		if (key_data_length == 0 ||
		    !((*key_nonce) || (*(key_nonce + 1)) ||
		      (*(key_nonce + 2)) || (*(key_nonce + 3))))
			subtype = QDF_PROTO_EAPOL_M4;
		else
			subtype = QDF_PROTO_EAPOL_M2;

	return subtype;
}
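/*
 * Worked example of the classification above (illustrative): an M3 frame
 * carries key_info with the Ack bit (bit 7) set and at least one of the MIC
 * (bit 8) or encrypted-key-data (bit 12) bits set, e.g. key_info = 0x13ca
 * gives Ack=1 and MIC=1, so the function returns QDF_PROTO_EAPOL_M3. An M4
 * frame has Ack=0 together with a zero key-data length (or an all-zero
 * nonce), which is what separates it from M2.
 */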
/**
 * __qdf_nbuf_data_get_exp_msg_type() - Get EAP expanded msg type
 * @data: Pointer to EAP packet data buffer
 * @code: EAP code
 *
 * Return: subtype of the EAP packet.
 */
  1297. static inline enum qdf_proto_subtype
  1298. __qdf_nbuf_data_get_exp_msg_type(uint8_t *data, uint8_t code)
  1299. {
  1300. uint8_t msg_type;
  1301. uint8_t opcode = *(data + EAP_EXP_MSG_OPCODE_OFFSET);
  1302. switch (opcode) {
  1303. case WSC_START:
  1304. return QDF_PROTO_EAP_WSC_START;
  1305. case WSC_ACK:
  1306. return QDF_PROTO_EAP_WSC_ACK;
  1307. case WSC_NACK:
  1308. return QDF_PROTO_EAP_WSC_NACK;
  1309. case WSC_MSG:
  1310. msg_type = *(data + EAP_EXP_MSG_TYPE_OFFSET);
  1311. switch (msg_type) {
  1312. case EAP_EXP_TYPE_M1:
  1313. return QDF_PROTO_EAP_M1;
  1314. case EAP_EXP_TYPE_M2:
  1315. return QDF_PROTO_EAP_M2;
  1316. case EAP_EXP_TYPE_M3:
  1317. return QDF_PROTO_EAP_M3;
  1318. case EAP_EXP_TYPE_M4:
  1319. return QDF_PROTO_EAP_M4;
  1320. case EAP_EXP_TYPE_M5:
  1321. return QDF_PROTO_EAP_M5;
  1322. case EAP_EXP_TYPE_M6:
  1323. return QDF_PROTO_EAP_M6;
  1324. case EAP_EXP_TYPE_M7:
  1325. return QDF_PROTO_EAP_M7;
  1326. case EAP_EXP_TYPE_M8:
  1327. return QDF_PROTO_EAP_M8;
  1328. default:
  1329. break;
  1330. }
  1331. break;
  1332. case WSC_DONE:
  1333. return QDF_PROTO_EAP_WSC_DONE;
  1334. case WSC_FRAG_ACK:
  1335. return QDF_PROTO_EAP_WSC_FRAG_ACK;
  1336. default:
  1337. break;
  1338. }
  1339. switch (code) {
  1340. case QDF_EAP_REQUEST:
  1341. return QDF_PROTO_EAP_REQUEST;
  1342. case QDF_EAP_RESPONSE:
  1343. return QDF_PROTO_EAP_RESPONSE;
  1344. default:
  1345. return QDF_PROTO_INVALID;
  1346. }
  1347. }
/**
 * __qdf_nbuf_data_get_eap_type() - Get EAP type
 * @data: Pointer to EAP packet data buffer
 * @code: EAP code
 *
 * Return: subtype of the EAP packet.
 */
  1355. static inline enum qdf_proto_subtype
  1356. __qdf_nbuf_data_get_eap_type(uint8_t *data, uint8_t code)
  1357. {
  1358. uint8_t type = *(data + EAP_TYPE_OFFSET);
  1359. switch (type) {
  1360. case EAP_PACKET_TYPE_EXP:
  1361. return __qdf_nbuf_data_get_exp_msg_type(data, code);
  1362. case EAP_PACKET_TYPE_ID:
  1363. switch (code) {
  1364. case QDF_EAP_REQUEST:
  1365. return QDF_PROTO_EAP_REQ_ID;
  1366. case QDF_EAP_RESPONSE:
  1367. return QDF_PROTO_EAP_RSP_ID;
  1368. default:
  1369. return QDF_PROTO_INVALID;
  1370. }
  1371. default:
  1372. switch (code) {
  1373. case QDF_EAP_REQUEST:
  1374. return QDF_PROTO_EAP_REQUEST;
  1375. case QDF_EAP_RESPONSE:
  1376. return QDF_PROTO_EAP_RESPONSE;
  1377. default:
  1378. return QDF_PROTO_INVALID;
  1379. }
  1380. }
  1381. }
/**
 * __qdf_nbuf_data_get_eap_code() - Get EAP code
 * @data: Pointer to EAP packet data buffer
 *
 * Return: subtype of the EAP packet.
 */
  1388. static inline enum qdf_proto_subtype
  1389. __qdf_nbuf_data_get_eap_code(uint8_t *data)
  1390. {
  1391. uint8_t code = *(data + EAP_CODE_OFFSET);
  1392. switch (code) {
  1393. case QDF_EAP_REQUEST:
  1394. case QDF_EAP_RESPONSE:
  1395. return __qdf_nbuf_data_get_eap_type(data, code);
  1396. case QDF_EAP_SUCCESS:
  1397. return QDF_PROTO_EAP_SUCCESS;
  1398. case QDF_EAP_FAILURE:
  1399. return QDF_PROTO_EAP_FAILURE;
  1400. case QDF_EAP_INITIATE:
  1401. return QDF_PROTO_EAP_INITIATE;
  1402. case QDF_EAP_FINISH:
  1403. return QDF_PROTO_EAP_FINISH;
  1404. default:
  1405. return QDF_PROTO_INVALID;
  1406. }
  1407. }
  1408. enum qdf_proto_subtype
  1409. __qdf_nbuf_data_get_eapol_subtype(uint8_t *data)
  1410. {
  1411. uint8_t pkt_type = *(data + EAPOL_PACKET_TYPE_OFFSET);
  1412. switch (pkt_type) {
  1413. case EAPOL_PACKET_TYPE_EAP:
  1414. return __qdf_nbuf_data_get_eap_code(data);
  1415. case EAPOL_PACKET_TYPE_START:
  1416. return QDF_PROTO_EAPOL_START;
  1417. case EAPOL_PACKET_TYPE_LOGOFF:
  1418. return QDF_PROTO_EAPOL_LOGOFF;
  1419. case EAPOL_PACKET_TYPE_KEY:
  1420. return __qdf_nbuf_data_get_eapol_key(data);
  1421. case EAPOL_PACKET_TYPE_ASF:
  1422. return QDF_PROTO_EAPOL_ASF;
  1423. default:
  1424. return QDF_PROTO_INVALID;
  1425. }
  1426. }
  1427. qdf_export_symbol(__qdf_nbuf_data_get_eapol_subtype);
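/*
 * Illustrative sketch (not part of the driver): a tx/rx logging path that has
 * already confirmed the EAPOL ethertype can classify the frame with the
 * helper above; the nbuf variable below is hypothetical.
 *
 *	if (__qdf_nbuf_data_is_ipv4_eapol_pkt(qdf_nbuf_data(nbuf))) {
 *		enum qdf_proto_subtype subtype =
 *			__qdf_nbuf_data_get_eapol_subtype(qdf_nbuf_data(nbuf));
 *
 *		if (subtype == QDF_PROTO_EAPOL_M1)
 *			qdf_debug("4-way handshake M1 seen");
 *	}
 */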
  1428. enum qdf_proto_subtype
  1429. __qdf_nbuf_data_get_arp_subtype(uint8_t *data)
  1430. {
  1431. uint16_t subtype;
  1432. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1433. subtype = (uint16_t)(*(uint16_t *)
  1434. (data + ARP_SUB_TYPE_OFFSET));
  1435. switch (QDF_SWAP_U16(subtype)) {
  1436. case ARP_REQUEST:
  1437. proto_subtype = QDF_PROTO_ARP_REQ;
  1438. break;
  1439. case ARP_RESPONSE:
  1440. proto_subtype = QDF_PROTO_ARP_RES;
  1441. break;
  1442. default:
  1443. break;
  1444. }
  1445. return proto_subtype;
  1446. }
  1447. enum qdf_proto_subtype
  1448. __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
  1449. {
  1450. uint8_t subtype;
  1451. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1452. subtype = (uint8_t)(*(uint8_t *)
  1453. (data + ICMP_SUBTYPE_OFFSET));
  1454. switch (subtype) {
  1455. case ICMP_REQUEST:
  1456. proto_subtype = QDF_PROTO_ICMP_REQ;
  1457. break;
  1458. case ICMP_RESPONSE:
  1459. proto_subtype = QDF_PROTO_ICMP_RES;
  1460. break;
  1461. default:
  1462. break;
  1463. }
  1464. return proto_subtype;
  1465. }
  1466. enum qdf_proto_subtype
  1467. __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
  1468. {
  1469. uint8_t subtype;
  1470. enum qdf_proto_subtype proto_subtype = QDF_PROTO_INVALID;
  1471. subtype = (uint8_t)(*(uint8_t *)
  1472. (data + ICMPV6_SUBTYPE_OFFSET));
  1473. switch (subtype) {
  1474. case ICMPV6_REQUEST:
  1475. proto_subtype = QDF_PROTO_ICMPV6_REQ;
  1476. break;
  1477. case ICMPV6_RESPONSE:
  1478. proto_subtype = QDF_PROTO_ICMPV6_RES;
  1479. break;
  1480. case ICMPV6_RS:
  1481. proto_subtype = QDF_PROTO_ICMPV6_RS;
  1482. break;
  1483. case ICMPV6_RA:
  1484. proto_subtype = QDF_PROTO_ICMPV6_RA;
  1485. break;
  1486. case ICMPV6_NS:
  1487. proto_subtype = QDF_PROTO_ICMPV6_NS;
  1488. break;
  1489. case ICMPV6_NA:
  1490. proto_subtype = QDF_PROTO_ICMPV6_NA;
  1491. break;
  1492. default:
  1493. break;
  1494. }
  1495. return proto_subtype;
  1496. }
  1497. bool
  1498. __qdf_nbuf_is_ipv4_last_fragment(struct sk_buff *skb)
  1499. {
  1500. if (((ntohs(ip_hdr(skb)->frag_off) & ~IP_OFFSET) & IP_MF) == 0)
  1501. return true;
  1502. return false;
  1503. }
  1504. void
  1505. __qdf_nbuf_data_set_ipv4_tos(uint8_t *data, uint8_t tos)
  1506. {
  1507. *(uint8_t *)(data + QDF_NBUF_TRAC_IPV4_TOS_OFFSET) = tos;
  1508. }
  1509. uint8_t
  1510. __qdf_nbuf_data_get_ipv4_tos(uint8_t *data)
  1511. {
  1512. uint8_t tos;
  1513. tos = (uint8_t)(*(uint8_t *)(data +
  1514. QDF_NBUF_TRAC_IPV4_TOS_OFFSET));
  1515. return tos;
  1516. }
  1517. uint8_t
  1518. __qdf_nbuf_data_get_ipv4_proto(uint8_t *data)
  1519. {
  1520. uint8_t proto_type;
  1521. proto_type = (uint8_t)(*(uint8_t *)(data +
  1522. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  1523. return proto_type;
  1524. }
  1525. uint8_t
  1526. __qdf_nbuf_data_get_ipv6_tc(uint8_t *data)
  1527. {
  1528. struct ipv6hdr *hdr;
  1529. hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
  1530. return ip6_tclass(ip6_flowinfo(hdr));
  1531. }
  1532. void
  1533. __qdf_nbuf_data_set_ipv6_tc(uint8_t *data, uint8_t tc)
  1534. {
  1535. struct ipv6hdr *hdr;
  1536. hdr = (struct ipv6hdr *)(data + QDF_NBUF_TRAC_IPV6_OFFSET);
  1537. ip6_flow_hdr(hdr, tc, ip6_flowlabel(hdr));
  1538. }
  1539. uint8_t
  1540. __qdf_nbuf_data_get_ipv6_proto(uint8_t *data)
  1541. {
  1542. uint8_t proto_type;
  1543. proto_type = (uint8_t)(*(uint8_t *)(data +
  1544. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  1545. return proto_type;
  1546. }
  1547. bool __qdf_nbuf_data_is_ipv4_pkt(uint8_t *data)
  1548. {
  1549. uint16_t ether_type;
  1550. ether_type = (uint16_t)(*(uint16_t *)(data +
  1551. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  1552. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1553. return true;
  1554. else
  1555. return false;
  1556. }
  1557. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_pkt);
  1558. bool __qdf_nbuf_data_is_ipv4_dhcp_pkt(uint8_t *data)
  1559. {
  1560. uint16_t sport;
  1561. uint16_t dport;
  1562. uint8_t ipv4_offset;
  1563. uint8_t ipv4_hdr_len;
  1564. struct iphdr *iphdr;
  1565. if (__qdf_nbuf_get_ether_type(data) !=
  1566. QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1567. return false;
  1568. ipv4_offset = __qdf_nbuf_get_ip_offset(data);
  1569. iphdr = (struct iphdr *)(data + ipv4_offset);
  1570. ipv4_hdr_len = iphdr->ihl * QDF_NBUF_IPV4_HDR_SIZE_UNIT;
  1571. sport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len);
  1572. dport = *(uint16_t *)(data + ipv4_offset + ipv4_hdr_len +
  1573. sizeof(uint16_t));
  1574. if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT)) &&
  1575. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT))) ||
  1576. ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_CLI_PORT)) &&
  1577. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP_SRV_PORT))))
  1578. return true;
  1579. else
  1580. return false;
  1581. }
  1582. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_dhcp_pkt);
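/*
 * Illustrative sketch (not part of the driver): the DHCP check above works on
 * the raw frame, so a caller typically applies it directly to the nbuf data
 * pointer before deciding whether to log or prioritize the packet; the nbuf
 * variable below is hypothetical.
 *
 *	uint8_t *data = qdf_nbuf_data(nbuf);
 *
 *	if (__qdf_nbuf_data_is_ipv4_dhcp_pkt(data)) {
 *		enum qdf_proto_subtype subtype =
 *			__qdf_nbuf_data_get_dhcp_subtype(data);
 *		(e.g. QDF_PROTO_DHCP_DISCOVER or QDF_PROTO_DHCP_ACK)
 *	}
 */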
/**
 * qdf_is_eapol_type() - check if packet is EAPOL
 * @type: Packet type
 *
 * This API checks whether the given packet type is EAPOL.
 *
 * Return: true if it is an EAPOL frame,
 *	   false otherwise.
 */
  1592. #ifdef BIG_ENDIAN_HOST
  1593. static inline bool qdf_is_eapol_type(uint16_t type)
  1594. {
  1595. return (type == QDF_NBUF_TRAC_EAPOL_ETH_TYPE);
  1596. }
  1597. #else
  1598. static inline bool qdf_is_eapol_type(uint16_t type)
  1599. {
  1600. return (type == QDF_SWAP_U16(QDF_NBUF_TRAC_EAPOL_ETH_TYPE));
  1601. }
  1602. #endif
  1603. bool __qdf_nbuf_data_is_ipv4_eapol_pkt(uint8_t *data)
  1604. {
  1605. uint16_t ether_type;
  1606. ether_type = __qdf_nbuf_get_ether_type(data);
  1607. return qdf_is_eapol_type(ether_type);
  1608. }
  1609. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_eapol_pkt);
  1610. bool __qdf_nbuf_is_ipv4_wapi_pkt(struct sk_buff *skb)
  1611. {
  1612. uint16_t ether_type;
  1613. ether_type = (uint16_t)(*(uint16_t *)(skb->data +
  1614. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  1615. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_WAPI_ETH_TYPE))
  1616. return true;
  1617. else
  1618. return false;
  1619. }
  1620. qdf_export_symbol(__qdf_nbuf_is_ipv4_wapi_pkt);
/**
 * qdf_nbuf_is_ipv6_vlan_pkt() - check whether packet is VLAN IPv6
 * @data: Pointer to network data buffer
 *
 * This API checks for an IPv6 packet carried inside an 802.1Q VLAN header.
 *
 * Return: true if the packet is IPv6 with a VLAN header,
 *	   false otherwise.
 */
  1630. static bool qdf_nbuf_is_ipv6_vlan_pkt(uint8_t *data)
  1631. {
  1632. uint16_t ether_type;
  1633. ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1634. if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
  1635. ether_type = *(uint16_t *)(data +
  1636. QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
  1637. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
  1638. return true;
  1639. }
  1640. return false;
  1641. }
/**
 * qdf_nbuf_is_ipv4_vlan_pkt() - check whether packet is VLAN IPv4
 * @data: Pointer to network data buffer
 *
 * This API checks for an IPv4 packet carried inside an 802.1Q VLAN header.
 *
 * Return: true if the packet is IPv4 with a VLAN header,
 *	   false otherwise.
 */
  1651. static bool qdf_nbuf_is_ipv4_vlan_pkt(uint8_t *data)
  1652. {
  1653. uint16_t ether_type;
  1654. ether_type = *(uint16_t *)(data + QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1655. if (unlikely(ether_type == QDF_SWAP_U16(QDF_ETH_TYPE_8021Q))) {
  1656. ether_type = *(uint16_t *)(data +
  1657. QDF_NBUF_TRAC_VLAN_ETH_TYPE_OFFSET);
  1658. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE))
  1659. return true;
  1660. }
  1661. return false;
  1662. }
  1663. bool __qdf_nbuf_data_is_ipv4_igmp_pkt(uint8_t *data)
  1664. {
  1665. uint8_t pkt_type;
  1666. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  1667. pkt_type = (uint8_t)(*(uint8_t *)(data +
  1668. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  1669. goto is_igmp;
  1670. }
  1671. if (qdf_nbuf_is_ipv4_vlan_pkt(data)) {
  1672. pkt_type = (uint8_t)(*(uint8_t *)(
  1673. data +
  1674. QDF_NBUF_TRAC_VLAN_IPV4_PROTO_TYPE_OFFSET));
  1675. goto is_igmp;
  1676. }
  1677. return false;
  1678. is_igmp:
  1679. if (pkt_type == QDF_NBUF_TRAC_IGMP_TYPE)
  1680. return true;
  1681. return false;
  1682. }
  1683. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_igmp_pkt);
  1684. bool __qdf_nbuf_data_is_ipv6_igmp_pkt(uint8_t *data)
  1685. {
  1686. uint8_t pkt_type;
  1687. uint8_t next_hdr;
  1688. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  1689. pkt_type = (uint8_t)(*(uint8_t *)(data +
  1690. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  1691. next_hdr = (uint8_t)(*(uint8_t *)(
  1692. data +
  1693. QDF_NBUF_TRAC_IPV6_OFFSET +
  1694. QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
  1695. goto is_mld;
  1696. }
  1697. if (qdf_nbuf_is_ipv6_vlan_pkt(data)) {
  1698. pkt_type = (uint8_t)(*(uint8_t *)(
  1699. data +
  1700. QDF_NBUF_TRAC_VLAN_IPV6_PROTO_TYPE_OFFSET));
  1701. next_hdr = (uint8_t)(*(uint8_t *)(
  1702. data +
  1703. QDF_NBUF_TRAC_VLAN_IPV6_OFFSET +
  1704. QDF_NBUF_TRAC_IPV6_HEADER_SIZE));
  1705. goto is_mld;
  1706. }
  1707. return false;
  1708. is_mld:
  1709. if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
  1710. return true;
  1711. if ((pkt_type == QDF_NBUF_TRAC_HOPOPTS_TYPE) &&
  1712. (next_hdr == QDF_NBUF_TRAC_ICMPV6_TYPE))
  1713. return true;
  1714. return false;
  1715. }
  1716. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_igmp_pkt);
  1717. bool __qdf_nbuf_is_ipv4_igmp_leave_pkt(__qdf_nbuf_t buf)
  1718. {
  1719. qdf_ether_header_t *eh = NULL;
  1720. uint16_t ether_type;
  1721. uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
  1722. eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
  1723. ether_type = eh->ether_type;
  1724. if (ether_type == htons(ETH_P_8021Q)) {
  1725. struct vlan_ethhdr *veth =
  1726. (struct vlan_ethhdr *)qdf_nbuf_data(buf);
  1727. ether_type = veth->h_vlan_encapsulated_proto;
  1728. eth_hdr_size = sizeof(struct vlan_ethhdr);
  1729. }
  1730. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV4_ETH_TYPE)) {
  1731. struct iphdr *iph = NULL;
  1732. struct igmphdr *ih = NULL;
  1733. iph = (struct iphdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
  1734. ih = (struct igmphdr *)((uint8_t *)iph + iph->ihl * 4);
  1735. switch (ih->type) {
  1736. case IGMP_HOST_LEAVE_MESSAGE:
  1737. return true;
  1738. case IGMPV3_HOST_MEMBERSHIP_REPORT:
  1739. {
  1740. struct igmpv3_report *ihv3 = (struct igmpv3_report *)ih;
  1741. struct igmpv3_grec *grec = NULL;
  1742. int num = 0;
  1743. int i = 0;
  1744. int len = 0;
  1745. int type = 0;
  1746. num = ntohs(ihv3->ngrec);
  1747. for (i = 0; i < num; i++) {
  1748. grec = (void *)((uint8_t *)(ihv3->grec) + len);
  1749. type = grec->grec_type;
  1750. if ((type == IGMPV3_MODE_IS_INCLUDE) ||
  1751. (type == IGMPV3_CHANGE_TO_INCLUDE))
  1752. return true;
  1753. len += sizeof(struct igmpv3_grec);
  1754. len += ntohs(grec->grec_nsrcs) * 4;
  1755. }
  1756. break;
  1757. }
  1758. default:
  1759. break;
  1760. }
  1761. }
  1762. return false;
  1763. }
  1764. qdf_export_symbol(__qdf_nbuf_is_ipv4_igmp_leave_pkt);
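/*
 * Worked example for the IGMPv3 walk above (illustrative): a membership
 * report whose single group record has grec_type == IGMPV3_CHANGE_TO_INCLUDE
 * with grec_nsrcs == 0 is the IGMPv3 way of leaving a group, so the loop
 * returns true after inspecting record 0. For a record that does carry
 * sources, the walk advances by sizeof(struct igmpv3_grec) plus
 * ntohs(grec_nsrcs) * 4 bytes before looking at the next record.
 */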
  1765. bool __qdf_nbuf_is_ipv6_igmp_leave_pkt(__qdf_nbuf_t buf)
  1766. {
  1767. qdf_ether_header_t *eh = NULL;
  1768. uint16_t ether_type;
  1769. uint8_t eth_hdr_size = sizeof(qdf_ether_header_t);
  1770. eh = (qdf_ether_header_t *)qdf_nbuf_data(buf);
  1771. ether_type = eh->ether_type;
  1772. if (ether_type == htons(ETH_P_8021Q)) {
  1773. struct vlan_ethhdr *veth =
  1774. (struct vlan_ethhdr *)qdf_nbuf_data(buf);
  1775. ether_type = veth->h_vlan_encapsulated_proto;
  1776. eth_hdr_size = sizeof(struct vlan_ethhdr);
  1777. }
  1778. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE)) {
  1779. struct ipv6hdr *ip6h = NULL;
  1780. struct icmp6hdr *icmp6h = NULL;
  1781. uint8_t nexthdr;
  1782. uint16_t frag_off = 0;
  1783. int offset;
  1784. qdf_nbuf_t buf_copy = NULL;
  1785. ip6h = (struct ipv6hdr *)(qdf_nbuf_data(buf) + eth_hdr_size);
  1786. if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
  1787. ip6h->payload_len == 0)
  1788. return false;
  1789. buf_copy = qdf_nbuf_copy(buf);
		if (qdf_unlikely(!buf_copy))
			return false;
  1792. nexthdr = ip6h->nexthdr;
  1793. offset = ipv6_skip_exthdr(buf_copy,
  1794. eth_hdr_size + sizeof(*ip6h),
  1795. &nexthdr,
  1796. &frag_off);
  1797. qdf_nbuf_free(buf_copy);
  1798. if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
  1799. return false;
  1800. icmp6h = (struct icmp6hdr *)(qdf_nbuf_data(buf) + offset);
  1801. switch (icmp6h->icmp6_type) {
  1802. case ICMPV6_MGM_REDUCTION:
  1803. return true;
  1804. case ICMPV6_MLD2_REPORT:
  1805. {
  1806. struct mld2_report *mh = NULL;
  1807. struct mld2_grec *grec = NULL;
  1808. int num = 0;
  1809. int i = 0;
  1810. int len = 0;
  1811. int type = -1;
  1812. mh = (struct mld2_report *)icmp6h;
  1813. num = ntohs(mh->mld2r_ngrec);
  1814. for (i = 0; i < num; i++) {
  1815. grec = (void *)(((uint8_t *)mh->mld2r_grec) +
  1816. len);
  1817. type = grec->grec_type;
  1818. if ((type == MLD2_MODE_IS_INCLUDE) ||
  1819. (type == MLD2_CHANGE_TO_INCLUDE))
  1820. return true;
  1821. else if (type == MLD2_BLOCK_OLD_SOURCES)
  1822. return true;
  1823. len += sizeof(struct mld2_grec);
  1824. len += ntohs(grec->grec_nsrcs) *
  1825. sizeof(struct in6_addr);
  1826. }
  1827. break;
  1828. }
  1829. default:
  1830. break;
  1831. }
  1832. }
  1833. return false;
  1834. }
  1835. qdf_export_symbol(__qdf_nbuf_is_ipv6_igmp_leave_pkt);
  1836. bool __qdf_nbuf_is_ipv4_tdls_pkt(struct sk_buff *skb)
  1837. {
  1838. uint16_t ether_type;
  1839. ether_type = *(uint16_t *)(skb->data +
  1840. QDF_NBUF_TRAC_ETH_TYPE_OFFSET);
  1841. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_TDLS_ETH_TYPE))
  1842. return true;
  1843. else
  1844. return false;
  1845. }
  1846. qdf_export_symbol(__qdf_nbuf_is_ipv4_tdls_pkt);
  1847. bool __qdf_nbuf_data_is_ipv4_arp_pkt(uint8_t *data)
  1848. {
  1849. uint16_t ether_type;
  1850. ether_type = __qdf_nbuf_get_ether_type(data);
  1851. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_ARP_ETH_TYPE))
  1852. return true;
  1853. else
  1854. return false;
  1855. }
  1856. qdf_export_symbol(__qdf_nbuf_data_is_ipv4_arp_pkt);
  1857. bool __qdf_nbuf_data_is_arp_req(uint8_t *data)
  1858. {
  1859. uint16_t op_code;
  1860. op_code = (uint16_t)(*(uint16_t *)(data +
  1861. QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
  1862. if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REQ))
  1863. return true;
  1864. return false;
  1865. }
  1866. bool __qdf_nbuf_data_is_arp_rsp(uint8_t *data)
  1867. {
  1868. uint16_t op_code;
  1869. op_code = (uint16_t)(*(uint16_t *)(data +
  1870. QDF_NBUF_PKT_ARP_OPCODE_OFFSET));
  1871. if (op_code == QDF_SWAP_U16(QDF_NBUF_PKT_ARPOP_REPLY))
  1872. return true;
  1873. return false;
  1874. }
  1875. uint32_t __qdf_nbuf_get_arp_src_ip(uint8_t *data)
  1876. {
  1877. uint32_t src_ip;
  1878. src_ip = (uint32_t)(*(uint32_t *)(data +
  1879. QDF_NBUF_PKT_ARP_SRC_IP_OFFSET));
  1880. return src_ip;
  1881. }
  1882. uint32_t __qdf_nbuf_get_arp_tgt_ip(uint8_t *data)
  1883. {
  1884. uint32_t tgt_ip;
  1885. tgt_ip = (uint32_t)(*(uint32_t *)(data +
  1886. QDF_NBUF_PKT_ARP_TGT_IP_OFFSET));
  1887. return tgt_ip;
  1888. }
  1889. uint8_t *__qdf_nbuf_get_dns_domain_name(uint8_t *data, uint32_t len)
  1890. {
  1891. uint8_t *domain_name;
  1892. domain_name = (uint8_t *)
  1893. (data + QDF_NBUF_PKT_DNS_NAME_OVER_UDP_OFFSET);
  1894. return domain_name;
  1895. }
  1896. bool __qdf_nbuf_data_is_dns_query(uint8_t *data)
  1897. {
  1898. uint16_t op_code;
  1899. uint16_t tgt_port;
  1900. tgt_port = (uint16_t)(*(uint16_t *)(data +
  1901. QDF_NBUF_PKT_DNS_DST_PORT_OFFSET));
	/* A standard DNS query always goes to destination port 53. */
  1903. if (tgt_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
  1904. op_code = (uint16_t)(*(uint16_t *)(data +
  1905. QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
  1906. if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
  1907. QDF_NBUF_PKT_DNSOP_STANDARD_QUERY)
  1908. return true;
  1909. }
  1910. return false;
  1911. }
  1912. bool __qdf_nbuf_data_is_dns_response(uint8_t *data)
  1913. {
  1914. uint16_t op_code;
  1915. uint16_t src_port;
  1916. src_port = (uint16_t)(*(uint16_t *)(data +
  1917. QDF_NBUF_PKT_DNS_SRC_PORT_OFFSET));
	/* A standard DNS response always comes from source port 53. */
  1919. if (src_port == QDF_SWAP_U16(QDF_NBUF_PKT_DNS_STANDARD_PORT)) {
  1920. op_code = (uint16_t)(*(uint16_t *)(data +
  1921. QDF_NBUF_PKT_DNS_OVER_UDP_OPCODE_OFFSET));
  1922. if ((QDF_SWAP_U16(op_code) & QDF_NBUF_PKT_DNSOP_BITMAP) ==
  1923. QDF_NBUF_PKT_DNSOP_STANDARD_RESPONSE)
  1924. return true;
  1925. }
  1926. return false;
  1927. }
  1928. bool __qdf_nbuf_data_is_tcp_fin(uint8_t *data)
  1929. {
  1930. uint8_t op_code;
  1931. op_code = (uint8_t)(*(uint8_t *)(data +
  1932. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1933. if (op_code == QDF_NBUF_PKT_TCPOP_FIN)
  1934. return true;
  1935. return false;
  1936. }
  1937. bool __qdf_nbuf_data_is_tcp_fin_ack(uint8_t *data)
  1938. {
  1939. uint8_t op_code;
  1940. op_code = (uint8_t)(*(uint8_t *)(data +
  1941. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1942. if (op_code == QDF_NBUF_PKT_TCPOP_FIN_ACK)
  1943. return true;
  1944. return false;
  1945. }
  1946. bool __qdf_nbuf_data_is_tcp_syn(uint8_t *data)
  1947. {
  1948. uint8_t op_code;
  1949. op_code = (uint8_t)(*(uint8_t *)(data +
  1950. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1951. if (op_code == QDF_NBUF_PKT_TCPOP_SYN)
  1952. return true;
  1953. return false;
  1954. }
  1955. bool __qdf_nbuf_data_is_tcp_syn_ack(uint8_t *data)
  1956. {
  1957. uint8_t op_code;
  1958. op_code = (uint8_t)(*(uint8_t *)(data +
  1959. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1960. if (op_code == QDF_NBUF_PKT_TCPOP_SYN_ACK)
  1961. return true;
  1962. return false;
  1963. }
  1964. bool __qdf_nbuf_data_is_tcp_rst(uint8_t *data)
  1965. {
  1966. uint8_t op_code;
  1967. op_code = (uint8_t)(*(uint8_t *)(data +
  1968. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1969. if (op_code == QDF_NBUF_PKT_TCPOP_RST)
  1970. return true;
  1971. return false;
  1972. }
  1973. bool __qdf_nbuf_data_is_tcp_ack(uint8_t *data)
  1974. {
  1975. uint8_t op_code;
  1976. op_code = (uint8_t)(*(uint8_t *)(data +
  1977. QDF_NBUF_PKT_TCP_OPCODE_OFFSET));
  1978. if (op_code == QDF_NBUF_PKT_TCPOP_ACK)
  1979. return true;
  1980. return false;
  1981. }
  1982. uint16_t __qdf_nbuf_data_get_tcp_src_port(uint8_t *data)
  1983. {
  1984. uint16_t src_port;
  1985. src_port = (uint16_t)(*(uint16_t *)(data +
  1986. QDF_NBUF_PKT_TCP_SRC_PORT_OFFSET));
  1987. return src_port;
  1988. }
  1989. uint16_t __qdf_nbuf_data_get_tcp_dst_port(uint8_t *data)
  1990. {
  1991. uint16_t tgt_port;
  1992. tgt_port = (uint16_t)(*(uint16_t *)(data +
  1993. QDF_NBUF_PKT_TCP_DST_PORT_OFFSET));
  1994. return tgt_port;
  1995. }
  1996. bool __qdf_nbuf_data_is_icmpv4_req(uint8_t *data)
  1997. {
  1998. uint8_t op_code;
  1999. op_code = (uint8_t)(*(uint8_t *)(data +
  2000. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  2001. if (op_code == QDF_NBUF_PKT_ICMPv4OP_REQ)
  2002. return true;
  2003. return false;
  2004. }
  2005. bool __qdf_nbuf_data_is_icmpv4_rsp(uint8_t *data)
  2006. {
  2007. uint8_t op_code;
  2008. op_code = (uint8_t)(*(uint8_t *)(data +
  2009. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  2010. if (op_code == QDF_NBUF_PKT_ICMPv4OP_REPLY)
  2011. return true;
  2012. return false;
  2013. }
  2014. bool __qdf_nbuf_data_is_icmpv4_redirect(uint8_t *data)
  2015. {
  2016. uint8_t op_code;
  2017. op_code = (uint8_t)(*(uint8_t *)(data +
  2018. QDF_NBUF_PKT_ICMPv4_OPCODE_OFFSET));
  2019. if (op_code == QDF_NBUF_PKT_ICMPV4_REDIRECT)
  2020. return true;
  2021. return false;
  2022. }
  2023. qdf_export_symbol(__qdf_nbuf_data_is_icmpv4_redirect);
  2024. bool __qdf_nbuf_data_is_icmpv6_redirect(uint8_t *data)
  2025. {
  2026. uint8_t subtype;
  2027. subtype = (uint8_t)(*(uint8_t *)(data + ICMPV6_SUBTYPE_OFFSET));
  2028. if (subtype == ICMPV6_REDIRECT)
  2029. return true;
  2030. return false;
  2031. }
  2032. qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_redirect);
  2033. uint32_t __qdf_nbuf_get_icmpv4_src_ip(uint8_t *data)
  2034. {
  2035. uint32_t src_ip;
  2036. src_ip = (uint32_t)(*(uint32_t *)(data +
  2037. QDF_NBUF_PKT_ICMPv4_SRC_IP_OFFSET));
  2038. return src_ip;
  2039. }
  2040. uint32_t __qdf_nbuf_get_icmpv4_tgt_ip(uint8_t *data)
  2041. {
  2042. uint32_t tgt_ip;
  2043. tgt_ip = (uint32_t)(*(uint32_t *)(data +
  2044. QDF_NBUF_PKT_ICMPv4_TGT_IP_OFFSET));
  2045. return tgt_ip;
  2046. }
  2047. bool __qdf_nbuf_data_is_ipv6_pkt(uint8_t *data)
  2048. {
  2049. uint16_t ether_type;
  2050. ether_type = (uint16_t)(*(uint16_t *)(data +
  2051. QDF_NBUF_TRAC_ETH_TYPE_OFFSET));
  2052. if (ether_type == QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_ETH_TYPE))
  2053. return true;
  2054. else
  2055. return false;
  2056. }
  2057. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_pkt);
  2058. bool __qdf_nbuf_data_is_ipv6_dhcp_pkt(uint8_t *data)
  2059. {
  2060. uint16_t sport;
  2061. uint16_t dport;
  2062. uint8_t ipv6_offset;
  2063. if (!__qdf_nbuf_data_is_ipv6_pkt(data))
  2064. return false;
  2065. ipv6_offset = __qdf_nbuf_get_ip_offset(data);
  2066. sport = *(uint16_t *)(data + ipv6_offset +
  2067. QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
  2068. dport = *(uint16_t *)(data + ipv6_offset +
  2069. QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
  2070. sizeof(uint16_t));
  2071. if (((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT)) &&
  2072. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT))) ||
  2073. ((sport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_CLI_PORT)) &&
  2074. (dport == QDF_SWAP_U16(QDF_NBUF_TRAC_DHCP6_SRV_PORT))))
  2075. return true;
  2076. else
  2077. return false;
  2078. }
  2079. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_dhcp_pkt);
  2080. bool __qdf_nbuf_data_is_ipv6_mdns_pkt(uint8_t *data)
  2081. {
  2082. uint16_t sport;
  2083. uint16_t dport;
  2084. sport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
  2085. QDF_NBUF_TRAC_IPV6_HEADER_SIZE);
  2086. dport = *(uint16_t *)(data + QDF_NBUF_TRAC_IPV6_OFFSET +
  2087. QDF_NBUF_TRAC_IPV6_HEADER_SIZE +
  2088. sizeof(uint16_t));
  2089. if (sport == QDF_SWAP_U16(QDF_NBUF_TRAC_MDNS_SRC_N_DST_PORT) &&
  2090. dport == sport)
  2091. return true;
  2092. else
  2093. return false;
  2094. }
  2095. qdf_export_symbol(__qdf_nbuf_data_is_ipv6_mdns_pkt);
  2096. bool __qdf_nbuf_data_is_ipv4_mcast_pkt(uint8_t *data)
  2097. {
  2098. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2099. uint32_t *dst_addr =
  2100. (uint32_t *)(data + QDF_NBUF_TRAC_IPV4_DEST_ADDR_OFFSET);
		/*
		 * Check the top nibble of the IPv4 destination address;
		 * 0xE (i.e. 224.0.0.0/4) marks it as a multicast address.
		 */
  2105. if ((*dst_addr & QDF_NBUF_TRAC_IPV4_ADDR_BCAST_MASK) ==
  2106. QDF_NBUF_TRAC_IPV4_ADDR_MCAST_MASK)
  2107. return true;
  2108. else
  2109. return false;
  2110. } else
  2111. return false;
  2112. }
  2113. bool __qdf_nbuf_data_is_ipv6_mcast_pkt(uint8_t *data)
  2114. {
  2115. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2116. uint16_t *dst_addr;
  2117. dst_addr = (uint16_t *)
  2118. (data + QDF_NBUF_TRAC_IPV6_DEST_ADDR_OFFSET);
		/*
		 * Check the first 16 bits of the IPv6 destination address;
		 * if they equal 0xff00, treat it as an IPv6 multicast packet.
		 */
  2123. if (*dst_addr ==
  2124. QDF_SWAP_U16(QDF_NBUF_TRAC_IPV6_DEST_ADDR))
  2125. return true;
  2126. else
  2127. return false;
  2128. } else
  2129. return false;
  2130. }
  2131. bool __qdf_nbuf_data_is_icmp_pkt(uint8_t *data)
  2132. {
  2133. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2134. uint8_t pkt_type;
  2135. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2136. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2137. if (pkt_type == QDF_NBUF_TRAC_ICMP_TYPE)
  2138. return true;
  2139. else
  2140. return false;
  2141. } else
  2142. return false;
  2143. }
  2144. qdf_export_symbol(__qdf_nbuf_data_is_icmp_pkt);
  2145. bool __qdf_nbuf_data_is_icmpv6_pkt(uint8_t *data)
  2146. {
  2147. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2148. uint8_t pkt_type;
  2149. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2150. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2151. if (pkt_type == QDF_NBUF_TRAC_ICMPV6_TYPE)
  2152. return true;
  2153. else
  2154. return false;
  2155. } else
  2156. return false;
  2157. }
  2158. qdf_export_symbol(__qdf_nbuf_data_is_icmpv6_pkt);
  2159. bool __qdf_nbuf_data_is_ipv4_udp_pkt(uint8_t *data)
  2160. {
  2161. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2162. uint8_t pkt_type;
  2163. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2164. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2165. if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
  2166. return true;
  2167. else
  2168. return false;
  2169. } else
  2170. return false;
  2171. }
  2172. bool __qdf_nbuf_data_is_ipv4_tcp_pkt(uint8_t *data)
  2173. {
  2174. if (__qdf_nbuf_data_is_ipv4_pkt(data)) {
  2175. uint8_t pkt_type;
  2176. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2177. QDF_NBUF_TRAC_IPV4_PROTO_TYPE_OFFSET));
  2178. if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
  2179. return true;
  2180. else
  2181. return false;
  2182. } else
  2183. return false;
  2184. }
  2185. bool __qdf_nbuf_data_is_ipv6_udp_pkt(uint8_t *data)
  2186. {
  2187. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2188. uint8_t pkt_type;
  2189. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2190. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2191. if (pkt_type == QDF_NBUF_TRAC_UDP_TYPE)
  2192. return true;
  2193. else
  2194. return false;
  2195. } else
  2196. return false;
  2197. }
  2198. bool __qdf_nbuf_data_is_ipv6_tcp_pkt(uint8_t *data)
  2199. {
  2200. if (__qdf_nbuf_data_is_ipv6_pkt(data)) {
  2201. uint8_t pkt_type;
  2202. pkt_type = (uint8_t)(*(uint8_t *)(data +
  2203. QDF_NBUF_TRAC_IPV6_PROTO_TYPE_OFFSET));
  2204. if (pkt_type == QDF_NBUF_TRAC_TCP_TYPE)
  2205. return true;
  2206. else
  2207. return false;
  2208. } else
  2209. return false;
  2210. }
  2211. bool __qdf_nbuf_is_bcast_pkt(qdf_nbuf_t nbuf)
  2212. {
  2213. struct ethhdr *eh = (struct ethhdr *)qdf_nbuf_data(nbuf);
  2214. return qdf_is_macaddr_broadcast((struct qdf_mac_addr *)eh->h_dest);
  2215. }
  2216. qdf_export_symbol(__qdf_nbuf_is_bcast_pkt);
  2217. bool __qdf_nbuf_is_mcast_replay(qdf_nbuf_t nbuf)
  2218. {
  2219. struct sk_buff *skb = (struct sk_buff *)nbuf;
  2220. struct ethhdr *eth = eth_hdr(skb);
  2221. if (qdf_likely(skb->pkt_type != PACKET_MULTICAST))
  2222. return false;
  2223. if (qdf_unlikely(ether_addr_equal(eth->h_source, skb->dev->dev_addr)))
  2224. return true;
  2225. return false;
  2226. }
  2227. bool __qdf_nbuf_is_arp_local(struct sk_buff *skb)
  2228. {
  2229. struct arphdr *arp;
  2230. struct in_ifaddr **ifap = NULL;
  2231. struct in_ifaddr *ifa = NULL;
  2232. struct in_device *in_dev;
  2233. unsigned char *arp_ptr;
  2234. __be32 tip;
  2235. arp = (struct arphdr *)skb->data;
  2236. if (arp->ar_op == htons(ARPOP_REQUEST)) {
		/* if we fail to acquire the rtnl lock, assume it is local ARP */
  2238. if (!rtnl_trylock())
  2239. return true;
  2240. in_dev = __in_dev_get_rtnl(skb->dev);
  2241. if (in_dev) {
  2242. for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
  2243. ifap = &ifa->ifa_next) {
  2244. if (!strcmp(skb->dev->name, ifa->ifa_label))
  2245. break;
  2246. }
  2247. }
  2248. if (ifa && ifa->ifa_local) {
  2249. arp_ptr = (unsigned char *)(arp + 1);
  2250. arp_ptr += (skb->dev->addr_len + 4 +
  2251. skb->dev->addr_len);
  2252. memcpy(&tip, arp_ptr, 4);
  2253. qdf_debug("ARP packet: local IP: %x dest IP: %x",
  2254. ifa->ifa_local, tip);
  2255. if (ifa->ifa_local == tip) {
  2256. rtnl_unlock();
  2257. return true;
  2258. }
  2259. }
  2260. rtnl_unlock();
  2261. }
  2262. return false;
  2263. }
/**
 * __qdf_nbuf_data_get_tcp_hdr_len() - get TCP header length
 * @data: pointer to data of network buffer
 * @tcp_hdr_len_offset: byte offset of the TCP header-length field within the
 *			Ethernet frame
 *
 * Return: TCP header length in bytes
 */
  2271. static inline
  2272. uint8_t __qdf_nbuf_data_get_tcp_hdr_len(uint8_t *data,
  2273. uint8_t tcp_hdr_len_offset)
  2274. {
  2275. uint8_t tcp_hdr_len;
  2276. tcp_hdr_len =
  2277. *((uint8_t *)(data + tcp_hdr_len_offset));
  2278. tcp_hdr_len = ((tcp_hdr_len & QDF_NBUF_PKT_TCP_HDR_LEN_MASK) >>
  2279. QDF_NBUF_PKT_TCP_HDR_LEN_LSB) *
  2280. QDF_NBUF_PKT_TCP_HDR_LEN_UNIT;
  2281. return tcp_hdr_len;
  2282. }
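/*
 * Worked example (illustrative): the TCP data-offset field sits in the upper
 * nibble of the byte at tcp_hdr_len_offset and counts 32-bit words, so a raw
 * byte of 0x50 decodes to 5 words = 20 bytes (no TCP options), while 0x80
 * decodes to 8 words = 32 bytes (e.g. 12 bytes of timestamp/SACK options).
 */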
  2283. bool __qdf_nbuf_is_ipv4_v6_pure_tcp_ack(struct sk_buff *skb)
  2284. {
  2285. bool is_tcp_ack = false;
  2286. uint8_t op_code, tcp_hdr_len;
  2287. uint16_t ip_payload_len;
  2288. uint8_t *data = skb->data;
	/*
	 * If the packet length exceeds the maximum TCP ACK length or the
	 * buffer is nonlinear, it cannot be a pure TCP ACK.
	 */
  2293. if (qdf_nbuf_len(skb) > QDF_NBUF_PKT_TCP_ACK_MAX_LEN ||
  2294. qdf_nbuf_is_nonlinear(skb))
  2295. return false;
  2296. if (qdf_nbuf_is_ipv4_tcp_pkt(skb)) {
  2297. ip_payload_len =
  2298. QDF_SWAP_U16(*((uint16_t *)(data +
  2299. QDF_NBUF_TRAC_IPV4_TOTAL_LEN_OFFSET)))
  2300. - QDF_NBUF_TRAC_IPV4_HEADER_SIZE;
  2301. tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
  2302. data,
  2303. QDF_NBUF_PKT_IPV4_TCP_HDR_LEN_OFFSET);
  2304. op_code = (uint8_t)(*(uint8_t *)(data +
  2305. QDF_NBUF_PKT_IPV4_TCP_OPCODE_OFFSET));
  2306. if (ip_payload_len == tcp_hdr_len &&
  2307. op_code == QDF_NBUF_PKT_TCPOP_ACK)
  2308. is_tcp_ack = true;
  2309. } else if (qdf_nbuf_is_ipv6_tcp_pkt(skb)) {
  2310. ip_payload_len =
  2311. QDF_SWAP_U16(*((uint16_t *)(data +
  2312. QDF_NBUF_TRAC_IPV6_PAYLOAD_LEN_OFFSET)));
  2313. tcp_hdr_len = __qdf_nbuf_data_get_tcp_hdr_len(
  2314. data,
  2315. QDF_NBUF_PKT_IPV6_TCP_HDR_LEN_OFFSET);
  2316. op_code = (uint8_t)(*(uint8_t *)(data +
  2317. QDF_NBUF_PKT_IPV6_TCP_OPCODE_OFFSET));
  2318. if (ip_payload_len == tcp_hdr_len &&
  2319. op_code == QDF_NBUF_PKT_TCPOP_ACK)
  2320. is_tcp_ack = true;
  2321. }
  2322. return is_tcp_ack;
  2323. }
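/*
 * Worked example (illustrative): for a plain IPv4 ACK with no TCP options the
 * IP total length is 40, so ip_payload_len = 40 - 20 = 20, and the TCP header
 * length is also 20; with the flags byte equal to QDF_NBUF_PKT_TCPOP_ACK the
 * frame is classified as a pure ACK. A segment carrying even one byte of
 * payload makes ip_payload_len exceed tcp_hdr_len and is rejected.
 */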
  2324. #ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
  2325. bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
  2326. {
  2327. return nbuf->fast_xmit;
  2328. }
  2329. qdf_export_symbol(qdf_nbuf_fast_xmit);
  2330. void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
  2331. {
  2332. nbuf->fast_xmit = value;
  2333. }
  2334. qdf_export_symbol(qdf_nbuf_set_fast_xmit);
  2335. #else
  2336. bool qdf_nbuf_fast_xmit(qdf_nbuf_t nbuf)
  2337. {
  2338. return false;
  2339. }
  2340. qdf_export_symbol(qdf_nbuf_fast_xmit);
  2341. void qdf_nbuf_set_fast_xmit(qdf_nbuf_t nbuf, int value)
  2342. {
  2343. }
  2344. qdf_export_symbol(qdf_nbuf_set_fast_xmit);
  2345. #endif
  2346. #ifdef NBUF_MEMORY_DEBUG
  2347. static spinlock_t g_qdf_net_buf_track_lock[QDF_NET_BUF_TRACK_MAX_SIZE];
  2348. static QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
  2349. static struct kmem_cache *nbuf_tracking_cache;
  2350. static QDF_NBUF_TRACK *qdf_net_buf_track_free_list;
  2351. static spinlock_t qdf_net_buf_track_free_list_lock;
  2352. static uint32_t qdf_net_buf_track_free_list_count;
  2353. static uint32_t qdf_net_buf_track_used_list_count;
  2354. static uint32_t qdf_net_buf_track_max_used;
  2355. static uint32_t qdf_net_buf_track_max_free;
  2356. static uint32_t qdf_net_buf_track_max_allocated;
  2357. static uint32_t qdf_net_buf_track_fail_count;
  2358. /**
  2359. * update_max_used() - update qdf_net_buf_track_max_used tracking variable
  2360. *
  2361. * tracks the max number of network buffers that the wlan driver was tracking
  2362. * at any one time.
  2363. *
  2364. * Return: none
  2365. */
  2366. static inline void update_max_used(void)
  2367. {
  2368. int sum;
  2369. if (qdf_net_buf_track_max_used <
  2370. qdf_net_buf_track_used_list_count)
  2371. qdf_net_buf_track_max_used = qdf_net_buf_track_used_list_count;
  2372. sum = qdf_net_buf_track_free_list_count +
  2373. qdf_net_buf_track_used_list_count;
  2374. if (qdf_net_buf_track_max_allocated < sum)
  2375. qdf_net_buf_track_max_allocated = sum;
  2376. }
/**
 * update_max_free() - update qdf_net_buf_track_max_free tracking variable
 *
 * Tracks the maximum number of tracking buffers kept on the freelist.
 *
 * Return: none
 */
  2384. static inline void update_max_free(void)
  2385. {
  2386. if (qdf_net_buf_track_max_free <
  2387. qdf_net_buf_track_free_list_count)
  2388. qdf_net_buf_track_max_free = qdf_net_buf_track_free_list_count;
  2389. }
/**
 * qdf_nbuf_track_alloc() - allocate a cookie to track nbufs allocated by wlan
 *
 * This function pulls from a freelist if possible and otherwise uses
 * kmem_cache_alloc. It also adds flexibility to adjust the allocation and
 * freelist schemes.
 *
 * Return: a pointer to an unused QDF_NBUF_TRACK structure; it may not be
 *	   zeroed.
 */
  2399. static QDF_NBUF_TRACK *qdf_nbuf_track_alloc(void)
  2400. {
  2401. int flags = GFP_KERNEL;
  2402. unsigned long irq_flag;
  2403. QDF_NBUF_TRACK *new_node = NULL;
  2404. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2405. qdf_net_buf_track_used_list_count++;
  2406. if (qdf_net_buf_track_free_list) {
  2407. new_node = qdf_net_buf_track_free_list;
  2408. qdf_net_buf_track_free_list =
  2409. qdf_net_buf_track_free_list->p_next;
  2410. qdf_net_buf_track_free_list_count--;
  2411. }
  2412. update_max_used();
  2413. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2414. if (new_node)
  2415. return new_node;
  2416. if (in_interrupt() || irqs_disabled() || in_atomic())
  2417. flags = GFP_ATOMIC;
  2418. return kmem_cache_alloc(nbuf_tracking_cache, flags);
  2419. }
  2420. /* FREEQ_POOLSIZE initial and minimum desired freelist poolsize */
  2421. #define FREEQ_POOLSIZE 2048
/**
 * qdf_nbuf_track_free() - free the nbuf tracking cookie.
 * @node: nbuf tracking node
 *
 * Matches calls to qdf_nbuf_track_alloc.
 * Either returns the tracking cookie to the kernel or to an internal
 * freelist, based on the current size of the freelist.
 *
 * Return: none
 */
  2432. static void qdf_nbuf_track_free(QDF_NBUF_TRACK *node)
  2433. {
  2434. unsigned long irq_flag;
  2435. if (!node)
  2436. return;
	/* Try to shrink the freelist if free_list_count is greater than
	 * FREEQ_POOLSIZE, but only shrink it if it is also bigger than twice
	 * the number of nbufs in use. If the driver is stalling in a
	 * consistently bursty fashion, this will keep 3/4 of the allocations
	 * on the free list while still allowing the system to recover memory
	 * as traffic becomes less frantic.
	 */
  2444. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2445. qdf_net_buf_track_used_list_count--;
  2446. if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
  2447. (qdf_net_buf_track_free_list_count >
  2448. qdf_net_buf_track_used_list_count << 1)) {
  2449. kmem_cache_free(nbuf_tracking_cache, node);
  2450. } else {
  2451. node->p_next = qdf_net_buf_track_free_list;
  2452. qdf_net_buf_track_free_list = node;
  2453. qdf_net_buf_track_free_list_count++;
  2454. }
  2455. update_max_free();
  2456. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2457. }
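/*
 * Worked example of the shrink heuristic above (illustrative): with
 * FREEQ_POOLSIZE = 2048, a free count of 2500 against 1000 cookies in use
 * satisfies both conditions (2500 > 2048 and 2500 > 2 * 1000), so the node
 * goes back to the kmem_cache; with only 1900 free, the node is simply pushed
 * onto the freelist and the free count grows to 1901.
 */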
  2458. /**
  2459. * qdf_nbuf_track_prefill() - prefill the nbuf tracking cookie freelist
  2460. *
  2461. * Removes a 'warmup time' characteristic of the freelist. Prefilling
  2462. * the freelist first makes it performant for the first iperf udp burst
  2463. * as well as steady state.
  2464. *
  2465. * Return: None
  2466. */
  2467. static void qdf_nbuf_track_prefill(void)
  2468. {
  2469. int i;
  2470. QDF_NBUF_TRACK *node, *head;
  2471. /* prepopulate the freelist */
  2472. head = NULL;
  2473. for (i = 0; i < FREEQ_POOLSIZE; i++) {
  2474. node = qdf_nbuf_track_alloc();
  2475. if (!node)
  2476. continue;
  2477. node->p_next = head;
  2478. head = node;
  2479. }
  2480. while (head) {
  2481. node = head->p_next;
  2482. qdf_nbuf_track_free(head);
  2483. head = node;
  2484. }
  2485. /* prefilled buffers should not count as used */
  2486. qdf_net_buf_track_max_used = 0;
  2487. }
  2488. /**
  2489. * qdf_nbuf_track_memory_manager_create() - manager for nbuf tracking cookies
  2490. *
  2491. * This initializes the memory manager for the nbuf tracking cookies. Because
  2492. * these cookies are all the same size and only used in this feature, we can
  2493. * use a kmem_cache to provide tracking as well as to speed up allocations.
  2494. * To avoid the overhead of allocating and freeing the buffers (including SLUB
  2495. * features) a freelist is prepopulated here.
  2496. *
  2497. * Return: None
  2498. */
  2499. static void qdf_nbuf_track_memory_manager_create(void)
  2500. {
  2501. spin_lock_init(&qdf_net_buf_track_free_list_lock);
  2502. nbuf_tracking_cache = kmem_cache_create("qdf_nbuf_tracking_cache",
  2503. sizeof(QDF_NBUF_TRACK),
  2504. 0, 0, NULL);
  2505. qdf_nbuf_track_prefill();
  2506. }
  2507. /**
  2508. * qdf_nbuf_track_memory_manager_destroy() - manager for nbuf tracking cookies
  2509. *
  2510. * Empty the freelist and print out usage statistics when it is no longer
  2511. * needed. Also the kmem_cache should be destroyed here so that it can warn if
  2512. * any nbuf tracking cookies were leaked.
  2513. *
  2514. * Return: None
  2515. */
  2516. static void qdf_nbuf_track_memory_manager_destroy(void)
  2517. {
  2518. QDF_NBUF_TRACK *node, *tmp;
  2519. unsigned long irq_flag;
  2520. spin_lock_irqsave(&qdf_net_buf_track_free_list_lock, irq_flag);
  2521. node = qdf_net_buf_track_free_list;
  2522. if (qdf_net_buf_track_max_used > FREEQ_POOLSIZE * 4)
  2523. qdf_print("%s: unexpectedly large max_used count %d",
  2524. __func__, qdf_net_buf_track_max_used);
  2525. if (qdf_net_buf_track_max_used < qdf_net_buf_track_max_allocated)
  2526. qdf_print("%s: %d unused trackers were allocated",
  2527. __func__,
  2528. qdf_net_buf_track_max_allocated -
  2529. qdf_net_buf_track_max_used);
  2530. if (qdf_net_buf_track_free_list_count > FREEQ_POOLSIZE &&
  2531. qdf_net_buf_track_free_list_count > 3*qdf_net_buf_track_max_used/4)
  2532. qdf_print("%s: check freelist shrinking functionality",
  2533. __func__);
  2534. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2535. "%s: %d residual freelist size",
  2536. __func__, qdf_net_buf_track_free_list_count);
  2537. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2538. "%s: %d max freelist size observed",
  2539. __func__, qdf_net_buf_track_max_free);
  2540. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2541. "%s: %d max buffers used observed",
  2542. __func__, qdf_net_buf_track_max_used);
  2543. QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
  2544. "%s: %d max buffers allocated observed",
  2545. __func__, qdf_net_buf_track_max_allocated);
  2546. while (node) {
  2547. tmp = node;
  2548. node = node->p_next;
  2549. kmem_cache_free(nbuf_tracking_cache, tmp);
  2550. qdf_net_buf_track_free_list_count--;
  2551. }
  2552. if (qdf_net_buf_track_free_list_count != 0)
  2553. qdf_info("%d unfreed tracking memory lost in freelist",
  2554. qdf_net_buf_track_free_list_count);
  2555. if (qdf_net_buf_track_used_list_count != 0)
  2556. qdf_info("%d unfreed tracking memory still in use",
  2557. qdf_net_buf_track_used_list_count);
  2558. spin_unlock_irqrestore(&qdf_net_buf_track_free_list_lock, irq_flag);
  2559. kmem_cache_destroy(nbuf_tracking_cache);
  2560. qdf_net_buf_track_free_list = NULL;
  2561. }
  2562. void qdf_net_buf_debug_init(void)
  2563. {
  2564. uint32_t i;
  2565. is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
  2566. if (is_initial_mem_debug_disabled)
  2567. return;
  2568. qdf_atomic_set(&qdf_nbuf_history_index, -1);
  2569. qdf_nbuf_map_tracking_init();
  2570. qdf_nbuf_smmu_map_tracking_init();
  2571. qdf_nbuf_track_memory_manager_create();
  2572. for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  2573. gp_qdf_net_buf_track_tbl[i] = NULL;
  2574. spin_lock_init(&g_qdf_net_buf_track_lock[i]);
  2575. }
  2576. }
  2577. qdf_export_symbol(qdf_net_buf_debug_init);
  2578. void qdf_net_buf_debug_exit(void)
  2579. {
  2580. uint32_t i;
  2581. uint32_t count = 0;
  2582. unsigned long irq_flag;
  2583. QDF_NBUF_TRACK *p_node;
  2584. QDF_NBUF_TRACK *p_prev;
  2585. if (is_initial_mem_debug_disabled)
  2586. return;
  2587. for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
  2588. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2589. p_node = gp_qdf_net_buf_track_tbl[i];
  2590. while (p_node) {
  2591. p_prev = p_node;
  2592. p_node = p_node->p_next;
  2593. count++;
  2594. qdf_info("SKB buf memory Leak@ Func %s, @Line %d, size %zu, nbuf %pK",
  2595. p_prev->func_name, p_prev->line_num,
  2596. p_prev->size, p_prev->net_buf);
  2597. qdf_info("SKB leak map %s, line %d, unmap %s line %d mapped=%d",
  2598. p_prev->map_func_name,
  2599. p_prev->map_line_num,
  2600. p_prev->unmap_func_name,
  2601. p_prev->unmap_line_num,
  2602. p_prev->is_nbuf_mapped);
  2603. qdf_nbuf_track_free(p_prev);
  2604. }
  2605. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2606. }
  2607. qdf_nbuf_track_memory_manager_destroy();
  2608. qdf_nbuf_map_tracking_deinit();
  2609. qdf_nbuf_smmu_map_tracking_deinit();
  2610. #ifdef CONFIG_HALT_KMEMLEAK
  2611. if (count) {
  2612. qdf_err("%d SKBs leaked .. please fix the SKB leak", count);
  2613. QDF_BUG(0);
  2614. }
  2615. #endif
  2616. }
  2617. qdf_export_symbol(qdf_net_buf_debug_exit);
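/*
 * Illustrative usage sketch (not part of the original source): the init/exit
 * pair above is expected to bracket the lifetime of the nbuf debug
 * infrastructure. The hook names below are hypothetical placeholders for
 * whatever driver attach/detach path calls into QDF.
 *
 *   static int hypothetical_driver_attach(void)
 *   {
 *           qdf_net_buf_debug_init();    // set up hash table + tracking caches
 *           return 0;
 *   }
 *
 *   static void hypothetical_driver_detach(void)
 *   {
 *           qdf_net_buf_debug_exit();    // report leaks, free tracker memory
 *   }
 */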
  2618. /**
  2619. * qdf_net_buf_debug_hash() - hash network buffer pointer
  2620. * @net_buf: network buffer
  2621. *
  2622. * Return: hash value
  2623. */
  2624. static uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
  2625. {
  2626. uint32_t i;
  2627. i = (uint32_t) (((uintptr_t) net_buf) >> 4);
  2628. i += (uint32_t) (((uintptr_t) net_buf) >> 14);
  2629. i &= (QDF_NET_BUF_TRACK_MAX_SIZE - 1);
  2630. return i;
  2631. }
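/*
 * Worked example (illustrative only): for a hypothetical pointer value
 * net_buf = 0x12345680, and assuming QDF_NET_BUF_TRACK_MAX_SIZE is 1024
 * for this example, the hash folds two shifted copies of the address and
 * masks to the table size:
 *
 *   0x12345680 >> 4   = 0x01234568
 *   0x12345680 >> 14  = 0x000048d1
 *   sum               = 0x01238e39
 *   & (1024 - 1)      = 0x239  (bucket 569)
 *
 * Mixing two shift amounts spreads buffers that differ only in their
 * low-order allocation alignment across different buckets.
 */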
  2632. /**
  2633. * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
  2634. * @net_buf: network buffer
  2635. *
  2636. * Return: If skb is found in hash table then return pointer to network buffer
  2637. * else return %NULL
  2638. */
  2639. static QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
  2640. {
  2641. uint32_t i;
  2642. QDF_NBUF_TRACK *p_node;
  2643. i = qdf_net_buf_debug_hash(net_buf);
  2644. p_node = gp_qdf_net_buf_track_tbl[i];
  2645. while (p_node) {
  2646. if (p_node->net_buf == net_buf)
  2647. return p_node;
  2648. p_node = p_node->p_next;
  2649. }
  2650. return NULL;
  2651. }
  2652. void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
  2653. const char *func_name, uint32_t line_num)
  2654. {
  2655. uint32_t i;
  2656. unsigned long irq_flag;
  2657. QDF_NBUF_TRACK *p_node;
  2658. QDF_NBUF_TRACK *new_node;
  2659. if (is_initial_mem_debug_disabled)
  2660. return;
  2661. new_node = qdf_nbuf_track_alloc();
  2662. i = qdf_net_buf_debug_hash(net_buf);
  2663. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2664. p_node = qdf_net_buf_debug_look_up(net_buf);
  2665. if (p_node) {
  2666. qdf_print("Double allocation of skb ! Already allocated from %pK %s %d current alloc from %pK %s %d",
  2667. p_node->net_buf, p_node->func_name, p_node->line_num,
  2668. net_buf, func_name, line_num);
  2669. qdf_nbuf_track_free(new_node);
  2670. } else {
  2671. p_node = new_node;
  2672. if (p_node) {
  2673. p_node->net_buf = net_buf;
  2674. qdf_str_lcopy(p_node->func_name, func_name,
  2675. QDF_MEM_FUNC_NAME_SIZE);
  2676. p_node->line_num = line_num;
  2677. p_node->is_nbuf_mapped = false;
  2678. p_node->map_line_num = 0;
  2679. p_node->unmap_line_num = 0;
  2680. p_node->map_func_name[0] = '\0';
  2681. p_node->unmap_func_name[0] = '\0';
  2682. p_node->size = size;
  2683. p_node->time = qdf_get_log_timestamp();
  2684. qdf_net_buf_update_smmu_params(p_node);
  2685. qdf_mem_skb_inc(size);
  2686. p_node->p_next = gp_qdf_net_buf_track_tbl[i];
  2687. gp_qdf_net_buf_track_tbl[i] = p_node;
  2688. } else {
  2689. qdf_net_buf_track_fail_count++;
  2690. qdf_print(
  2691. "Mem alloc failed ! Could not track skb from %s %d of size %zu",
  2692. func_name, line_num, size);
  2693. }
  2694. }
  2695. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2696. }
  2697. qdf_export_symbol(qdf_net_buf_debug_add_node);
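/*
 * Illustrative pairing sketch (not part of the original source): a node
 * added with qdf_net_buf_debug_add_node() is expected to be removed with
 * qdf_net_buf_debug_delete_node() when the buffer is released; otherwise
 * it is reported as a leak by qdf_net_buf_debug_exit(). The skb below is
 * hypothetical.
 *
 *   qdf_net_buf_debug_add_node(skb, skb->len, __func__, __LINE__);
 *   ... use the buffer ...
 *   qdf_net_buf_debug_delete_node(skb);
 */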
  2698. void qdf_net_buf_debug_update_node(qdf_nbuf_t net_buf, const char *func_name,
  2699. uint32_t line_num)
  2700. {
  2701. uint32_t i;
  2702. unsigned long irq_flag;
  2703. QDF_NBUF_TRACK *p_node;
  2704. if (is_initial_mem_debug_disabled)
  2705. return;
  2706. i = qdf_net_buf_debug_hash(net_buf);
  2707. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2708. p_node = qdf_net_buf_debug_look_up(net_buf);
  2709. if (p_node) {
  2710. qdf_str_lcopy(p_node->func_name, kbasename(func_name),
  2711. QDF_MEM_FUNC_NAME_SIZE);
  2712. p_node->line_num = line_num;
  2713. }
  2714. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2715. }
  2716. qdf_export_symbol(qdf_net_buf_debug_update_node);
  2717. void qdf_net_buf_debug_update_map_node(qdf_nbuf_t net_buf,
  2718. const char *func_name,
  2719. uint32_t line_num)
  2720. {
  2721. uint32_t i;
  2722. unsigned long irq_flag;
  2723. QDF_NBUF_TRACK *p_node;
  2724. if (is_initial_mem_debug_disabled)
  2725. return;
  2726. i = qdf_net_buf_debug_hash(net_buf);
  2727. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2728. p_node = qdf_net_buf_debug_look_up(net_buf);
  2729. if (p_node) {
  2730. qdf_str_lcopy(p_node->map_func_name, func_name,
  2731. QDF_MEM_FUNC_NAME_SIZE);
  2732. p_node->map_line_num = line_num;
  2733. p_node->is_nbuf_mapped = true;
  2734. }
  2735. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2736. }
  2737. #ifdef NBUF_SMMU_MAP_UNMAP_DEBUG
  2738. void qdf_net_buf_debug_update_smmu_map_node(qdf_nbuf_t nbuf,
  2739. unsigned long iova,
  2740. unsigned long pa,
  2741. const char *func,
  2742. uint32_t line)
  2743. {
  2744. uint32_t i;
  2745. unsigned long irq_flag;
  2746. QDF_NBUF_TRACK *p_node;
  2747. if (is_initial_mem_debug_disabled)
  2748. return;
  2749. i = qdf_net_buf_debug_hash(nbuf);
  2750. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2751. p_node = qdf_net_buf_debug_look_up(nbuf);
  2752. if (p_node) {
  2753. qdf_str_lcopy(p_node->smmu_map_func_name, func,
  2754. QDF_MEM_FUNC_NAME_SIZE);
  2755. p_node->smmu_map_line_num = line;
  2756. p_node->is_nbuf_smmu_mapped = true;
  2757. p_node->smmu_map_iova_addr = iova;
  2758. p_node->smmu_map_pa_addr = pa;
  2759. }
  2760. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2761. }
  2762. void qdf_net_buf_debug_update_smmu_unmap_node(qdf_nbuf_t nbuf,
  2763. unsigned long iova,
  2764. unsigned long pa,
  2765. const char *func,
  2766. uint32_t line)
  2767. {
  2768. uint32_t i;
  2769. unsigned long irq_flag;
  2770. QDF_NBUF_TRACK *p_node;
  2771. if (is_initial_mem_debug_disabled)
  2772. return;
  2773. i = qdf_net_buf_debug_hash(nbuf);
  2774. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2775. p_node = qdf_net_buf_debug_look_up(nbuf);
  2776. if (p_node) {
  2777. qdf_str_lcopy(p_node->smmu_unmap_func_name, func,
  2778. QDF_MEM_FUNC_NAME_SIZE);
  2779. p_node->smmu_unmap_line_num = line;
  2780. p_node->is_nbuf_smmu_mapped = false;
  2781. p_node->smmu_unmap_iova_addr = iova;
  2782. p_node->smmu_unmap_pa_addr = pa;
  2783. }
  2784. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2785. }
  2786. #endif
  2787. void qdf_net_buf_debug_update_unmap_node(qdf_nbuf_t net_buf,
  2788. const char *func_name,
  2789. uint32_t line_num)
  2790. {
  2791. uint32_t i;
  2792. unsigned long irq_flag;
  2793. QDF_NBUF_TRACK *p_node;
  2794. if (is_initial_mem_debug_disabled)
  2795. return;
  2796. i = qdf_net_buf_debug_hash(net_buf);
  2797. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2798. p_node = qdf_net_buf_debug_look_up(net_buf);
  2799. if (p_node) {
  2800. qdf_str_lcopy(p_node->unmap_func_name, func_name,
  2801. QDF_MEM_FUNC_NAME_SIZE);
  2802. p_node->unmap_line_num = line_num;
  2803. p_node->is_nbuf_mapped = false;
  2804. }
  2805. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2806. }
  2807. void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
  2808. {
  2809. uint32_t i;
  2810. QDF_NBUF_TRACK *p_head;
  2811. QDF_NBUF_TRACK *p_node = NULL;
  2812. unsigned long irq_flag;
  2813. QDF_NBUF_TRACK *p_prev;
  2814. if (is_initial_mem_debug_disabled)
  2815. return;
  2816. i = qdf_net_buf_debug_hash(net_buf);
  2817. spin_lock_irqsave(&g_qdf_net_buf_track_lock[i], irq_flag);
  2818. p_head = gp_qdf_net_buf_track_tbl[i];
  2819. /* Unallocated SKB */
  2820. if (!p_head)
  2821. goto done;
  2822. p_node = p_head;
  2823. /* Found at head of the table */
  2824. if (p_head->net_buf == net_buf) {
  2825. gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
  2826. goto done;
  2827. }
  2828. /* Search in collision list */
  2829. while (p_node) {
  2830. p_prev = p_node;
  2831. p_node = p_node->p_next;
  2832. if ((p_node) && (p_node->net_buf == net_buf)) {
  2833. p_prev->p_next = p_node->p_next;
  2834. break;
  2835. }
  2836. }
  2837. done:
  2838. spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[i], irq_flag);
  2839. if (p_node) {
  2840. qdf_mem_skb_dec(p_node->size);
  2841. qdf_nbuf_track_free(p_node);
  2842. } else {
  2843. if (qdf_net_buf_track_fail_count) {
  2844. qdf_print("Untracked net_buf free: %pK with tracking failures count: %u",
  2845. net_buf, qdf_net_buf_track_fail_count);
  2846. } else
  2847. QDF_MEMDEBUG_PANIC("Unallocated buffer ! Double free of net_buf %pK ?",
  2848. net_buf);
  2849. }
  2850. }
  2851. qdf_export_symbol(qdf_net_buf_debug_delete_node);
  2852. void qdf_net_buf_debug_acquire_skb(qdf_nbuf_t net_buf,
  2853. const char *func_name, uint32_t line_num)
  2854. {
  2855. qdf_nbuf_t ext_list = qdf_nbuf_get_ext_list(net_buf);
  2856. if (is_initial_mem_debug_disabled)
  2857. return;
  2858. while (ext_list) {
  2859. /*
2860. * Take care to add a node for each buffer of a jumbo packet
2861. * chained via frag_list
  2862. */
  2863. qdf_nbuf_t next;
  2864. next = qdf_nbuf_queue_next(ext_list);
  2865. qdf_net_buf_debug_add_node(ext_list, 0, func_name, line_num);
  2866. ext_list = next;
  2867. }
  2868. qdf_net_buf_debug_add_node(net_buf, 0, func_name, line_num);
  2869. }
  2870. qdf_export_symbol(qdf_net_buf_debug_acquire_skb);
  2871. void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
  2872. {
  2873. qdf_nbuf_t ext_list;
  2874. if (is_initial_mem_debug_disabled)
  2875. return;
  2876. ext_list = qdf_nbuf_get_ext_list(net_buf);
  2877. while (ext_list) {
  2878. /*
2879. * Take care to free the node for each buffer of a jumbo packet
2880. * chained via frag_list
  2881. */
  2882. qdf_nbuf_t next;
  2883. next = qdf_nbuf_queue_next(ext_list);
  2884. if (qdf_nbuf_get_users(ext_list) > 1) {
  2885. ext_list = next;
  2886. continue;
  2887. }
  2888. qdf_net_buf_debug_delete_node(ext_list);
  2889. ext_list = next;
  2890. }
  2891. if (qdf_nbuf_get_users(net_buf) > 1)
  2892. return;
  2893. qdf_net_buf_debug_delete_node(net_buf);
  2894. }
  2895. qdf_export_symbol(qdf_net_buf_debug_release_skb);
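/*
 * Illustrative sketch (not part of the original source): acquire/release
 * are meant for skbs allocated outside the QDF alloc wrappers (for example,
 * handed down from the network stack) so that they still show up in the
 * tracking table while the driver owns them. The handler name below is a
 * hypothetical placeholder.
 *
 *   static void hypothetical_tx_from_stack(struct sk_buff *skb)
 *   {
 *           qdf_net_buf_debug_acquire_skb(skb, __func__, __LINE__);
 *           ... driver owns and processes the skb ...
 *           qdf_net_buf_debug_release_skb(skb);  // before ownership leaves the driver
 *   }
 */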
  2896. qdf_nbuf_t qdf_nbuf_alloc_debug(qdf_device_t osdev, qdf_size_t size,
  2897. int reserve, int align, int prio,
  2898. const char *func, uint32_t line)
  2899. {
  2900. qdf_nbuf_t nbuf;
  2901. if (is_initial_mem_debug_disabled)
  2902. return __qdf_nbuf_alloc(osdev, size,
  2903. reserve, align,
  2904. prio, func, line);
  2905. nbuf = __qdf_nbuf_alloc(osdev, size, reserve, align, prio, func, line);
  2906. /* Store SKB in internal QDF tracking table */
  2907. if (qdf_likely(nbuf)) {
  2908. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2909. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2910. } else {
  2911. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2912. }
  2913. return nbuf;
  2914. }
  2915. qdf_export_symbol(qdf_nbuf_alloc_debug);
  2916. qdf_nbuf_t qdf_nbuf_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size,
  2917. int reserve, int align, int prio,
  2918. const char *func, uint32_t line)
  2919. {
  2920. qdf_nbuf_t nbuf;
  2921. if (is_initial_mem_debug_disabled)
  2922. return __qdf_nbuf_frag_alloc(osdev, size,
  2923. reserve, align,
  2924. prio, func, line);
  2925. nbuf = __qdf_nbuf_frag_alloc(osdev, size, reserve, align, prio,
  2926. func, line);
  2927. /* Store SKB in internal QDF tracking table */
  2928. if (qdf_likely(nbuf)) {
  2929. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2930. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2931. } else {
  2932. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2933. }
  2934. return nbuf;
  2935. }
  2936. qdf_export_symbol(qdf_nbuf_frag_alloc_debug);
  2937. qdf_nbuf_t qdf_nbuf_alloc_no_recycler_debug(size_t size, int reserve, int align,
  2938. const char *func, uint32_t line)
  2939. {
  2940. qdf_nbuf_t nbuf;
  2941. if (is_initial_mem_debug_disabled)
  2942. return __qdf_nbuf_alloc_no_recycler(size, reserve, align, func,
  2943. line);
  2944. nbuf = __qdf_nbuf_alloc_no_recycler(size, reserve, align, func, line);
  2945. /* Store SKB in internal QDF tracking table */
  2946. if (qdf_likely(nbuf)) {
  2947. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  2948. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  2949. } else {
  2950. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  2951. }
  2952. return nbuf;
  2953. }
  2954. qdf_export_symbol(qdf_nbuf_alloc_no_recycler_debug);
  2955. void qdf_nbuf_free_debug(qdf_nbuf_t nbuf, const char *func, uint32_t line)
  2956. {
  2957. qdf_nbuf_t ext_list;
  2958. qdf_frag_t p_frag;
  2959. uint32_t num_nr_frags;
  2960. uint32_t idx = 0;
  2961. if (qdf_unlikely(!nbuf))
  2962. return;
  2963. if (is_initial_mem_debug_disabled)
  2964. goto free_buf;
  2965. if (qdf_nbuf_get_users(nbuf) > 1)
  2966. goto free_buf;
  2967. /* Remove SKB from internal QDF tracking table */
  2968. qdf_nbuf_panic_on_free_if_smmu_mapped(nbuf, func, line);
  2969. qdf_nbuf_panic_on_free_if_mapped(nbuf, func, line);
  2970. qdf_net_buf_debug_delete_node(nbuf);
  2971. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_FREE);
  2972. /* Take care to delete the debug entries for frags */
  2973. num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
  2974. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  2975. while (idx < num_nr_frags) {
  2976. p_frag = qdf_nbuf_get_frag_addr(nbuf, idx);
  2977. if (qdf_likely(p_frag))
  2978. qdf_frag_debug_refcount_dec(p_frag, func, line);
  2979. idx++;
  2980. }
  2981. /*
  2982. * Take care to update the debug entries for frag_list and also
  2983. * for the frags attached to frag_list
  2984. */
  2985. ext_list = qdf_nbuf_get_ext_list(nbuf);
  2986. while (ext_list) {
  2987. if (qdf_nbuf_get_users(ext_list) == 1) {
  2988. qdf_nbuf_panic_on_free_if_smmu_mapped(ext_list, func,
  2989. line);
  2990. qdf_nbuf_panic_on_free_if_mapped(ext_list, func, line);
  2991. idx = 0;
  2992. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  2993. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  2994. while (idx < num_nr_frags) {
  2995. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  2996. if (qdf_likely(p_frag))
  2997. qdf_frag_debug_refcount_dec(p_frag,
  2998. func, line);
  2999. idx++;
  3000. }
  3001. qdf_net_buf_debug_delete_node(ext_list);
  3002. }
  3003. ext_list = qdf_nbuf_queue_next(ext_list);
  3004. }
  3005. free_buf:
  3006. __qdf_nbuf_free(nbuf);
  3007. }
  3008. qdf_export_symbol(qdf_nbuf_free_debug);
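/*
 * Illustrative sketch (not part of the original source): under
 * NBUF_MEMORY_DEBUG the alloc/free wrappers above record the caller's
 * function and line so that leak reports point at the real call site.
 * They are normally reached through the qdf_nbuf_alloc()/qdf_nbuf_free()
 * wrappers rather than called directly; the size and flags below are
 * hypothetical.
 *
 *   qdf_nbuf_t nbuf = qdf_nbuf_alloc_debug(osdev, 2048, 0, 4, 0,
 *                                          __func__, __LINE__);
 *   if (nbuf)
 *           qdf_nbuf_free_debug(nbuf, __func__, __LINE__);
 */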
  3009. struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size,
  3010. const char *func, uint32_t line)
  3011. {
  3012. struct sk_buff *skb;
  3013. int flags = GFP_KERNEL;
  3014. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3015. flags = GFP_ATOMIC;
  3016. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3017. /*
3018. * Observed that kcompactd burns CPU trying to compact memory into
3019. * order-3 pages. __netdev_alloc_skb() falls back to a 4K page when
3020. * the high-order allocation fails, so there is no need to push hard
3021. * for it. Let kcompactd rest in peace.
  3022. */
  3023. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3024. #endif
  3025. }
  3026. skb = __netdev_alloc_skb(NULL, size, flags);
  3027. if (qdf_likely(is_initial_mem_debug_disabled)) {
  3028. if (qdf_likely(skb))
  3029. qdf_nbuf_count_inc(skb);
  3030. } else {
  3031. if (qdf_likely(skb)) {
  3032. qdf_nbuf_count_inc(skb);
  3033. qdf_net_buf_debug_add_node(skb, size, func, line);
  3034. qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC);
  3035. } else {
  3036. qdf_nbuf_history_add(skb, func, line, QDF_NBUF_ALLOC_FAILURE);
  3037. }
  3038. }
  3039. return skb;
  3040. }
  3041. qdf_export_symbol(__qdf_nbuf_alloc_simple);
  3042. void qdf_nbuf_free_debug_simple(qdf_nbuf_t nbuf, const char *func,
  3043. uint32_t line)
  3044. {
  3045. if (qdf_likely(nbuf)) {
  3046. if (is_initial_mem_debug_disabled) {
  3047. dev_kfree_skb_any(nbuf);
  3048. } else {
  3049. qdf_nbuf_free_debug(nbuf, func, line);
  3050. }
  3051. }
  3052. }
  3053. qdf_export_symbol(qdf_nbuf_free_debug_simple);
  3054. qdf_nbuf_t qdf_nbuf_clone_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
  3055. {
  3056. uint32_t num_nr_frags;
  3057. uint32_t idx = 0;
  3058. qdf_nbuf_t ext_list;
  3059. qdf_frag_t p_frag;
  3060. qdf_nbuf_t cloned_buf = __qdf_nbuf_clone(buf);
  3061. if (is_initial_mem_debug_disabled)
  3062. return cloned_buf;
  3063. if (qdf_unlikely(!cloned_buf))
  3064. return NULL;
  3065. /* Take care to update the debug entries for frags */
  3066. num_nr_frags = qdf_nbuf_get_nr_frags(cloned_buf);
  3067. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  3068. while (idx < num_nr_frags) {
  3069. p_frag = qdf_nbuf_get_frag_addr(cloned_buf, idx);
  3070. if (qdf_likely(p_frag))
  3071. qdf_frag_debug_refcount_inc(p_frag, func, line);
  3072. idx++;
  3073. }
  3074. /* Take care to update debug entries for frags attached to frag_list */
  3075. ext_list = qdf_nbuf_get_ext_list(cloned_buf);
  3076. while (ext_list) {
  3077. idx = 0;
  3078. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  3079. qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
  3080. while (idx < num_nr_frags) {
  3081. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  3082. if (qdf_likely(p_frag))
  3083. qdf_frag_debug_refcount_inc(p_frag, func, line);
  3084. idx++;
  3085. }
  3086. ext_list = qdf_nbuf_queue_next(ext_list);
  3087. }
  3088. /* Store SKB in internal QDF tracking table */
  3089. qdf_net_buf_debug_add_node(cloned_buf, 0, func, line);
  3090. qdf_nbuf_history_add(cloned_buf, func, line, QDF_NBUF_ALLOC_CLONE);
  3091. return cloned_buf;
  3092. }
  3093. qdf_export_symbol(qdf_nbuf_clone_debug);
  3094. qdf_nbuf_t
  3095. qdf_nbuf_page_frag_alloc_debug(qdf_device_t osdev, qdf_size_t size, int reserve,
  3096. int align, __qdf_frag_cache_t *pf_cache,
  3097. const char *func, uint32_t line)
  3098. {
  3099. qdf_nbuf_t nbuf;
  3100. if (is_initial_mem_debug_disabled)
  3101. return __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
  3102. pf_cache, func, line);
  3103. nbuf = __qdf_nbuf_page_frag_alloc(osdev, size, reserve, align,
  3104. pf_cache, func, line);
  3105. /* Store SKB in internal QDF tracking table */
  3106. if (qdf_likely(nbuf)) {
  3107. qdf_net_buf_debug_add_node(nbuf, size, func, line);
  3108. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC);
  3109. } else {
  3110. qdf_nbuf_history_add(nbuf, func, line, QDF_NBUF_ALLOC_FAILURE);
  3111. }
  3112. return nbuf;
  3113. }
  3114. qdf_export_symbol(qdf_nbuf_page_frag_alloc_debug);
  3115. qdf_nbuf_t qdf_nbuf_copy_debug(qdf_nbuf_t buf, const char *func, uint32_t line)
  3116. {
  3117. qdf_nbuf_t copied_buf = __qdf_nbuf_copy(buf);
  3118. if (is_initial_mem_debug_disabled)
  3119. return copied_buf;
  3120. if (qdf_unlikely(!copied_buf))
  3121. return NULL;
  3122. /* Store SKB in internal QDF tracking table */
  3123. qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
  3124. qdf_nbuf_history_add(copied_buf, func, line, QDF_NBUF_ALLOC_COPY);
  3125. return copied_buf;
  3126. }
  3127. qdf_export_symbol(qdf_nbuf_copy_debug);
  3128. qdf_nbuf_t
  3129. qdf_nbuf_copy_expand_debug(qdf_nbuf_t buf, int headroom, int tailroom,
  3130. const char *func, uint32_t line)
  3131. {
  3132. qdf_nbuf_t copied_buf = __qdf_nbuf_copy_expand(buf, headroom, tailroom);
  3133. if (qdf_unlikely(!copied_buf))
  3134. return NULL;
  3135. if (is_initial_mem_debug_disabled)
  3136. return copied_buf;
  3137. /* Store SKB in internal QDF tracking table */
  3138. qdf_net_buf_debug_add_node(copied_buf, 0, func, line);
  3139. qdf_nbuf_history_add(copied_buf, func, line,
  3140. QDF_NBUF_ALLOC_COPY_EXPAND);
  3141. return copied_buf;
  3142. }
  3143. qdf_export_symbol(qdf_nbuf_copy_expand_debug);
  3144. qdf_nbuf_t
  3145. qdf_nbuf_unshare_debug(qdf_nbuf_t buf, const char *func_name,
  3146. uint32_t line_num)
  3147. {
  3148. qdf_nbuf_t unshared_buf;
  3149. qdf_frag_t p_frag;
  3150. uint32_t num_nr_frags;
  3151. uint32_t idx = 0;
  3152. qdf_nbuf_t ext_list, next;
  3153. if (is_initial_mem_debug_disabled)
  3154. return __qdf_nbuf_unshare(buf);
  3155. /* Not a shared buffer, nothing to do */
  3156. if (!qdf_nbuf_is_cloned(buf))
  3157. return buf;
  3158. if (qdf_nbuf_get_users(buf) > 1)
  3159. goto unshare_buf;
  3160. /* Take care to delete the debug entries for frags */
  3161. num_nr_frags = qdf_nbuf_get_nr_frags(buf);
  3162. while (idx < num_nr_frags) {
  3163. p_frag = qdf_nbuf_get_frag_addr(buf, idx);
  3164. if (qdf_likely(p_frag))
  3165. qdf_frag_debug_refcount_dec(p_frag, func_name,
  3166. line_num);
  3167. idx++;
  3168. }
  3169. qdf_net_buf_debug_delete_node(buf);
  3170. /* Take care of jumbo packet connected using frag_list and frags */
  3171. ext_list = qdf_nbuf_get_ext_list(buf);
  3172. while (ext_list) {
  3173. idx = 0;
  3174. next = qdf_nbuf_queue_next(ext_list);
  3175. num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
  3176. if (qdf_nbuf_get_users(ext_list) > 1) {
  3177. ext_list = next;
  3178. continue;
  3179. }
  3180. while (idx < num_nr_frags) {
  3181. p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
  3182. if (qdf_likely(p_frag))
  3183. qdf_frag_debug_refcount_dec(p_frag, func_name,
  3184. line_num);
  3185. idx++;
  3186. }
  3187. qdf_net_buf_debug_delete_node(ext_list);
  3188. ext_list = next;
  3189. }
  3190. unshare_buf:
  3191. unshared_buf = __qdf_nbuf_unshare(buf);
  3192. if (qdf_likely(unshared_buf))
  3193. qdf_net_buf_debug_add_node(unshared_buf, 0, func_name,
  3194. line_num);
  3195. return unshared_buf;
  3196. }
  3197. qdf_export_symbol(qdf_nbuf_unshare_debug);
  3198. void
  3199. qdf_nbuf_dev_kfree_list_debug(__qdf_nbuf_queue_head_t *nbuf_queue_head,
  3200. const char *func, uint32_t line)
  3201. {
  3202. qdf_nbuf_t buf;
  3203. if (qdf_nbuf_queue_empty(nbuf_queue_head))
  3204. return;
  3205. if (is_initial_mem_debug_disabled)
  3206. return __qdf_nbuf_dev_kfree_list(nbuf_queue_head);
  3207. while ((buf = qdf_nbuf_queue_head_dequeue(nbuf_queue_head)) != NULL)
  3208. qdf_nbuf_free_debug(buf, func, line);
  3209. }
  3210. qdf_export_symbol(qdf_nbuf_dev_kfree_list_debug);
  3211. #endif /* NBUF_MEMORY_DEBUG */
  3212. #if defined(QCA_DP_NBUF_FAST_PPEDS)
  3213. #if defined(NBUF_MEMORY_DEBUG)
  3214. struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
  3215. const char *func, uint32_t line)
  3216. {
  3217. struct sk_buff *skb;
  3218. int flags = GFP_KERNEL;
  3219. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3220. flags = GFP_ATOMIC;
  3221. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3222. /*
  3223. * Observed that kcompactd burns out CPU to make order-3
  3224. * page.__netdev_alloc_skb has 4k page fallback option
  3225. * just in case of
  3226. * failing high order page allocation so we don't need
  3227. * to be hard. Make kcompactd rest in piece.
  3228. */
  3229. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3230. #endif
  3231. }
  3232. skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
  3233. if (qdf_likely(is_initial_mem_debug_disabled)) {
  3234. if (qdf_likely(skb))
  3235. qdf_nbuf_count_inc(skb);
  3236. } else {
  3237. if (qdf_likely(skb)) {
  3238. qdf_nbuf_count_inc(skb);
  3239. qdf_net_buf_debug_add_node(skb, size, func, line);
  3240. qdf_nbuf_history_add(skb, func, line,
  3241. QDF_NBUF_ALLOC);
  3242. } else {
  3243. qdf_nbuf_history_add(skb, func, line,
  3244. QDF_NBUF_ALLOC_FAILURE);
  3245. }
  3246. }
  3247. return skb;
  3248. }
  3249. #else
  3250. struct sk_buff *__qdf_nbuf_alloc_ppe_ds(qdf_device_t osdev, size_t size,
  3251. const char *func, uint32_t line)
  3252. {
  3253. struct sk_buff *skb;
  3254. int flags = GFP_KERNEL;
  3255. if (in_interrupt() || irqs_disabled() || in_atomic()) {
  3256. flags = GFP_ATOMIC;
  3257. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
  3258. /*
  3259. * Observed that kcompactd burns out CPU to make order-3
  3260. * page.__netdev_alloc_skb has 4k page fallback option
  3261. * just in case of
  3262. * failing high order page allocation so we don't need
  3263. * to be hard. Make kcompactd rest in piece.
  3264. */
  3265. flags = flags & ~__GFP_KSWAPD_RECLAIM;
  3266. #endif
  3267. }
  3268. skb = __netdev_alloc_skb_no_skb_reset(NULL, size, flags);
  3269. if (qdf_likely(skb))
  3270. qdf_nbuf_count_inc(skb);
  3271. return skb;
  3272. }
  3273. #endif
  3274. qdf_export_symbol(__qdf_nbuf_alloc_ppe_ds);
  3275. #endif
  3276. #if defined(FEATURE_TSO)
  3277. /**
  3278. * struct qdf_tso_cmn_seg_info_t - TSO common info structure
  3279. *
  3280. * @ethproto: ethernet type of the msdu
  3281. * @ip_tcp_hdr_len: ip + tcp length for the msdu
  3282. * @l2_len: L2 length for the msdu
  3283. * @eit_hdr: pointer to EIT header
  3284. * @eit_hdr_len: EIT header length for the msdu
  3285. * @eit_hdr_dma_map_addr: dma addr for EIT header
  3286. * @tcphdr: pointer to tcp header
  3287. * @ipv4_csum_en: ipv4 checksum enable
  3288. * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
  3289. * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
  3290. * @ip_id: IP id
  3291. * @tcp_seq_num: TCP sequence number
  3292. *
  3293. * This structure holds the TSO common info that is common
  3294. * across all the TCP segments of the jumbo packet.
  3295. */
  3296. struct qdf_tso_cmn_seg_info_t {
  3297. uint16_t ethproto;
  3298. uint16_t ip_tcp_hdr_len;
  3299. uint16_t l2_len;
  3300. uint8_t *eit_hdr;
  3301. uint32_t eit_hdr_len;
  3302. qdf_dma_addr_t eit_hdr_dma_map_addr;
  3303. struct tcphdr *tcphdr;
  3304. uint16_t ipv4_csum_en;
  3305. uint16_t tcp_ipv4_csum_en;
  3306. uint16_t tcp_ipv6_csum_en;
  3307. uint16_t ip_id;
  3308. uint32_t tcp_seq_num;
  3309. };
  3310. /**
  3311. * qdf_nbuf_adj_tso_frag() - adjustment for buffer address of tso fragment
  3312. * @skb: network buffer
  3313. *
  3314. * Return: byte offset length of 8 bytes aligned.
  3315. */
  3316. #ifdef FIX_TXDMA_LIMITATION
  3317. static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
  3318. {
  3319. uint32_t eit_hdr_len;
  3320. uint8_t *eit_hdr;
  3321. uint8_t byte_8_align_offset;
  3322. eit_hdr = skb->data;
  3323. eit_hdr_len = (skb_transport_header(skb)
  3324. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3325. byte_8_align_offset = ((unsigned long)(eit_hdr) + eit_hdr_len) & 0x7L;
  3326. if (qdf_unlikely(byte_8_align_offset)) {
  3327. TSO_DEBUG("%pK,Len %d %d",
  3328. eit_hdr, eit_hdr_len, byte_8_align_offset);
  3329. if (unlikely(skb_headroom(skb) < byte_8_align_offset)) {
  3330. TSO_DEBUG("[%d]Insufficient headroom,[%pK],[%pK],[%d]",
  3331. __LINE__, skb->head, skb->data,
  3332. byte_8_align_offset);
  3333. return 0;
  3334. }
  3335. qdf_nbuf_push_head(skb, byte_8_align_offset);
  3336. qdf_mem_move(skb->data,
  3337. skb->data + byte_8_align_offset,
  3338. eit_hdr_len);
  3339. skb->len -= byte_8_align_offset;
  3340. skb->mac_header -= byte_8_align_offset;
  3341. skb->network_header -= byte_8_align_offset;
  3342. skb->transport_header -= byte_8_align_offset;
  3343. }
  3344. return byte_8_align_offset;
  3345. }
  3346. #else
  3347. static uint8_t qdf_nbuf_adj_tso_frag(struct sk_buff *skb)
  3348. {
  3349. return 0;
  3350. }
  3351. #endif
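/*
 * Worked example (illustrative only) for the FIX_TXDMA_LIMITATION case
 * above: assume a hypothetical skb whose EIT header starts at
 * skb->data = 0x...1000 and whose eth + IP + TCP headers add up to
 * eit_hdr_len = 54 bytes. The header then ends at 0x...1036, and
 *
 *   byte_8_align_offset = (0x1000 + 54) & 0x7 = 0x1036 & 0x7 = 6
 *
 * so the header is pushed 6 bytes earlier (into headroom) and moved with
 * qdf_mem_move(), which makes the first TSO payload fragment start on an
 * 8-byte boundary at 0x...1030.
 */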
  3352. #ifdef CONFIG_WLAN_SYSFS_MEM_STATS
  3353. void qdf_record_nbuf_nbytes(
  3354. uint32_t nbytes, qdf_dma_dir_t dir, bool is_mapped)
  3355. {
  3356. __qdf_record_nbuf_nbytes(nbytes, dir, is_mapped);
  3357. }
  3358. qdf_export_symbol(qdf_record_nbuf_nbytes);
  3359. #endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
  3360. /**
  3361. * qdf_nbuf_tso_map_frag() - Map TSO segment
  3362. * @osdev: qdf device handle
  3363. * @tso_frag_vaddr: addr of tso fragment
  3364. * @nbytes: number of bytes
  3365. * @dir: direction
  3366. *
  3367. * Map TSO segment and for MCL record the amount of memory mapped
  3368. *
  3369. * Return: DMA address of mapped TSO fragment in success and
  3370. * NULL in case of DMA mapping failure
  3371. */
  3372. static inline qdf_dma_addr_t qdf_nbuf_tso_map_frag(
  3373. qdf_device_t osdev, void *tso_frag_vaddr,
  3374. uint32_t nbytes, qdf_dma_dir_t dir)
  3375. {
  3376. qdf_dma_addr_t tso_frag_paddr = 0;
  3377. tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
  3378. nbytes, __qdf_dma_dir_to_os(dir));
  3379. if (unlikely(dma_mapping_error(osdev->dev, tso_frag_paddr))) {
  3380. qdf_err("DMA mapping error!");
  3381. qdf_assert_always(0);
  3382. return 0;
  3383. }
  3384. qdf_record_nbuf_nbytes(nbytes, dir, true);
  3385. return tso_frag_paddr;
  3386. }
  3387. /**
  3388. * qdf_nbuf_tso_unmap_frag() - Unmap TSO segment
  3389. * @osdev: qdf device handle
  3390. * @tso_frag_paddr: DMA addr of tso fragment
  3391. * @dir: direction
  3392. * @nbytes: number of bytes
  3393. *
  3394. * Unmap TSO segment and for MCL record the amount of memory mapped
  3395. *
  3396. * Return: None
  3397. */
  3398. static inline void qdf_nbuf_tso_unmap_frag(
  3399. qdf_device_t osdev, qdf_dma_addr_t tso_frag_paddr,
  3400. uint32_t nbytes, qdf_dma_dir_t dir)
  3401. {
  3402. qdf_record_nbuf_nbytes(nbytes, dir, false);
  3403. dma_unmap_single(osdev->dev, tso_frag_paddr,
  3404. nbytes, __qdf_dma_dir_to_os(dir));
  3405. }
  3406. /**
  3407. * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
  3408. * information
  3409. * @osdev: qdf device handle
  3410. * @skb: skb buffer
  3411. * @tso_info: Parameters common to all segments
  3412. *
  3413. * Get the TSO information that is common across all the TCP
  3414. * segments of the jumbo packet
  3415. *
  3416. * Return: 0 - success 1 - failure
  3417. */
  3418. static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
  3419. struct sk_buff *skb,
  3420. struct qdf_tso_cmn_seg_info_t *tso_info)
  3421. {
  3422. /* Get ethernet type and ethernet header length */
  3423. tso_info->ethproto = vlan_get_protocol(skb);
  3424. /* Determine whether this is an IPv4 or IPv6 packet */
  3425. if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
  3426. /* for IPv4, get the IP ID and enable TCP and IP csum */
  3427. struct iphdr *ipv4_hdr = ip_hdr(skb);
  3428. tso_info->ip_id = ntohs(ipv4_hdr->id);
  3429. tso_info->ipv4_csum_en = 1;
  3430. tso_info->tcp_ipv4_csum_en = 1;
  3431. if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
  3432. qdf_err("TSO IPV4 proto 0x%x not TCP",
  3433. ipv4_hdr->protocol);
  3434. return 1;
  3435. }
  3436. } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
  3437. /* for IPv6, enable TCP csum. No IP ID or IP csum */
  3438. tso_info->tcp_ipv6_csum_en = 1;
  3439. } else {
  3440. qdf_err("TSO: ethertype 0x%x is not supported!",
  3441. tso_info->ethproto);
  3442. return 1;
  3443. }
  3444. tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
  3445. tso_info->tcphdr = tcp_hdr(skb);
  3446. tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
  3447. /* get pointer to the ethernet + IP + TCP header and their length */
  3448. tso_info->eit_hdr = skb->data;
  3449. tso_info->eit_hdr_len = (skb_transport_header(skb)
  3450. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3451. tso_info->eit_hdr_dma_map_addr = qdf_nbuf_tso_map_frag(
  3452. osdev, tso_info->eit_hdr,
  3453. tso_info->eit_hdr_len,
  3454. QDF_DMA_TO_DEVICE);
  3455. if (qdf_unlikely(!tso_info->eit_hdr_dma_map_addr))
  3456. return 1;
  3457. if (tso_info->ethproto == htons(ETH_P_IP)) {
  3458. /* include IPv4 header length for IPV4 (total length) */
  3459. tso_info->ip_tcp_hdr_len =
  3460. tso_info->eit_hdr_len - tso_info->l2_len;
  3461. } else if (tso_info->ethproto == htons(ETH_P_IPV6)) {
  3462. /* exclude IPv6 header length for IPv6 (payload length) */
  3463. tso_info->ip_tcp_hdr_len = tcp_hdrlen(skb);
  3464. }
  3465. /*
  3466. * The length of the payload (application layer data) is added to
  3467. * tso_info->ip_tcp_hdr_len before passing it on to the msdu link ext
  3468. * descriptor.
  3469. */
  3470. TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u skb len %u\n", __func__,
  3471. tso_info->tcp_seq_num,
  3472. tso_info->eit_hdr_len,
  3473. tso_info->l2_len,
  3474. skb->len);
  3475. return 0;
  3476. }
  3477. /**
  3478. * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
  3479. *
  3480. * @curr_seg: Segment whose contents are initialized
  3481. * @tso_cmn_info: Parameters common to all segments
  3482. *
  3483. * Return: None
  3484. */
  3485. static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
  3486. struct qdf_tso_seg_elem_t *curr_seg,
  3487. struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
  3488. {
  3489. /* Initialize the flags to 0 */
  3490. memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
  3491. /*
  3492. * The following fields remain the same across all segments of
  3493. * a jumbo packet
  3494. */
  3495. curr_seg->seg.tso_flags.tso_enable = 1;
  3496. curr_seg->seg.tso_flags.ipv4_checksum_en =
  3497. tso_cmn_info->ipv4_csum_en;
  3498. curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
  3499. tso_cmn_info->tcp_ipv6_csum_en;
  3500. curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
  3501. tso_cmn_info->tcp_ipv4_csum_en;
  3502. curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
  3503. /* The following fields change for the segments */
  3504. curr_seg->seg.tso_flags.ip_id = tso_cmn_info->ip_id;
  3505. tso_cmn_info->ip_id++;
  3506. curr_seg->seg.tso_flags.syn = tso_cmn_info->tcphdr->syn;
  3507. curr_seg->seg.tso_flags.rst = tso_cmn_info->tcphdr->rst;
  3508. curr_seg->seg.tso_flags.ack = tso_cmn_info->tcphdr->ack;
  3509. curr_seg->seg.tso_flags.urg = tso_cmn_info->tcphdr->urg;
  3510. curr_seg->seg.tso_flags.ece = tso_cmn_info->tcphdr->ece;
  3511. curr_seg->seg.tso_flags.cwr = tso_cmn_info->tcphdr->cwr;
  3512. curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info->tcp_seq_num;
  3513. /*
  3514. * First fragment for each segment always contains the ethernet,
  3515. * IP and TCP header
  3516. */
  3517. curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
  3518. curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
  3519. curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
  3520. curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
  3521. TSO_DEBUG("%s %d eit hdr %pK eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
  3522. __func__, __LINE__, tso_cmn_info->eit_hdr,
  3523. tso_cmn_info->eit_hdr_len,
  3524. curr_seg->seg.tso_flags.tcp_seq_num,
  3525. curr_seg->seg.total_len);
  3526. qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
  3527. }
  3528. uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
  3529. struct qdf_tso_info_t *tso_info)
  3530. {
  3531. /* common across all segments */
  3532. struct qdf_tso_cmn_seg_info_t tso_cmn_info;
  3533. /* segment specific */
  3534. void *tso_frag_vaddr;
  3535. qdf_dma_addr_t tso_frag_paddr = 0;
  3536. uint32_t num_seg = 0;
  3537. struct qdf_tso_seg_elem_t *curr_seg;
  3538. struct qdf_tso_num_seg_elem_t *total_num_seg;
  3539. skb_frag_t *frag = NULL;
  3540. uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
  3541. uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
  3542. uint32_t skb_proc = skb->len; /* bytes of skb pending processing */
  3543. uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
  3544. int j = 0; /* skb fragment index */
  3545. uint8_t byte_8_align_offset;
  3546. memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
  3547. total_num_seg = tso_info->tso_num_seg_list;
  3548. curr_seg = tso_info->tso_seg_list;
  3549. total_num_seg->num_seg.tso_cmn_num_seg = 0;
  3550. byte_8_align_offset = qdf_nbuf_adj_tso_frag(skb);
  3551. if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
  3552. skb, &tso_cmn_info))) {
  3553. qdf_warn("TSO: error getting common segment info");
  3554. return 0;
  3555. }
  3556. /* length of the first chunk of data in the skb */
  3557. skb_frag_len = skb_headlen(skb);
  3558. /* the 0th tso segment's 0th fragment always contains the EIT header */
  3559. /* update the remaining skb fragment length and TSO segment length */
  3560. skb_frag_len -= tso_cmn_info.eit_hdr_len;
  3561. skb_proc -= tso_cmn_info.eit_hdr_len;
  3562. /* get the address to the next tso fragment */
  3563. tso_frag_vaddr = skb->data +
  3564. tso_cmn_info.eit_hdr_len +
  3565. byte_8_align_offset;
  3566. /* get the length of the next tso fragment */
  3567. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3568. if (tso_frag_len != 0) {
  3569. tso_frag_paddr = qdf_nbuf_tso_map_frag(
  3570. osdev, tso_frag_vaddr, tso_frag_len,
  3571. QDF_DMA_TO_DEVICE);
  3572. if (qdf_unlikely(!tso_frag_paddr))
  3573. return 0;
  3574. }
  3575. TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
  3576. __LINE__, skb_frag_len, tso_frag_len);
  3577. num_seg = tso_info->num_segs;
  3578. tso_info->num_segs = 0;
  3579. tso_info->is_tso = 1;
  3580. while (num_seg && curr_seg) {
  3581. int i = 1; /* tso fragment index */
  3582. uint8_t more_tso_frags = 1;
  3583. curr_seg->seg.num_frags = 0;
  3584. tso_info->num_segs++;
  3585. total_num_seg->num_seg.tso_cmn_num_seg++;
  3586. __qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
  3587. &tso_cmn_info);
  3588. /* If TCP PSH flag is set, set it in the last or only segment */
  3589. if (num_seg == 1)
  3590. curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
  3591. if (unlikely(skb_proc == 0))
  3592. return tso_info->num_segs;
  3593. curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
  3594. curr_seg->seg.tso_flags.l2_len = tso_cmn_info.l2_len;
3595. /* frag len is added to ip_len in the while loop below */
  3596. curr_seg->seg.num_frags++;
  3597. while (more_tso_frags) {
  3598. if (tso_frag_len != 0) {
  3599. curr_seg->seg.tso_frags[i].vaddr =
  3600. tso_frag_vaddr;
  3601. curr_seg->seg.tso_frags[i].length =
  3602. tso_frag_len;
  3603. curr_seg->seg.total_len += tso_frag_len;
  3604. curr_seg->seg.tso_flags.ip_len += tso_frag_len;
  3605. curr_seg->seg.num_frags++;
  3606. skb_proc = skb_proc - tso_frag_len;
  3607. /* increment the TCP sequence number */
  3608. tso_cmn_info.tcp_seq_num += tso_frag_len;
  3609. curr_seg->seg.tso_frags[i].paddr =
  3610. tso_frag_paddr;
  3611. qdf_assert_always(curr_seg->seg.tso_frags[i].paddr);
  3612. }
  3613. TSO_DEBUG("%s[%d] frag %d frag len %d total_len %u vaddr %pK\n",
  3614. __func__, __LINE__,
  3615. i,
  3616. tso_frag_len,
  3617. curr_seg->seg.total_len,
  3618. curr_seg->seg.tso_frags[i].vaddr);
  3619. /* if there is no more data left in the skb */
  3620. if (!skb_proc)
  3621. return tso_info->num_segs;
  3622. /* get the next payload fragment information */
  3623. /* check if there are more fragments in this segment */
  3624. if (tso_frag_len < tso_seg_size) {
  3625. more_tso_frags = 1;
  3626. if (tso_frag_len != 0) {
  3627. tso_seg_size = tso_seg_size -
  3628. tso_frag_len;
  3629. i++;
  3630. if (curr_seg->seg.num_frags ==
  3631. FRAG_NUM_MAX) {
  3632. more_tso_frags = 0;
  3633. /*
  3634. * reset i and the tso
  3635. * payload size
  3636. */
  3637. i = 1;
  3638. tso_seg_size =
  3639. skb_shinfo(skb)->
  3640. gso_size;
  3641. }
  3642. }
  3643. } else {
  3644. more_tso_frags = 0;
  3645. /* reset i and the tso payload size */
  3646. i = 1;
  3647. tso_seg_size = skb_shinfo(skb)->gso_size;
  3648. }
  3649. /* if the next fragment is contiguous */
  3650. if ((tso_frag_len != 0) && (tso_frag_len < skb_frag_len)) {
  3651. tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
  3652. skb_frag_len = skb_frag_len - tso_frag_len;
  3653. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3654. } else { /* the next fragment is not contiguous */
  3655. if (skb_shinfo(skb)->nr_frags == 0) {
  3656. qdf_info("TSO: nr_frags == 0!");
  3657. qdf_assert(0);
  3658. return 0;
  3659. }
  3660. if (j >= skb_shinfo(skb)->nr_frags) {
  3661. qdf_info("TSO: nr_frags %d j %d",
  3662. skb_shinfo(skb)->nr_frags, j);
  3663. qdf_assert(0);
  3664. return 0;
  3665. }
  3666. frag = &skb_shinfo(skb)->frags[j];
  3667. skb_frag_len = skb_frag_size(frag);
  3668. tso_frag_len = min(skb_frag_len, tso_seg_size);
  3669. tso_frag_vaddr = skb_frag_address_safe(frag);
  3670. j++;
  3671. }
  3672. TSO_DEBUG("%s[%d] skb frag len %d tso frag %d len tso_seg_size %d\n",
  3673. __func__, __LINE__, skb_frag_len, tso_frag_len,
  3674. tso_seg_size);
  3675. if (!(tso_frag_vaddr)) {
  3676. TSO_DEBUG("%s: Fragment virtual addr is NULL",
  3677. __func__);
  3678. return 0;
  3679. }
  3680. tso_frag_paddr = qdf_nbuf_tso_map_frag(
  3681. osdev, tso_frag_vaddr,
  3682. tso_frag_len,
  3683. QDF_DMA_TO_DEVICE);
  3684. if (qdf_unlikely(!tso_frag_paddr))
  3685. return 0;
  3686. }
  3687. TSO_DEBUG("%s tcp_seq_num: %u", __func__,
  3688. curr_seg->seg.tso_flags.tcp_seq_num);
  3689. num_seg--;
  3690. /* if TCP FIN flag was set, set it in the last segment */
  3691. if (!num_seg)
  3692. curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
  3693. qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
  3694. curr_seg = curr_seg->next;
  3695. }
  3696. return tso_info->num_segs;
  3697. }
  3698. qdf_export_symbol(__qdf_nbuf_get_tso_info);
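/*
 * Illustrative usage sketch (not part of the original source): the caller
 * is expected to pre-link qdf_tso_seg_elem_t entries (typically taken from
 * a TSO descriptor pool) into tso_info before calling in; num_segs is used
 * as the upper bound and is recomputed by __qdf_nbuf_get_tso_info(). The
 * variable names below are hypothetical.
 *
 *   struct qdf_tso_info_t tso_info = {0};
 *
 *   tso_info.tso_seg_list = pre_allocated_seg_list;      // linked via ->next
 *   tso_info.tso_num_seg_list = pre_allocated_num_seg;
 *   tso_info.num_segs = __qdf_nbuf_get_tso_num_seg(skb);
 *   if (!__qdf_nbuf_get_tso_info(osdev, skb, &tso_info))
 *           ... unmap any mapped fragments and drop the frame ...
 */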
  3699. void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
  3700. struct qdf_tso_seg_elem_t *tso_seg,
  3701. bool is_last_seg)
  3702. {
  3703. uint32_t num_frags = 0;
  3704. if (tso_seg->seg.num_frags > 0)
  3705. num_frags = tso_seg->seg.num_frags - 1;
3706. /* The number of frags in a TSO segment cannot be less than 2 */
  3707. if (num_frags < 1) {
  3708. /*
3709. * If the number of frags in a TSO segment is 1 and is_last_seg is
3710. * true (this may happen when qdf_nbuf_get_tso_info() failed), do
3711. * the DMA unmap for the 0th frag of this segment.
  3712. */
  3713. if (is_last_seg && tso_seg->seg.num_frags == 1)
  3714. goto last_seg_free_first_frag;
  3715. qdf_assert(0);
  3716. qdf_err("ERROR: num of frags in a tso segment is %d",
  3717. (num_frags + 1));
  3718. return;
  3719. }
  3720. while (num_frags) {
3721. /* DMA unmap the TSO segment's frags, except the 0th frag */
  3722. if (0 == tso_seg->seg.tso_frags[num_frags].paddr) {
  3723. qdf_err("ERROR: TSO seg frag %d mapped physical address is NULL",
  3724. num_frags);
  3725. qdf_assert(0);
  3726. return;
  3727. }
  3728. qdf_nbuf_tso_unmap_frag(
  3729. osdev,
  3730. tso_seg->seg.tso_frags[num_frags].paddr,
  3731. tso_seg->seg.tso_frags[num_frags].length,
  3732. QDF_DMA_TO_DEVICE);
  3733. tso_seg->seg.tso_frags[num_frags].paddr = 0;
  3734. num_frags--;
  3735. qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
  3736. }
  3737. last_seg_free_first_frag:
  3738. if (is_last_seg) {
3739. /* DMA unmap the TSO segment's 0th frag */
  3740. if (0 == tso_seg->seg.tso_frags[0].paddr) {
  3741. qdf_err("ERROR: TSO seg frag 0 mapped physical address is NULL");
  3742. qdf_assert(0);
  3743. return;
  3744. }
  3745. qdf_nbuf_tso_unmap_frag(osdev,
  3746. tso_seg->seg.tso_frags[0].paddr,
  3747. tso_seg->seg.tso_frags[0].length,
  3748. QDF_DMA_TO_DEVICE);
  3749. tso_seg->seg.tso_frags[0].paddr = 0;
  3750. qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
  3751. }
  3752. }
  3753. qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
  3754. size_t __qdf_nbuf_get_tcp_payload_len(struct sk_buff *skb)
  3755. {
  3756. size_t packet_len;
  3757. packet_len = skb->len -
  3758. ((skb_transport_header(skb) - skb_mac_header(skb)) +
  3759. tcp_hdrlen(skb));
  3760. return packet_len;
  3761. }
  3762. qdf_export_symbol(__qdf_nbuf_get_tcp_payload_len);
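/*
 * Worked example (illustrative only): for a hypothetical TSO skb with
 * skb->len = 4434, a 14-byte Ethernet header, a 20-byte IPv4 header and a
 * 20-byte TCP header, the EIT header length is 14 + 20 + 20 = 54 bytes and
 *
 *   payload_len = 4434 - 54 = 4380
 *
 * i.e. three 1460-byte MSS worth of TCP payload.
 */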
  3763. #ifndef BUILD_X86
  3764. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3765. {
  3766. uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
  3767. uint32_t remainder, num_segs = 0;
  3768. uint8_t skb_nr_frags = skb_shinfo(skb)->nr_frags;
  3769. uint8_t frags_per_tso = 0;
  3770. uint32_t skb_frag_len = 0;
  3771. uint32_t eit_hdr_len = (skb_transport_header(skb)
  3772. - skb_mac_header(skb)) + tcp_hdrlen(skb);
  3773. skb_frag_t *frag = NULL;
  3774. int j = 0;
  3775. uint32_t temp_num_seg = 0;
  3776. /* length of the first chunk of data in the skb minus eit header*/
  3777. skb_frag_len = skb_headlen(skb) - eit_hdr_len;
  3778. /* Calculate num of segs for skb's first chunk of data*/
  3779. remainder = skb_frag_len % tso_seg_size;
  3780. num_segs = skb_frag_len / tso_seg_size;
  3781. /*
  3782. * Remainder non-zero and nr_frags zero implies end of skb data.
  3783. * In that case, one more tso seg is required to accommodate
  3784. * remaining data, hence num_segs++. If nr_frags is non-zero,
  3785. * then remaining data will be accommodated while doing the calculation
  3786. * for nr_frags data. Hence, frags_per_tso++.
  3787. */
  3788. if (remainder) {
  3789. if (!skb_nr_frags)
  3790. num_segs++;
  3791. else
  3792. frags_per_tso++;
  3793. }
  3794. while (skb_nr_frags) {
  3795. if (j >= skb_shinfo(skb)->nr_frags) {
  3796. qdf_info("TSO: nr_frags %d j %d",
  3797. skb_shinfo(skb)->nr_frags, j);
  3798. qdf_assert(0);
  3799. return 0;
  3800. }
  3801. /*
  3802. * Calculate the number of tso seg for nr_frags data:
3803. * Get the length of each frag in skb_frag_len and add it to the
3804. * remainder, get the number of segments by dividing by
3805. * tso_seg_size, and calculate the new remainder.
  3806. * Decrement the nr_frags value and keep
  3807. * looping all the skb_fragments.
  3808. */
  3809. frag = &skb_shinfo(skb)->frags[j];
  3810. skb_frag_len = skb_frag_size(frag);
  3811. temp_num_seg = num_segs;
  3812. remainder += skb_frag_len;
  3813. num_segs += remainder / tso_seg_size;
  3814. remainder = remainder % tso_seg_size;
  3815. skb_nr_frags--;
  3816. if (remainder) {
  3817. if (num_segs > temp_num_seg)
  3818. frags_per_tso = 0;
  3819. /*
  3820. * increment the tso per frags whenever remainder is
  3821. * positive. If frags_per_tso reaches the (max-1),
  3822. * [First frags always have EIT header, therefore max-1]
  3823. * increment the num_segs as no more data can be
  3824. * accommodated in the curr tso seg. Reset the remainder
  3825. * and frags per tso and keep looping.
  3826. */
  3827. frags_per_tso++;
  3828. if (frags_per_tso == FRAG_NUM_MAX - 1) {
  3829. num_segs++;
  3830. frags_per_tso = 0;
  3831. remainder = 0;
  3832. }
  3833. /*
  3834. * If this is the last skb frag and still remainder is
  3835. * non-zero(frags_per_tso is not reached to the max-1)
  3836. * then increment the num_segs to take care of the
  3837. * remaining length.
  3838. */
  3839. if (!skb_nr_frags && remainder) {
  3840. num_segs++;
  3841. frags_per_tso = 0;
  3842. }
  3843. } else {
  3844. /* Whenever remainder is 0, reset the frags_per_tso. */
  3845. frags_per_tso = 0;
  3846. }
  3847. j++;
  3848. }
  3849. return num_segs;
  3850. }
  3851. #elif !defined(QCA_WIFI_QCN9000)
  3852. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3853. {
  3854. uint32_t i, gso_size, tmp_len, num_segs = 0;
  3855. skb_frag_t *frag = NULL;
  3856. /*
3857. * Check if the head SKB or any of the frags are allocated in the
3858. * region below 0x50000000, which cannot be accessed by the target
  3859. */
  3860. if (virt_to_phys(skb->data) < 0x50000040) {
  3861. TSO_DEBUG("%s %d: Invalid Address nr_frags = %d, paddr = %pK \n",
  3862. __func__, __LINE__, skb_shinfo(skb)->nr_frags,
  3863. virt_to_phys(skb->data));
  3864. goto fail;
  3865. }
  3866. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3867. frag = &skb_shinfo(skb)->frags[i];
  3868. if (!frag)
  3869. goto fail;
  3870. if (virt_to_phys(skb_frag_address_safe(frag)) < 0x50000040)
  3871. goto fail;
  3872. }
  3873. gso_size = skb_shinfo(skb)->gso_size;
  3874. tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
  3875. + tcp_hdrlen(skb));
  3876. while (tmp_len) {
  3877. num_segs++;
  3878. if (tmp_len > gso_size)
  3879. tmp_len -= gso_size;
  3880. else
  3881. break;
  3882. }
  3883. return num_segs;
  3884. /*
  3885. * Do not free this frame, just do socket level accounting
  3886. * so that this is not reused.
  3887. */
  3888. fail:
  3889. if (skb->sk)
  3890. atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
  3891. return 0;
  3892. }
  3893. #else
  3894. uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
  3895. {
  3896. uint32_t i, gso_size, tmp_len, num_segs = 0;
  3897. skb_frag_t *frag = NULL;
  3898. for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
  3899. frag = &skb_shinfo(skb)->frags[i];
  3900. if (!frag)
  3901. goto fail;
  3902. }
  3903. gso_size = skb_shinfo(skb)->gso_size;
  3904. tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
  3905. + tcp_hdrlen(skb));
  3906. while (tmp_len) {
  3907. num_segs++;
  3908. if (tmp_len > gso_size)
  3909. tmp_len -= gso_size;
  3910. else
  3911. break;
  3912. }
  3913. return num_segs;
  3914. /*
  3915. * Do not free this frame, just do socket level accounting
  3916. * so that this is not reused.
  3917. */
  3918. fail:
  3919. if (skb->sk)
  3920. atomic_sub(skb->truesize, &(skb->sk->sk_wmem_alloc));
  3921. return 0;
  3922. }
  3923. #endif
  3924. qdf_export_symbol(__qdf_nbuf_get_tso_num_seg);
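/*
 * Worked example (illustrative only) for the generic (!BUILD_X86) variant
 * above: assume a hypothetical linear skb with skb_headlen = 4434 bytes,
 * no page frags, gso_size = 1460 and an EIT header of 54 bytes. Then
 *
 *   skb_frag_len = 4434 - 54 = 4380
 *   num_segs     = 4380 / 1460 = 3, remainder = 0
 *
 * so three TSO segments are produced. With skb_headlen = 4500 instead, the
 * remainder would be 66 and, since nr_frags is 0, num_segs becomes 4.
 */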
  3925. #endif /* FEATURE_TSO */
  3926. void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
  3927. uint32_t *lo, uint32_t *hi)
  3928. {
  3929. if (sizeof(dmaaddr) > sizeof(uint32_t)) {
  3930. *lo = lower_32_bits(dmaaddr);
  3931. *hi = upper_32_bits(dmaaddr);
  3932. } else {
  3933. *lo = dmaaddr;
  3934. *hi = 0;
  3935. }
  3936. }
  3937. qdf_export_symbol(__qdf_dmaaddr_to_32s);
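/*
 * Worked example (illustrative only): for a hypothetical 36-bit DMA address
 * dmaaddr = 0x3_8000_1000 on a platform where qdf_dma_addr_t is 64 bits,
 *
 *   *lo = lower_32_bits(dmaaddr) = 0x80001000
 *   *hi = upper_32_bits(dmaaddr) = 0x3
 *
 * On a build where the DMA address type is only 32 bits wide, the else
 * branch simply returns the address in *lo and zero in *hi.
 */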
  3938. struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
  3939. {
  3940. qdf_nbuf_users_inc(&skb->users);
  3941. return skb;
  3942. }
  3943. qdf_export_symbol(__qdf_nbuf_inc_users);
  3944. int __qdf_nbuf_get_users(struct sk_buff *skb)
  3945. {
  3946. return qdf_nbuf_users_read(&skb->users);
  3947. }
  3948. qdf_export_symbol(__qdf_nbuf_get_users);
  3949. void __qdf_nbuf_ref(struct sk_buff *skb)
  3950. {
  3951. skb_get(skb);
  3952. }
  3953. qdf_export_symbol(__qdf_nbuf_ref);
  3954. int __qdf_nbuf_shared(struct sk_buff *skb)
  3955. {
  3956. return skb_shared(skb);
  3957. }
  3958. qdf_export_symbol(__qdf_nbuf_shared);
  3959. QDF_STATUS
  3960. __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
  3961. {
  3962. QDF_STATUS error = QDF_STATUS_SUCCESS;
  3963. /*
3964. * The driver can advertise its scatter-gather capability; it must be
3965. * handled here. Use bounce buffers if they are present.
  3966. */
  3967. (*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
  3968. if (!(*dmap))
  3969. error = QDF_STATUS_E_NOMEM;
  3970. return error;
  3971. }
  3972. qdf_export_symbol(__qdf_nbuf_dmamap_create);
  3973. void
  3974. __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
  3975. {
  3976. kfree(dmap);
  3977. }
  3978. qdf_export_symbol(__qdf_nbuf_dmamap_destroy);
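/*
 * Illustrative pairing sketch (not part of the original source): every map
 * handle allocated with __qdf_nbuf_dmamap_create() must be released with
 * __qdf_nbuf_dmamap_destroy(), otherwise the kzalloc'd struct __qdf_dma_map
 * leaks.
 *
 *   __qdf_dma_map_t dmap;
 *
 *   if (__qdf_nbuf_dmamap_create(osdev, &dmap) != QDF_STATUS_SUCCESS)
 *           return;
 *   ... use dmap ...
 *   __qdf_nbuf_dmamap_destroy(osdev, dmap);
 */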
  3979. #ifdef QDF_OS_DEBUG
  3980. QDF_STATUS
  3981. __qdf_nbuf_map_nbytes(
  3982. qdf_device_t osdev,
  3983. struct sk_buff *skb,
  3984. qdf_dma_dir_t dir,
  3985. int nbytes)
  3986. {
  3987. struct skb_shared_info *sh = skb_shinfo(skb);
  3988. qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
  3989. /*
  3990. * Assume there's only a single fragment.
  3991. * To support multiple fragments, it would be necessary to change
3992. * qdf_nbuf_t to be a separate object that stores meta-info
  3993. * (including the bus address for each fragment) and a pointer
  3994. * to the underlying sk_buff.
  3995. */
  3996. qdf_assert(sh->nr_frags == 0);
  3997. return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
  3998. }
  3999. qdf_export_symbol(__qdf_nbuf_map_nbytes);
  4000. #else
  4001. QDF_STATUS
  4002. __qdf_nbuf_map_nbytes(
  4003. qdf_device_t osdev,
  4004. struct sk_buff *skb,
  4005. qdf_dma_dir_t dir,
  4006. int nbytes)
  4007. {
  4008. return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
  4009. }
  4010. qdf_export_symbol(__qdf_nbuf_map_nbytes);
  4011. #endif
  4012. void
  4013. __qdf_nbuf_unmap_nbytes(
  4014. qdf_device_t osdev,
  4015. struct sk_buff *skb,
  4016. qdf_dma_dir_t dir,
  4017. int nbytes)
  4018. {
  4019. qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
  4020. /*
  4021. * Assume there's a single fragment.
4022. * If this is not true, the assertion in __qdf_nbuf_map_nbytes() will catch it.
  4023. */
  4024. __qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
  4025. }
  4026. qdf_export_symbol(__qdf_nbuf_unmap_nbytes);
  4027. void
  4028. __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
  4029. {
  4030. qdf_assert(bmap->mapped);
  4031. qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
  4032. memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
  4033. sizeof(struct __qdf_segment));
  4034. sg->nsegs = bmap->nsegs;
  4035. }
  4036. qdf_export_symbol(__qdf_nbuf_dma_map_info);
  4037. #if defined(__QDF_SUPPORT_FRAG_MEM)
  4038. void
  4039. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4040. {
4041. struct skb_shared_info *sh = skb_shinfo(skb);
4042. int i;
4043. qdf_assert(skb);
4044. sg->sg_segs[0].vaddr = skb->data;
4045. sg->sg_segs[0].len = skb_headlen(skb);
4046. for (i = 1; i <= sh->nr_frags; i++) {
4047. skb_frag_t *f = &sh->frags[i - 1];
4048. qdf_assert(i < QDF_MAX_SGLIST);
4049. sg->sg_segs[i].vaddr = (uint8_t *)skb_frag_address(f);
4050. sg->sg_segs[i].len = skb_frag_size(f);
4051. }
4052. sg->nsegs = sh->nr_frags + 1;
  4053. }
  4054. qdf_export_symbol(__qdf_nbuf_frag_info);
  4055. #else
  4056. #ifdef QDF_OS_DEBUG
  4057. void
  4058. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4059. {
  4060. struct skb_shared_info *sh = skb_shinfo(skb);
  4061. qdf_assert(skb);
  4062. sg->sg_segs[0].vaddr = skb->data;
  4063. sg->sg_segs[0].len = skb->len;
  4064. sg->nsegs = 1;
  4065. qdf_assert(sh->nr_frags == 0);
  4066. }
  4067. qdf_export_symbol(__qdf_nbuf_frag_info);
  4068. #else
  4069. void
  4070. __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
  4071. {
  4072. sg->sg_segs[0].vaddr = skb->data;
  4073. sg->sg_segs[0].len = skb->len;
  4074. sg->nsegs = 1;
  4075. }
  4076. qdf_export_symbol(__qdf_nbuf_frag_info);
  4077. #endif
  4078. #endif
uint32_t
__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
{
	struct skb_shared_info *sh = skb_shinfo(nbuf);
	const skb_frag_t *frag = sh->frags + cur_frag;

	return skb_frag_size(frag);
}
qdf_export_symbol(__qdf_nbuf_get_frag_size);
#ifdef A_SIMOS_DEVHOST
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag)
{
	uintptr_t paddr;

	QDF_NBUF_CB_PADDR(nbuf) = paddr = (uintptr_t)nbuf->data;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_frag_map);
#else
QDF_STATUS __qdf_nbuf_frag_map(
	qdf_device_t osdev, __qdf_nbuf_t nbuf,
	int offset, qdf_dma_dir_t dir, int cur_frag)
{
	dma_addr_t paddr, frag_len;
	struct skb_shared_info *sh = skb_shinfo(nbuf);
	const skb_frag_t *frag = sh->frags + cur_frag;

	frag_len = skb_frag_size(frag);

	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
		skb_frag_dma_map(osdev->dev, frag, offset, frag_len,
				 __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
}
qdf_export_symbol(__qdf_nbuf_frag_map);
#endif
void
__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
{
	return;
}
qdf_export_symbol(__qdf_nbuf_dmamap_set_cb);
/**
 * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
 * @osdev: os device
 * @buf: sk buff
 * @dir: direction
 *
 * Return: none
 */
#if defined(A_SIMOS_DEVHOST)
static void __qdf_nbuf_sync_single_for_cpu(
	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	return;
}
#else
static void __qdf_nbuf_sync_single_for_cpu(
	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
{
	if (0 == QDF_NBUF_CB_PADDR(buf)) {
		qdf_err("ERROR: NBUF mapped physical address is NULL");
		return;
	}
	dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
				skb_end_offset(buf) - skb_headroom(buf),
				__qdf_dma_dir_to_os(dir));
}
#endif
void
__qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
			struct sk_buff *skb, qdf_dma_dir_t dir)
{
	qdf_assert(
		(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));

	/*
	 * Assume there's a single fragment.
	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will
	 * catch it.
	 */
	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
}
qdf_export_symbol(__qdf_nbuf_sync_for_cpu);
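/*
 * Usage sketch (illustrative only): before the CPU inspects data that the
 * device wrote via DMA into a mapped nbuf, the buffer is synced for CPU
 * access. The osdev handle below is an assumption for the example.
 *
 *	__qdf_nbuf_sync_for_cpu(osdev, skb, QDF_DMA_FROM_DEVICE);
 *	// ... now it is safe to parse skb->data on the CPU ...
 */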
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
/**
 * qdf_nbuf_update_radiotap_vht_flags() - Update radiotap header VHT flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buf to which VHT info has to be updated.
 * @rtap_len: Current length of radiotap buffer
 *
 * Return: Length of radiotap after VHT flags updated.
 */
static unsigned int qdf_nbuf_update_radiotap_vht_flags(
					struct mon_rx_status *rx_status,
					int8_t *rtap_buf,
					uint32_t rtap_len)
{
	uint16_t vht_flags = 0;
	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;

	rtap_len = qdf_align(rtap_len, 2);

	/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
	vht_flags |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
		IEEE80211_RADIOTAP_VHT_KNOWN_GI |
		IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM |
		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED |
		IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH |
		IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID;
	put_unaligned_le16(vht_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	rtap_buf[rtap_len] |=
		(rx_status->is_stbc ?
		 IEEE80211_RADIOTAP_VHT_FLAG_STBC : 0) |
		(rx_status->sgi ? IEEE80211_RADIOTAP_VHT_FLAG_SGI : 0) |
		(rx_status->ldpc ?
		 IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM : 0) |
		(rx_status->beamformed ?
		 IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED : 0);
	rtap_len += 1;

	if (!rx_user_status) {
		switch (rx_status->vht_flag_values2) {
		case IEEE80211_RADIOTAP_VHT_BW_20:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_40:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_80:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_160:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
			break;
		}
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[0]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[1]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[2]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values3[3]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values4);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_status->vht_flag_values5);
		rtap_len += 1;
		put_unaligned_le16(rx_status->vht_flag_values6,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;
	} else {
		switch (rx_user_status->vht_flag_values2) {
		case IEEE80211_RADIOTAP_VHT_BW_20:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_20;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_40:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_40;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_80:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_80;
			break;
		case IEEE80211_RADIOTAP_VHT_BW_160:
			rtap_buf[rtap_len] = RADIOTAP_VHT_BW_160;
			break;
		}
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[0]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[1]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[2]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values3[3]);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values4);
		rtap_len += 1;
		rtap_buf[rtap_len] = (rx_user_status->vht_flag_values5);
		rtap_len += 1;
		put_unaligned_le16(rx_user_status->vht_flag_values6,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;
	}

	return rtap_len;
}
/**
 * qdf_nbuf_update_radiotap_he_flags() - Update radiotap header from rx_status
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the high-efficiency (11ax) fields in the radiotap header.
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
				  int8_t *rtap_buf, uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_HE u16, u16, u16, u16, u16, u16
	 * Enable all "known" HE radiotap flags for now
	 */
	rtap_len = qdf_align(rtap_len, 2);

	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
	rtap_len += 2;

	return rtap_len;
}
/**
 * qdf_nbuf_update_radiotap_he_mu_flags() - update he-mu radiotap flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the HE-MU fields in the radiotap header.
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_he_mu_flags(struct mon_rx_status *rx_status,
				     int8_t *rtap_buf, uint32_t rtap_len)
{
	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;

	rtap_len = qdf_align(rtap_len, 2);

	/*
	 * IEEE80211_RADIOTAP_HE_MU u16, u16, u8[4]
	 * Enable all "known" he-mu radiotap flags for now
	 */
	if (!rx_user_status) {
		put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
		rtap_len += 2;
		put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
		rtap_len += 2;

		rtap_buf[rtap_len] = rx_status->he_RU[0];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_status->he_RU[1];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_status->he_RU[2];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_status->he_RU[3];
		rtap_len += 1;
	} else {
		put_unaligned_le16(rx_user_status->he_flags1,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;
		put_unaligned_le16(rx_user_status->he_flags2,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;

		rtap_buf[rtap_len] = rx_user_status->he_RU[0];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_user_status->he_RU[1];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_user_status->he_RU[2];
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_user_status->he_RU[3];
		rtap_len += 1;
		qdf_debug("he_flags %x %x he-RU %x %x %x %x",
			  rx_user_status->he_flags1,
			  rx_user_status->he_flags2, rx_user_status->he_RU[0],
			  rx_user_status->he_RU[1], rx_user_status->he_RU[2],
			  rx_user_status->he_RU[3]);
	}

	return rtap_len;
}
/**
 * qdf_nbuf_update_radiotap_he_mu_other_flags() - update he_mu_other flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the HE-MU-OTHER fields in the radiotap header.
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_he_mu_other_flags(struct mon_rx_status *rx_status,
					   int8_t *rtap_buf, uint32_t rtap_len)
{
	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;

	rtap_len = qdf_align(rtap_len, 2);

	/*
	 * IEEE80211_RADIOTAP_HE-MU-OTHER u16, u16, u8, u8
	 * Enable all "known" he-mu-other radiotap flags for now
	 */
	if (!rx_user_status) {
		put_unaligned_le16(rx_status->he_per_user_1,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;
		put_unaligned_le16(rx_status->he_per_user_2,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;

		rtap_buf[rtap_len] = rx_status->he_per_user_position;
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_status->he_per_user_known;
		rtap_len += 1;
	} else {
		put_unaligned_le16(rx_user_status->he_per_user_1,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;
		put_unaligned_le16(rx_user_status->he_per_user_2,
				   &rtap_buf[rtap_len]);
		rtap_len += 2;

		rtap_buf[rtap_len] = rx_user_status->he_per_user_position;
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_user_status->he_per_user_known;
		rtap_len += 1;
	}

	return rtap_len;
}
/**
 * qdf_nbuf_update_radiotap_usig_flags() - Update radiotap header with USIG data
 * from rx_status
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the U-SIG (11be) fields in the radiotap header.
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_usig_flags(struct mon_rx_status *rx_status,
				    int8_t *rtap_buf, uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_USIG:
	 *		u32, u32, u32
	 */
	rtap_len = qdf_align(rtap_len, 4);

	put_unaligned_le32(rx_status->usig_common, &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->usig_value, &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->usig_mask, &rtap_buf[rtap_len]);
	rtap_len += 4;

	qdf_rl_debug("U-SIG data %x %x %x",
		     rx_status->usig_common, rx_status->usig_value,
		     rx_status->usig_mask);

	return rtap_len;
}
/**
 * qdf_nbuf_update_radiotap_eht_flags() - Update radiotap header with EHT data
 * from rx_status
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: buffer to which radiotap has to be updated
 * @rtap_len: radiotap length
 *
 * This API updates the Extremely High Throughput (11be) fields in the
 * radiotap header.
 *
 * Return: length of rtap_len updated.
 */
static unsigned int
qdf_nbuf_update_radiotap_eht_flags(struct mon_rx_status *rx_status,
				   int8_t *rtap_buf, uint32_t rtap_len)
{
	uint32_t user;

	/*
	 * IEEE80211_RADIOTAP_EHT:
	 *		u32, u32, u32, u32, u32, u32, u32, u16, [u32, u32, u32]
	 */
	rtap_len = qdf_align(rtap_len, 4);

	put_unaligned_le32(rx_status->eht_known, &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[0], &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[1], &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[2], &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[3], &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[4], &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le32(rx_status->eht_data[5], &rtap_buf[rtap_len]);
	rtap_len += 4;

	for (user = 0; user < EHT_USER_INFO_LEN &&
	     rx_status->num_eht_user_info_valid &&
	     user < rx_status->num_eht_user_info_valid; user++) {
		put_unaligned_le32(rx_status->eht_user_info[user],
				   &rtap_buf[rtap_len]);
		rtap_len += 4;
	}

	qdf_rl_debug("EHT data %x %x %x %x %x %x %x",
		     rx_status->eht_known, rx_status->eht_data[0],
		     rx_status->eht_data[1], rx_status->eht_data[2],
		     rx_status->eht_data[3], rx_status->eht_data[4],
		     rx_status->eht_data[5]);

	return rtap_len;
}
#define IEEE80211_RADIOTAP_TX_STATUS 0
#define IEEE80211_RADIOTAP_RETRY_COUNT 1
#define IEEE80211_RADIOTAP_EXTENSION2 2
uint8_t ATH_OUI[] = {0x00, 0x03, 0x7f}; /* Atheros OUI */

/**
 * qdf_nbuf_update_radiotap_ampdu_flags() - Update radiotap header ampdu flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buf to which AMPDU info has to be updated.
 * @rtap_len: Current length of radiotap buffer
 *
 * Return: Length of radiotap after AMPDU flags updated.
 */
static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8
	 * First 32 bits of AMPDU represents the reference number
	 */
	uint32_t ampdu_reference_num = rx_status->ppdu_id;
	uint16_t ampdu_flags = 0;
	uint16_t ampdu_reserved_flags = 0;

	rtap_len = qdf_align(rtap_len, 4);
	put_unaligned_le32(ampdu_reference_num, &rtap_buf[rtap_len]);
	rtap_len += 4;
	put_unaligned_le16(ampdu_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;
	put_unaligned_le16(ampdu_reserved_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	return rtap_len;
}
#ifdef DP_MON_RSSI_IN_DBM
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
	(rx_status->rssi_comb)
#else
#ifdef QCA_RSSI_DB2DBM
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
	(((rx_status)->rssi_dbm_conv_support) ? \
	((rx_status)->rssi_comb + (rx_status)->rssi_offset) : \
	((rx_status)->rssi_comb + (rx_status)->chan_noise_floor))
#else
#define QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status) \
	(rx_status->rssi_comb + rx_status->chan_noise_floor)
#endif
#endif
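/*
 * Worked example (illustrative only): with the default macro variant above,
 * rssi_comb is expressed in dB above the channel noise floor, so a rssi_comb
 * of 40 dB over a chan_noise_floor of -96 dBm yields 40 + (-96) = -56 dBm in
 * the radiotap DBM_ANTSIGNAL field. With QCA_RSSI_DB2DBM and
 * rssi_dbm_conv_support set, rssi_offset is used instead of the noise floor.
 */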
/**
 * qdf_nbuf_update_radiotap_tx_flags() - Update radiotap header tx flags
 * @rx_status: Pointer to rx_status.
 * @rtap_buf: Buf to which tx info has to be updated.
 * @rtap_len: Current length of radiotap buffer
 *
 * Return: Length of radiotap after tx flags updated.
 */
static unsigned int qdf_nbuf_update_radiotap_tx_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	/*
	 * IEEE80211_RADIOTAP_TX_FLAGS u16
	 */
	uint16_t tx_flags = 0;

	rtap_len = qdf_align(rtap_len, 2);

	switch (rx_status->tx_status) {
	case RADIOTAP_TX_STATUS_FAIL:
		tx_flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
		break;
	case RADIOTAP_TX_STATUS_NOACK:
		tx_flags |= IEEE80211_RADIOTAP_F_TX_NOACK;
		break;
	}
	put_unaligned_le16(tx_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	return rtap_len;
}
unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
{
	uint8_t rtap_buf[RADIOTAP_HEADER_LEN] = {0};
	struct ieee80211_radiotap_header *rthdr =
		(struct ieee80211_radiotap_header *)rtap_buf;
	uint32_t rtap_hdr_len = sizeof(struct ieee80211_radiotap_header);
	uint32_t rtap_len = rtap_hdr_len;
	uint8_t length = rtap_len;
	struct qdf_radiotap_vendor_ns_ath *radiotap_vendor_ns_ath;
	struct qdf_radiotap_ext2 *rtap_ext2;
	struct mon_rx_user_status *rx_user_status = rx_status->rx_user_status;

	/* per user info */
	qdf_le32_t *it_present;
	uint32_t it_present_val;
	bool radiotap_ext1_hdr_present = false;

	it_present = &rthdr->it_present;

	/* Adding Extended Header space */
	if (rx_status->add_rtap_ext || rx_status->add_rtap_ext2 ||
	    rx_status->usig_flags || rx_status->eht_flags) {
		rtap_hdr_len += RADIOTAP_HEADER_EXT_LEN;
		rtap_len = rtap_hdr_len;
		radiotap_ext1_hdr_present = true;
	}

	length = rtap_len;

	/* IEEE80211_RADIOTAP_TSFT __le64 microseconds */
	it_present_val = (1 << IEEE80211_RADIOTAP_TSFT);
	put_unaligned_le64(rx_status->tsft, &rtap_buf[rtap_len]);
	rtap_len += 8;

	/* IEEE80211_RADIOTAP_FLAGS u8 */
	it_present_val |= (1 << IEEE80211_RADIOTAP_FLAGS);

	if (rx_status->rs_fcs_err)
		rx_status->rtap_flags |= IEEE80211_RADIOTAP_F_BADFCS;

	rtap_buf[rtap_len] = rx_status->rtap_flags;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_RATE u8 500kb/s */
	if (!rx_status->ht_flags && !rx_status->vht_flags &&
	    !rx_status->he_flags && !rx_status->eht_flags) {
		it_present_val |= (1 << IEEE80211_RADIOTAP_RATE);
		rtap_buf[rtap_len] = rx_status->rate;
	} else
		rtap_buf[rtap_len] = 0;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_CHANNEL 2 x __le16 MHz, bitmap */
	it_present_val |= (1 << IEEE80211_RADIOTAP_CHANNEL);
	put_unaligned_le16(rx_status->chan_freq, &rtap_buf[rtap_len]);
	rtap_len += 2;
	/* Channel flags. */
	if (rx_status->chan_freq > CHANNEL_FREQ_5150)
		rx_status->chan_flags = RADIOTAP_5G_SPECTRUM_CHANNEL;
	else
		rx_status->chan_flags = RADIOTAP_2G_SPECTRUM_CHANNEL;
	if (rx_status->cck_flag)
		rx_status->chan_flags |= RADIOTAP_CCK_CHANNEL;
	if (rx_status->ofdm_flag)
		rx_status->chan_flags |= RADIOTAP_OFDM_CHANNEL;
	put_unaligned_le16(rx_status->chan_flags, &rtap_buf[rtap_len]);
	rtap_len += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL s8 decibels from one milliwatt
	 * (dBm)
	 */
	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	/*
	 * rssi_comb is int dB, need to convert it to dBm.
	 * normalize value to noise floor of -96 dBm
	 */
	rtap_buf[rtap_len] = QDF_MON_STATUS_GET_RSSI_IN_DBM(rx_status);
	rtap_len += 1;

	/* RX signal noise floor */
	it_present_val |= (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
	rtap_buf[rtap_len] = (uint8_t)rx_status->chan_noise_floor;
	rtap_len += 1;

	/* IEEE80211_RADIOTAP_ANTENNA u8 antenna index */
	it_present_val |= (1 << IEEE80211_RADIOTAP_ANTENNA);
	rtap_buf[rtap_len] = rx_status->nr_ant;
	rtap_len += 1;

	if ((rtap_len - length) > RADIOTAP_FIXED_HEADER_LEN) {
		qdf_print("length is greater than RADIOTAP_FIXED_HEADER_LEN");
		return 0;
	}

	/* update tx flags for pkt capture */
	if (rx_status->add_rtap_ext) {
		it_present_val |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TX_FLAGS);
		rtap_len = qdf_nbuf_update_radiotap_tx_flags(rx_status,
							     rtap_buf,
							     rtap_len);

		if ((rtap_len - length) > RADIOTAP_TX_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_TX_FLAGS_LEN");
			return 0;
		}
	}
	if (rx_status->ht_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_MCS u8, u8, u8 */
		it_present_val |= (1 << IEEE80211_RADIOTAP_MCS);
		rtap_buf[rtap_len] = IEEE80211_RADIOTAP_MCS_HAVE_BW |
				     IEEE80211_RADIOTAP_MCS_HAVE_MCS |
				     IEEE80211_RADIOTAP_MCS_HAVE_GI;
		rtap_len += 1;

		if (rx_status->sgi)
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_SGI;
		if (rx_status->bw)
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_40;
		else
			rtap_buf[rtap_len] |= IEEE80211_RADIOTAP_MCS_BW_20;
		rtap_len += 1;

		rtap_buf[rtap_len] = rx_status->ht_mcs;
		rtap_len += 1;

		if ((rtap_len - length) > RADIOTAP_HT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HT_FLAGS_LEN");
			return 0;
		}
	}
	if (rx_status->rs_flags & IEEE80211_AMPDU_FLAG) {
		/* IEEE80211_RADIOTAP_AMPDU_STATUS u32 u16 u8 u8 */
		it_present_val |= (1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		rtap_len = qdf_nbuf_update_radiotap_ampdu_flags(rx_status,
								rtap_buf,
								rtap_len);
	}

	if (rx_status->vht_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16 */
		it_present_val |= (1 << IEEE80211_RADIOTAP_VHT);
		rtap_len = qdf_nbuf_update_radiotap_vht_flags(rx_status,
							      rtap_buf,
							      rtap_len);

		if ((rtap_len - length) > RADIOTAP_VHT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_VHT_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE */
		it_present_val |= (1 << IEEE80211_RADIOTAP_HE);
		rtap_len = qdf_nbuf_update_radiotap_he_flags(rx_status,
							     rtap_buf,
							     rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_mu_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE-MU */
		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU);
		rtap_len = qdf_nbuf_update_radiotap_he_mu_flags(rx_status,
								rtap_buf,
								rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_MU_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_MU_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->he_mu_other_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_HE-MU-OTHER */
		it_present_val |= (1 << IEEE80211_RADIOTAP_HE_MU_OTHER);
		rtap_len =
			qdf_nbuf_update_radiotap_he_mu_other_flags(rx_status,
								   rtap_buf,
								   rtap_len);

		if ((rtap_len - length) > RADIOTAP_HE_MU_OTHER_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_HE_MU_OTHER_FLAGS_LEN");
			return 0;
		}
	}

	rtap_len = qdf_align(rtap_len, 2);

	/*
	 * Radiotap Vendor Namespace
	 */
	it_present_val |= (1 << IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
	radiotap_vendor_ns_ath = (struct qdf_radiotap_vendor_ns_ath *)
				 (rtap_buf + rtap_len);
	/*
	 * Copy Atheros OUI - 3 bytes (4th byte is 0)
	 */
	qdf_mem_copy(radiotap_vendor_ns_ath->hdr.oui, ATH_OUI, sizeof(ATH_OUI));
	/*
	 * Name space selector = 0
	 * We only will have one namespace for now
	 */
	radiotap_vendor_ns_ath->hdr.selector = 0;
	radiotap_vendor_ns_ath->hdr.skip_length = cpu_to_le16(
					sizeof(*radiotap_vendor_ns_ath) -
					sizeof(radiotap_vendor_ns_ath->hdr));
	radiotap_vendor_ns_ath->device_id = cpu_to_le32(rx_status->device_id);
	radiotap_vendor_ns_ath->lsig = cpu_to_le32(rx_status->l_sig_a_info);
	radiotap_vendor_ns_ath->lsig_b = cpu_to_le32(rx_status->l_sig_b_info);
	radiotap_vendor_ns_ath->ppdu_start_timestamp =
				cpu_to_le32(rx_status->ppdu_timestamp);
	rtap_len += sizeof(*radiotap_vendor_ns_ath);

	/* Move to next it_present */
	if (radiotap_ext1_hdr_present) {
		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present_val = 0;
		it_present++;
	}

	/* Add Extension to Radiotap Header & corresponding data */
	if (rx_status->add_rtap_ext) {
		it_present_val |= (1 << IEEE80211_RADIOTAP_TX_STATUS);
		it_present_val |= (1 << IEEE80211_RADIOTAP_RETRY_COUNT);

		rtap_buf[rtap_len] = rx_status->tx_status;
		rtap_len += 1;
		rtap_buf[rtap_len] = rx_status->tx_retry_cnt;
		rtap_len += 1;
	}

	/* Add Extension2 to Radiotap Header */
	if (rx_status->add_rtap_ext2) {
		it_present_val |= (1 << IEEE80211_RADIOTAP_EXTENSION2);

		rtap_ext2 = (struct qdf_radiotap_ext2 *)(rtap_buf + rtap_len);
		rtap_ext2->ppdu_id = rx_status->ppdu_id;
		rtap_ext2->prev_ppdu_id = rx_status->prev_ppdu_id;
		if (!rx_user_status) {
			rtap_ext2->tid = rx_status->tid;
			rtap_ext2->start_seq = rx_status->start_seq;
			qdf_mem_copy(rtap_ext2->ba_bitmap,
				     rx_status->ba_bitmap,
				     8 * (sizeof(uint32_t)));
		} else {
			uint8_t ba_bitmap_sz = rx_user_status->ba_bitmap_sz;

			/* set default bitmap sz if not set */
			ba_bitmap_sz = ba_bitmap_sz ? ba_bitmap_sz : 8;
			rtap_ext2->tid = rx_user_status->tid;
			rtap_ext2->start_seq = rx_user_status->start_seq;
			qdf_mem_copy(rtap_ext2->ba_bitmap,
				     rx_user_status->ba_bitmap,
				     ba_bitmap_sz * (sizeof(uint32_t)));
		}

		rtap_len += sizeof(*rtap_ext2);
	}

	if (rx_status->usig_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_USIG */
		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_USIG);
		rtap_len = qdf_nbuf_update_radiotap_usig_flags(rx_status,
							       rtap_buf,
							       rtap_len);

		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
			return 0;
		}
	}

	if (rx_status->eht_flags) {
		length = rtap_len;
		/* IEEE80211_RADIOTAP_EHT */
		it_present_val |= (1 << IEEE80211_RADIOTAP_EXT1_EHT);
		rtap_len = qdf_nbuf_update_radiotap_eht_flags(rx_status,
							      rtap_buf,
							      rtap_len);

		if ((rtap_len - length) > RADIOTAP_EHT_FLAGS_LEN) {
			qdf_print("length is greater than RADIOTAP_EHT_FLAGS_LEN");
			return 0;
		}
	}

	put_unaligned_le32(it_present_val, it_present);
	rthdr->it_len = cpu_to_le16(rtap_len);

	if (headroom_sz < rtap_len) {
		qdf_debug("DEBUG: Not enough space to update radiotap");
		return 0;
	}

	qdf_nbuf_push_head(nbuf, rtap_len);
	qdf_mem_copy(qdf_nbuf_data(nbuf), rtap_buf, rtap_len);
	return rtap_len;
}
#else
static unsigned int qdf_nbuf_update_radiotap_vht_flags(
					struct mon_rx_status *rx_status,
					int8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

unsigned int qdf_nbuf_update_radiotap_he_flags(struct mon_rx_status *rx_status,
					       int8_t *rtap_buf,
					       uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

static unsigned int qdf_nbuf_update_radiotap_ampdu_flags(
					struct mon_rx_status *rx_status,
					uint8_t *rtap_buf,
					uint32_t rtap_len)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}

unsigned int qdf_nbuf_update_radiotap(struct mon_rx_status *rx_status,
				      qdf_nbuf_t nbuf, uint32_t headroom_sz)
{
	qdf_err("ERROR: struct ieee80211_radiotap_header not supported");
	return 0;
}
#endif
qdf_export_symbol(qdf_nbuf_update_radiotap);
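/*
 * Usage sketch (illustrative only): a monitor-mode rx path fills in a
 * struct mon_rx_status and then prepends the radiotap header in the nbuf
 * headroom before delivering the frame. The rx_status variable and the
 * error handling below are assumptions for the example.
 *
 *	if (!qdf_nbuf_update_radiotap(&rx_status, nbuf,
 *				      qdf_nbuf_headroom(nbuf))) {
 *		// headroom too small or radiotap unsupported; drop or log
 *	}
 */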
void __qdf_nbuf_reg_free_cb(qdf_nbuf_free_t cb_func_ptr)
{
	nbuf_free_cb = cb_func_ptr;
}
qdf_export_symbol(__qdf_nbuf_reg_free_cb);

void qdf_nbuf_classify_pkt(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;

	/* check whether the destination MAC address is broadcast/multicast */
	if (is_broadcast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_SET_BCAST(skb);
	else if (is_multicast_ether_addr((uint8_t *)eh))
		QDF_NBUF_CB_SET_MCAST(skb);

	if (qdf_nbuf_is_ipv4_arp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_ARP;
	else if (qdf_nbuf_is_ipv4_dhcp_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_DHCP;
	else if (qdf_nbuf_is_ipv4_eapol_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_EAPOL;
	else if (qdf_nbuf_is_ipv4_wapi_pkt(skb))
		QDF_NBUF_CB_GET_PACKET_TYPE(skb) =
			QDF_NBUF_CB_PACKET_TYPE_WAPI;
}
qdf_export_symbol(qdf_nbuf_classify_pkt);
void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(__qdf_nbuf_init);

#ifdef WLAN_FEATURE_FASTPATH
void qdf_nbuf_init_fast(qdf_nbuf_t nbuf)
{
	qdf_nbuf_users_set(&nbuf->users, 1);
	skb_reset_tail_pointer(nbuf);
}
qdf_export_symbol(qdf_nbuf_init_fast);
#endif /* WLAN_FEATURE_FASTPATH */

#ifdef QDF_NBUF_GLOBAL_COUNT
void __qdf_nbuf_mod_init(void)
{
	is_initial_mem_debug_disabled = qdf_mem_debug_config_get();
	qdf_atomic_init(&nbuf_count);
	qdf_debugfs_create_atomic(NBUF_DEBUGFS_NAME, S_IRUSR, NULL, &nbuf_count);
}

void __qdf_nbuf_mod_exit(void)
{
}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
					    int offset)
{
	unsigned int frag_offset;
	skb_frag_t *frag;

	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
		return QDF_STATUS_E_FAILURE;

	frag = &skb_shinfo(nbuf)->frags[idx];
	frag_offset = skb_frag_off(frag);

	frag_offset += offset;
	skb_frag_off_set(frag, frag_offset);

	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS __qdf_nbuf_move_frag_page_offset(__qdf_nbuf_t nbuf, uint8_t idx,
					    int offset)
{
	uint16_t frag_offset;
	skb_frag_t *frag;

	if (qdf_unlikely(idx >= __qdf_nbuf_get_nr_frags(nbuf)))
		return QDF_STATUS_E_FAILURE;

	frag = &skb_shinfo(nbuf)->frags[idx];
	frag_offset = frag->page_offset;

	frag_offset += offset;
	frag->page_offset = frag_offset;

	__qdf_nbuf_trim_add_frag_size(nbuf, idx, -(offset), 0);

	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(__qdf_nbuf_move_frag_page_offset);
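/*
 * Worked example (illustrative only): if frag 0 of an nbuf currently starts
 * at page offset 128, then __qdf_nbuf_move_frag_page_offset(nbuf, 0, 64)
 * advances the page offset to 128 + 64 = 192 and shrinks the fragment length
 * by the same 64 bytes via the trim helper, so the fragment still ends at the
 * same byte and only its leading bytes are skipped.
 *
 *	if (__qdf_nbuf_move_frag_page_offset(nbuf, 0, 64) !=
 *	    QDF_STATUS_SUCCESS)
 *		// frag index out of range; nothing was changed
 */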
void __qdf_nbuf_remove_frag(__qdf_nbuf_t nbuf,
			    uint16_t idx,
			    uint16_t truesize)
{
	struct page *page;
	uint16_t frag_len;

	page = skb_frag_page(&skb_shinfo(nbuf)->frags[idx]);

	if (qdf_unlikely(!page))
		return;

	frag_len = qdf_nbuf_get_frag_size_by_idx(nbuf, idx);
	put_page(page);
	nbuf->len -= frag_len;
	nbuf->data_len -= frag_len;
	nbuf->truesize -= truesize;
	skb_shinfo(nbuf)->nr_frags--;
}
qdf_export_symbol(__qdf_nbuf_remove_frag);
void __qdf_nbuf_add_rx_frag(__qdf_frag_t buf, __qdf_nbuf_t nbuf,
			    int offset, int frag_len,
			    unsigned int truesize, bool take_frag_ref)
{
	struct page *page;
	int frag_offset;
	uint8_t nr_frag;

	nr_frag = __qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(nr_frag < QDF_NBUF_MAX_FRAGS);

	page = virt_to_head_page(buf);
	frag_offset = buf - page_address(page);

	skb_add_rx_frag(nbuf, nr_frag, page,
			(frag_offset + offset),
			frag_len, truesize);

	if (unlikely(take_frag_ref)) {
		qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
		skb_frag_ref(nbuf, nr_frag);
	}
}
qdf_export_symbol(__qdf_nbuf_add_rx_frag);
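/*
 * Usage sketch (illustrative only): attaching a received page-backed buffer
 * to an nbuf as a fragment, taking an extra page reference so the producer
 * can keep its own reference. rx_buf, rx_len and rx_truesize below are
 * assumptions for the example.
 *
 *	__qdf_nbuf_add_rx_frag((__qdf_frag_t)rx_buf, nbuf, 0, rx_len,
 *			       rx_truesize, true);
 */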
void __qdf_nbuf_ref_frag(__qdf_frag_t buf)
{
	struct page *page;
	skb_frag_t frag = {0};

	page = virt_to_head_page(buf);
	__skb_frag_set_page(&frag, page);

	/*
	 * __skb_frag_ref() only uses the page to increment the refcount,
	 * so populating the page in the temporary frag is sufficient.
	 */
	qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
	__skb_frag_ref(&frag);
}
qdf_export_symbol(__qdf_nbuf_ref_frag);
#ifdef NBUF_FRAG_MEMORY_DEBUG
QDF_STATUS qdf_nbuf_move_frag_page_offset_debug(qdf_nbuf_t nbuf, uint8_t idx,
						int offset, const char *func,
						uint32_t line)
{
	QDF_STATUS result;
	qdf_frag_t p_fragp, n_fragp;

	p_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);
	result = __qdf_nbuf_move_frag_page_offset(nbuf, idx, offset);

	if (qdf_likely(is_initial_mem_debug_disabled))
		return result;

	n_fragp = qdf_nbuf_get_frag_addr(nbuf, idx);

	/*
	 * Update frag address in frag debug tracker
	 * when frag offset is successfully changed in skb
	 */
	if (result == QDF_STATUS_SUCCESS)
		qdf_frag_debug_update_addr(p_fragp, n_fragp, func, line);

	return result;
}
qdf_export_symbol(qdf_nbuf_move_frag_page_offset_debug);

void qdf_nbuf_add_rx_frag_debug(qdf_frag_t buf, qdf_nbuf_t nbuf,
				int offset, int frag_len,
				unsigned int truesize, bool take_frag_ref,
				const char *func, uint32_t line)
{
	qdf_frag_t fragp;
	uint32_t num_nr_frags;

	__qdf_nbuf_add_rx_frag(buf, nbuf, offset,
			       frag_len, truesize, take_frag_ref);

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	num_nr_frags = qdf_nbuf_get_nr_frags(nbuf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	fragp = qdf_nbuf_get_frag_addr(nbuf, num_nr_frags - 1);

	/* Update frag address in frag debug tracking table */
	if (fragp != buf && !take_frag_ref)
		qdf_frag_debug_update_addr(buf, fragp, func, line);

	/* Update frag refcount in frag debug tracking table */
	qdf_frag_debug_refcount_inc(fragp, func, line);
}
qdf_export_symbol(qdf_nbuf_add_rx_frag_debug);
void qdf_nbuf_ref_frag_debug(qdf_frag_t buf, const char *func, uint32_t line)
{
	__qdf_nbuf_ref_frag(buf);

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	/* Update frag refcount in frag debug tracking table */
	qdf_frag_debug_refcount_inc(buf, func, line);
}
qdf_export_symbol(qdf_nbuf_ref_frag_debug);
void qdf_net_buf_debug_acquire_frag(qdf_nbuf_t buf, const char *func,
				    uint32_t line)
{
	uint32_t num_nr_frags;
	uint32_t idx = 0;
	qdf_nbuf_t ext_list;
	qdf_frag_t p_frag;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_unlikely(!buf))
		return;

	/* Take care to update the refcount in the debug entries for frags */
	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	while (idx < num_nr_frags) {
		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
		if (qdf_likely(p_frag))
			qdf_frag_debug_refcount_inc(p_frag, func, line);
		idx++;
	}

	/*
	 * Take care to update the refcount in the debug entries for the
	 * frags attached to frag_list
	 */
	ext_list = qdf_nbuf_get_ext_list(buf);
	while (ext_list) {
		idx = 0;
		num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);

		qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

		while (idx < num_nr_frags) {
			p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
			if (qdf_likely(p_frag))
				qdf_frag_debug_refcount_inc(p_frag, func, line);
			idx++;
		}
		ext_list = qdf_nbuf_queue_next(ext_list);
	}
}
qdf_export_symbol(qdf_net_buf_debug_acquire_frag);
void qdf_net_buf_debug_release_frag(qdf_nbuf_t buf, const char *func,
				    uint32_t line)
{
	uint32_t num_nr_frags;
	qdf_nbuf_t ext_list;
	uint32_t idx = 0;
	qdf_frag_t p_frag;

	if (qdf_likely(is_initial_mem_debug_disabled))
		return;

	if (qdf_unlikely(!buf))
		return;

	/*
	 * Decrement the refcount for frag debug nodes only when the last
	 * user of the nbuf calls this API, so as to avoid decrementing the
	 * refcount on every call except the last one when the nbuf has
	 * multiple users
	 */
	if (qdf_nbuf_get_users(buf) > 1)
		return;

	/* Take care to update the refcount in the debug entries for frags */
	num_nr_frags = qdf_nbuf_get_nr_frags(buf);
	qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);

	while (idx < num_nr_frags) {
		p_frag = qdf_nbuf_get_frag_addr(buf, idx);
		if (qdf_likely(p_frag))
			qdf_frag_debug_refcount_dec(p_frag, func, line);
		idx++;
	}

	/* Take care to update debug entries for frags attached to frag_list */
	ext_list = qdf_nbuf_get_ext_list(buf);
	while (ext_list) {
		if (qdf_nbuf_get_users(ext_list) == 1) {
			idx = 0;
			num_nr_frags = qdf_nbuf_get_nr_frags(ext_list);
			qdf_assert_always(num_nr_frags <= QDF_NBUF_MAX_FRAGS);
			while (idx < num_nr_frags) {
				p_frag = qdf_nbuf_get_frag_addr(ext_list, idx);
				if (qdf_likely(p_frag))
					qdf_frag_debug_refcount_dec(p_frag,
								    func, line);
				idx++;
			}
		}
		ext_list = qdf_nbuf_queue_next(ext_list);
	}
}
qdf_export_symbol(qdf_net_buf_debug_release_frag);
QDF_STATUS
qdf_nbuf_remove_frag_debug(qdf_nbuf_t nbuf,
			   uint16_t idx,
			   uint16_t truesize,
			   const char *func,
			   uint32_t line)
{
	uint16_t num_frags;
	qdf_frag_t frag;

	if (qdf_unlikely(!nbuf))
		return QDF_STATUS_E_INVAL;

	num_frags = qdf_nbuf_get_nr_frags(nbuf);
	if (idx >= num_frags)
		return QDF_STATUS_E_INVAL;

	if (qdf_likely(is_initial_mem_debug_disabled)) {
		__qdf_nbuf_remove_frag(nbuf, idx, truesize);
		return QDF_STATUS_SUCCESS;
	}

	frag = qdf_nbuf_get_frag_addr(nbuf, idx);
	if (qdf_likely(frag))
		qdf_frag_debug_refcount_dec(frag, func, line);

	__qdf_nbuf_remove_frag(nbuf, idx, truesize);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_nbuf_remove_frag_debug);
#endif /* NBUF_FRAG_MEMORY_DEBUG */
qdf_nbuf_t qdf_get_nbuf_valid_frag(qdf_nbuf_t nbuf)
{
	qdf_nbuf_t last_nbuf;
	uint32_t num_frags;

	if (qdf_unlikely(!nbuf))
		return NULL;

	num_frags = qdf_nbuf_get_nr_frags(nbuf);

	/* Check whether the nbuf still has room to hold another frag */
	if (num_frags < QDF_NBUF_MAX_FRAGS)
		return nbuf;

	if (!__qdf_nbuf_has_fraglist(nbuf))
		return NULL;

	last_nbuf = __qdf_nbuf_get_last_frag_list_nbuf(nbuf);
	if (qdf_unlikely(!last_nbuf))
		return NULL;

	num_frags = qdf_nbuf_get_nr_frags(last_nbuf);
	if (num_frags < QDF_NBUF_MAX_FRAGS)
		return last_nbuf;

	return NULL;
}
qdf_export_symbol(qdf_get_nbuf_valid_frag);
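/*
 * Usage sketch (illustrative only): before attaching another fragment, a
 * caller can ask which nbuf in the chain still has room; NULL means both the
 * head nbuf and the last frag-list member are already full.
 *
 *	qdf_nbuf_t dst = qdf_get_nbuf_valid_frag(nbuf);
 *
 *	if (!dst)
 *		// allocate a new nbuf and append it to the frag list
 */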
QDF_STATUS
qdf_nbuf_add_frag_debug(qdf_device_t osdev, qdf_frag_t buf,
			qdf_nbuf_t nbuf, int offset,
			int frag_len, unsigned int truesize,
			bool take_frag_ref, unsigned int minsize,
			const char *func, uint32_t line)
{
	qdf_nbuf_t cur_nbuf;
	qdf_nbuf_t this_nbuf;

	cur_nbuf = nbuf;
	this_nbuf = nbuf;

	if (qdf_unlikely(!frag_len || !buf)) {
		qdf_nofl_err("%s : %d frag[ buf[%pK] len[%d]] not valid\n",
			     func, line,
			     buf, frag_len);
		return QDF_STATUS_E_INVAL;
	}

	this_nbuf = qdf_get_nbuf_valid_frag(this_nbuf);

	if (this_nbuf) {
		cur_nbuf = this_nbuf;
	} else {
		/* allocate a dummy mpdu buffer with minsize bytes of headroom */
		this_nbuf = qdf_nbuf_alloc(osdev, minsize, minsize, 4, false);
		if (qdf_unlikely(!this_nbuf)) {
			qdf_nofl_err("%s : %d no memory to allocate\n",
				     func, line);
			return QDF_STATUS_E_NOMEM;
		}
	}

	qdf_nbuf_add_rx_frag(buf, this_nbuf, offset, frag_len, truesize,
			     take_frag_ref);

	if (this_nbuf != cur_nbuf) {
		/* add new skb to frag list */
		qdf_nbuf_append_ext_list(nbuf, this_nbuf,
					 qdf_nbuf_len(this_nbuf));
	}

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_nbuf_add_frag_debug);
#ifdef MEMORY_DEBUG
void qdf_nbuf_acquire_track_lock(uint32_t index,
				 unsigned long irq_flag)
{
	spin_lock_irqsave(&g_qdf_net_buf_track_lock[index],
			  irq_flag);
}

void qdf_nbuf_release_track_lock(uint32_t index,
				 unsigned long irq_flag)
{
	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock[index],
			       irq_flag);
}

QDF_NBUF_TRACK *qdf_nbuf_get_track_tbl(uint32_t index)
{
	return gp_qdf_net_buf_track_tbl[index];
}
#endif /* MEMORY_DEBUG */
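/*
 * Usage sketch (illustrative only): walking one hash bucket of the nbuf
 * tracking table under its IRQ-safe lock. The bucket index, irq_flag and the
 * p_node iteration below are assumptions for the example; the real
 * QDF_NBUF_TRACK layout lives in the memory-debug implementation.
 *
 *	unsigned long irq_flag = 0;
 *	QDF_NBUF_TRACK *p_node;
 *
 *	qdf_nbuf_acquire_track_lock(index, irq_flag);
 *	p_node = qdf_nbuf_get_track_tbl(index);
 *	// ... inspect the tracked nbufs in this bucket ...
 *	qdf_nbuf_release_track_lock(index, irq_flag);
 */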
#ifdef ENHANCED_OS_ABSTRACTION
void qdf_nbuf_set_timestamp(qdf_nbuf_t buf)
{
	__qdf_nbuf_set_timestamp(buf);
}
qdf_export_symbol(qdf_nbuf_set_timestamp);

uint64_t qdf_nbuf_get_timestamp(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timestamp(buf);
}
qdf_export_symbol(qdf_nbuf_get_timestamp);

uint64_t qdf_nbuf_get_timestamp_us(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timestamp_us(buf);
}
qdf_export_symbol(qdf_nbuf_get_timestamp_us);

uint64_t qdf_nbuf_get_timedelta_us(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timedelta_us(buf);
}
qdf_export_symbol(qdf_nbuf_get_timedelta_us);

uint64_t qdf_nbuf_get_timedelta_ms(qdf_nbuf_t buf)
{
	return __qdf_nbuf_get_timedelta_ms(buf);
}
qdf_export_symbol(qdf_nbuf_get_timedelta_ms);

qdf_ktime_t qdf_nbuf_net_timedelta(qdf_ktime_t t)
{
	return __qdf_nbuf_net_timedelta(t);
}
qdf_export_symbol(qdf_nbuf_net_timedelta);
#endif