
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/android_fuse.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <net/sock.h>
#include "../tools/lib/bpf/relo_core.h"

/* BTF (BPF Type Format) is the metadata format which describes
 * the data types used by BPF programs/maps.  Hence, it basically
 * focuses on the C programming language, which modern BPF programs
 * primarily use.
 *
 * ELF Section:
 * ~~~~~~~~~~~
 * The BTF data is stored under the ".BTF" ELF section
 *
 * struct btf_type:
 * ~~~~~~~~~~~~~~~
 * Each 'struct btf_type' object describes a C data type.
 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data.  E.g.
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'.
 *
 * 'struct btf_type' and any extra data following it are
 * 4 bytes aligned.
 *
 * Type section:
 * ~~~~~~~~~~~~~
 * The BTF type section contains a list of 'struct btf_type' objects.
 * Each one describes a C type.  Recall from the above section
 * that a 'struct btf_type' object could be immediately followed by extra
 * data in order to describe some particular C types.
 *
 * type_id:
 * ~~~~~~~
 * Each btf_type object is identified by a type_id.  The type_id
 * is implied by the location of the btf_type object in
 * the BTF type section.  The first one has type_id 1.  The second
 * one has type_id 2...etc.  Hence, an earlier btf_type has
 * a smaller type_id.
 *
 * A btf_type object may refer to another btf_type object by using
 * type_id (i.e. the "type" in the "struct btf_type").
 *
 * NOTE that we cannot assume any reference ordering.
 * A btf_type object can refer to an earlier btf_type object
 * but it can also refer to a later btf_type object.
 *
 * For example, to describe "const void *", a btf_type
 * object describing "const" may refer to another btf_type
 * object describing "void *".  This type-reference is done
 * by specifying type_id:
 *
 * [1] CONST (anon) type_id=2
 * [2] PTR (anon) type_id=0
 *
 * The above is the btf_verifier debug log:
 *   - Each line starting with "[?]" is a btf_type object
 *   - [?] is the type_id of the btf_type object.
 *   - CONST/PTR is the BTF_KIND_XXX
 *   - "(anon)" is the name of the type.  It just
 *     happens that CONST and PTR have no name.
 *   - type_id=XXX is the 'u32 type' in btf_type
 *
 * NOTE: "void" has type_id 0
 *
 * String section:
 * ~~~~~~~~~~~~~~
 * The BTF string section contains the names used by the type section.
 * Each string is referred to by an "offset" from the beginning of the
 * string section.
 *
 * Each string is '\0' terminated.
 *
 * The first character in the string section must be '\0',
 * which is used to mean 'anonymous'.  Some btf_type may not
 * have a name.
 */
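
/* Illustrative example (an addition for exposition, not from the original
 * source): the same debug-log notation for "const int *x".  Three btf_type
 * objects are involved (exact log formatting may differ):
 *
 *   [1] INT int size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
 *   [2] CONST (anon) type_id=1
 *   [3] PTR (anon) type_id=2
 *
 * i.e. the PTR refers to the CONST, which refers to the INT, and a
 * variable of type "const int *" would reference type_id 3.
 */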

/* BTF verification:
 *
 * To verify BTF data, two passes are needed.
 *
 * Pass #1
 * ~~~~~~~
 * The first pass is to collect all btf_type objects into
 * an array: "btf->types".
 *
 * Depending on the C type that a btf_type is describing,
 * a btf_type may be followed by extra data.  We don't know
 * how many btf_types there are, and more importantly we don't
 * know where each btf_type is located in the type section.
 *
 * Without knowing the location of each type_id, most verifications
 * cannot be done.  e.g. an earlier btf_type may refer to a later
 * btf_type (recall the "const void *" above), so we cannot
 * check this type-reference in the first pass.
 *
 * The first pass still does some verification (e.g.
 * checking that the name is a valid offset into the string section).
 *
 * Pass #2
 * ~~~~~~~
 * The main focus is to resolve a btf_type that is referring
 * to another type.
 *
 * We have to ensure the referring type:
 * 1) does exist in the BTF (i.e. in btf->types[])
 * 2) does not cause a loop:
 *	struct A {
 *		struct B b;
 *	};
 *
 *	struct B {
 *		struct A a;
 *	};
 *
 * btf_type_needs_resolve() decides if a btf_type needs
 * to be resolved.
 *
 * The needs_resolve type implements the "resolve()" ops which
 * essentially does a DFS and detects back edges.
 *
 * During resolve (or DFS), different C types have different
 * "RESOLVED" conditions.
 *
 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
 * members because a member is always referring to another
 * type.  A struct's member can be treated as "RESOLVED" if
 * it is referring to a BTF_KIND_PTR.  Otherwise, the
 * following valid C struct would be rejected:
 *
 *	struct A {
 *		int m;
 *		struct A *a;
 *	};
 *
 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
 * it is referring to another BTF_KIND_PTR.  Otherwise, we cannot
 * detect a pointer loop, e.g.:
 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
 *                        ^                                          |
 *                        +------------------------------------------+
 *
 */

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
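
/* Illustrative arithmetic (not from the original source): 12 bits round
 * down to 1 byte (12 >> 3 == 1) but up to 2 bytes, since 12 & 7 is
 * non-zero; a whole number of bytes such as 32 bits rounds to 4 either way.
 */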

#define BTF_INFO_MASK 0x9f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* 16MB for 64k structs, each with 16 members, and
 * a few MB of space for the string section.
 * The hard limit is S32_MAX.
 */
#define BTF_MAX_SIZE (16 * 1024 * 1024)
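
/* Rough illustrative sizing (not from the original source): 64k structs of
 * 16 members each cost 64k * (sizeof(struct btf_type) +
 * 16 * sizeof(struct btf_member)) = 64k * (12 + 192) bytes, i.e. about
 * 12.75MB, which leaves a few MB of the 16MB budget for the string section.
 */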

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)
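
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how a caller typically walks the remaining members of a struct/union,
 * starting at index 'from', with for_each_member_from() above.
 */
static void __maybe_unused btf_example_walk_members(const struct btf_type *struct_type,
						    u32 from)
{
	const struct btf_member *member;
	u32 i;

	for_each_member_from(i, from, struct_type, member)
		pr_debug("member #%u: name_off=%u type_id=%u offset=%u\n",
			 i, member->name_off, member->type, member->offset);
}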

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_XDP,
	BTF_KFUNC_HOOK_TC,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 256,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
};

struct btf_kfunc_set_tab {
	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
};

struct btf_id_dtor_kfunc_tab {
	u32 cnt;
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
	bool kernel_btf;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union
					 * or array
					 */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

/* Chunk size we use in safe copy of data to be shown. */
#define BTF_SHOW_OBJ_SAFE_SIZE 32

/*
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
 */
#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16

/* Type name size */
#define BTF_SHOW_NAME_SIZE 80

/*
 * Common data to all BTF show operations.  Private show functions can add
 * their own data to a structure containing a struct btf_show and consult it
 * in the show callback.  See btf_type_show() below.
 *
 * One challenge with showing nested data is we want to skip 0-valued
 * data, but in order to figure out whether a nested object is all zeros
 * we need to walk through it.  As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
 * for nonzero data, while the second actually does the display.  The first
 * pass is signalled by show->state.depth_check being set, and if we
 * encounter a non-zero value we set show->state.depth_to_show to
 * the depth at which we encountered it.  When we have completed the
 * first pass, we will know if anything needs to be displayed if
 * depth_to_show > depth.  See btf_[struct,array]_show() for the
 * implementation of this.
 *
 * Another problem is we want to ensure the data for display is safe to
 * access.  To support this, the anonymous "struct {} obj" tracks the data
 * object and our safe copy of it.  We copy portions of the data needed
 * to the object "copy" buffer, but because its size is limited to
 * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
 * traverse larger objects for display.
 *
 * The various data type show functions all start with a call to
 * btf_show_start_type() which returns a pointer to the safe copy
 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
 * raw data itself).  btf_show_obj_safe() is responsible for
 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data.  skbuff-like semantics are
 * used:
 *
 * - obj.head points to the start of the toplevel object for display
 * - obj.size is the size of the toplevel object
 * - obj.data points to the current point in the original data at
 *   which our safe data starts.  obj.data will advance as we copy
 *   portions of the data.
 *
 * In most cases a single copy will suffice, but larger data structures
 * such as "struct task_struct" will require many copies.  The logic in
 * btf_show_obj_safe() determines whether a new
 * copy_from_kernel_nofault() is needed.
 */
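
/* Illustrative example (not from the original source): when showing a
 * 4096-byte object, obj.head points at byte 0 and obj.size stays 4096 for
 * the whole show operation, while obj.data slides forward as members are
 * displayed and obj.safe[] holds a copy of at most BTF_SHOW_OBJ_SAFE_SIZE
 * (32) bytes starting at obj.data.
 */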

struct btf_show {
	u64 flags;
	void *target;	/* target of show operation (seq file, buffer) */
	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
	const struct btf *btf;
	/* below are used during iteration */
	struct {
		u8 depth;
		u8 depth_to_show;
		u8 depth_check;
		u8 array_member:1,
		   array_terminated:1;
		u16 array_encoding;
		u32 type_id;
		int status;	/* non-zero for error */
		const struct btf_type *type;
		const struct btf_member *member;
		char name[BTF_SHOW_NAME_SIZE];	/* space for member name/type */
	} state;
	struct {
		u32 size;
		void *head;
		void *data;
		u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
	} obj;
};

struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*show)(const struct btf *btf, const struct btf_type *t,
		     u32 type_id, void *data, u8 bits_offsets,
		     struct btf_show *show);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket
	 * for BTF's purposes:
	 *   A type (t) that refers to another
	 *   type through t->type AND whose size cannot
	 *   be determined without following t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}
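
/* Illustrative example (not from the original source): for
 *
 *	typedef const u32 foo_t;
 *
 * the chain is TYPEDEF -> CONST -> INT.  Both TYPEDEF and CONST count as
 * "modifiers" here: their size can only be determined by following
 * t->type down to the underlying INT.
 */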

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

u32 btf_nr_types(const struct btf *btf)
{
	u32 total = 0;

	while (btf) {
		total += btf->nr_types;
		btf = btf->base_btf;
	}

	return total;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i, total;

	total = btf_nr_types(btf);
	for (i = 1; i < total; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}

static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
	struct btf *btf;
	s32 ret;
	int id;

	btf = bpf_get_btf_vmlinux();
	if (IS_ERR(btf))
		return PTR_ERR(btf);
	if (!btf)
		return -EINVAL;

	ret = btf_find_by_name_kind(btf, name, kind);
	/* ret is never zero, since btf_find_by_name_kind returns
	 * positive btf_id or negative error.
	 */
	if (ret > 0) {
		btf_get(btf);
		*btf_p = btf;
		return ret;
	}

	/* If name is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, btf, id) {
		if (!btf_is_module(btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoid holding it for too long
		 */
		btf_get(btf);
		spin_unlock_bh(&btf_idr_lock);
		ret = btf_find_by_name_kind(btf, name, kind);
		if (ret > 0) {
			*btf_p = btf;
			return ret;
		}
		btf_put(btf);
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
	return ret;
}
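
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * resolving a named kernel type with bpf_find_btf_id() above.  On success
 * it returns a positive type id and takes a reference on the BTF object
 * that contained the type, which the caller must drop with btf_put().
 */
static void __maybe_unused btf_example_lookup(void)
{
	struct btf *btf;
	s32 id;

	id = bpf_find_btf_id("task_struct", BTF_KIND_STRUCT, &btf);
	if (id > 0) {
		/* ... use 'id' against 'btf' ... */
		btf_put(btf);
	}
}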

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

/* Types that act only as a source, not sink or intermediate
 * type when resolving.
 */
static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}

/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its members refer to
 * other types (through member->type).
 *
 * btf_type_is_var() because the variable refers to
 * another type.  btf_type_is_datasec() holds multiple
 * btf_type_is_var() types that need resolving.
 *
 * btf_type_is_array() because its element (array->type)
 * refers to another type.  An array can be thought of as a
 * special case of a struct where the same member-type is
 * repeated array->nelems times.
 */
static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_func(t) ||
	       btf_type_is_decl_tag(t) ||
	       btf_type_is_datasec(t);
}
  594. /* t->size can be used */
  595. static bool btf_type_has_size(const struct btf_type *t)
  596. {
  597. switch (BTF_INFO_KIND(t->info)) {
  598. case BTF_KIND_INT:
  599. case BTF_KIND_STRUCT:
  600. case BTF_KIND_UNION:
  601. case BTF_KIND_ENUM:
  602. case BTF_KIND_DATASEC:
  603. case BTF_KIND_FLOAT:
  604. case BTF_KIND_ENUM64:
  605. return true;
  606. }
  607. return false;
  608. }
  609. static const char *btf_int_encoding_str(u8 encoding)
  610. {
  611. if (encoding == 0)
  612. return "(none)";
  613. else if (encoding == BTF_INT_SIGNED)
  614. return "SIGNED";
  615. else if (encoding == BTF_INT_CHAR)
  616. return "CHAR";
  617. else if (encoding == BTF_INT_BOOL)
  618. return "BOOL";
  619. else
  620. return "UNKN";
  621. }
  622. static u32 btf_type_int(const struct btf_type *t)
  623. {
  624. return *(u32 *)(t + 1);
  625. }
  626. static const struct btf_array *btf_type_array(const struct btf_type *t)
  627. {
  628. return (const struct btf_array *)(t + 1);
  629. }
  630. static const struct btf_enum *btf_type_enum(const struct btf_type *t)
  631. {
  632. return (const struct btf_enum *)(t + 1);
  633. }
  634. static const struct btf_var *btf_type_var(const struct btf_type *t)
  635. {
  636. return (const struct btf_var *)(t + 1);
  637. }
  638. static const struct btf_decl_tag *btf_type_decl_tag(const struct btf_type *t)
  639. {
  640. return (const struct btf_decl_tag *)(t + 1);
  641. }
  642. static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
  643. {
  644. return (const struct btf_enum64 *)(t + 1);
  645. }
  646. static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
  647. {
  648. return kind_ops[BTF_INFO_KIND(t->info)];
  649. }
  650. static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
  651. {
  652. if (!BTF_STR_OFFSET_VALID(offset))
  653. return false;
  654. while (offset < btf->start_str_off)
  655. btf = btf->base_btf;
  656. offset -= btf->start_str_off;
  657. return offset < btf->hdr.str_len;
  658. }
  659. static bool __btf_name_char_ok(char c, bool first)
  660. {
  661. if ((first ? !isalpha(c) :
  662. !isalnum(c)) &&
  663. c != '_' &&
  664. c != '.')
  665. return false;
  666. return true;
  667. }
  668. static const char *btf_str_by_offset(const struct btf *btf, u32 offset)
  669. {
  670. while (offset < btf->start_str_off)
  671. btf = btf->base_btf;
  672. offset -= btf->start_str_off;
  673. if (offset < btf->hdr.str_len)
  674. return &btf->strings[offset];
  675. return NULL;
  676. }
  677. static bool __btf_name_valid(const struct btf *btf, u32 offset)
  678. {
  679. /* offset must be valid */
  680. const char *src = btf_str_by_offset(btf, offset);
  681. const char *src_limit;
  682. if (!__btf_name_char_ok(*src, true))
  683. return false;
  684. /* set a limit on identifier length */
  685. src_limit = src + KSYM_NAME_LEN;
  686. src++;
  687. while (*src && src < src_limit) {
  688. if (!__btf_name_char_ok(*src, false))
  689. return false;
  690. src++;
  691. }
  692. return !*src;
  693. }
  694. static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
  695. {
  696. return __btf_name_valid(btf, offset);
  697. }
  698. static bool btf_name_valid_section(const struct btf *btf, u32 offset)
  699. {
  700. return __btf_name_valid(btf, offset);
  701. }
  702. static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
  703. {
  704. const char *name;
  705. if (!offset)
  706. return "(anon)";
  707. name = btf_str_by_offset(btf, offset);
  708. return name ?: "(invalid-name-offset)";
  709. }
  710. const char *btf_name_by_offset(const struct btf *btf, u32 offset)
  711. {
  712. return btf_str_by_offset(btf, offset);
  713. }
  714. const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
  715. {
  716. while (type_id < btf->start_id)
  717. btf = btf->base_btf;
  718. type_id -= btf->start_id;
  719. if (type_id >= btf->nr_types)
  720. return NULL;
  721. return btf->types[type_id];
  722. }
  723. EXPORT_SYMBOL_GPL(btf_type_by_id);
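/*
 * Type ids follow the same split-BTF scheme as string offsets: ids below
 * btf->start_id belong to the base BTF chain, and id 0 is the implicit
 * VOID type lazily installed by btf_add_type(). The subtraction above
 * turns a global type id into an index into this btf's local types[]
 * array.
 */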
  724. /*
  725. * Regular int is not a bit field and it must be either
  726. * u8/u16/u32/u64 or __int128.
  727. */
  728. static bool btf_type_int_is_regular(const struct btf_type *t)
  729. {
  730. u8 nr_bits, nr_bytes;
  731. u32 int_data;
  732. int_data = btf_type_int(t);
  733. nr_bits = BTF_INT_BITS(int_data);
  734. nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
  735. if (BITS_PER_BYTE_MASKED(nr_bits) ||
  736. BTF_INT_OFFSET(int_data) ||
  737. (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
  738. nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
  739. nr_bytes != (2 * sizeof(u64)))) {
  740. return false;
  741. }
  742. return true;
  743. }
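/*
 * Illustrative examples for the check above: an int with
 * BTF_INT_OFFSET == 0 and BTF_INT_BITS == 32 (4 bytes) is "regular";
 * one with BTF_INT_BITS == 3, or a 24-bit (3 byte) int, is not, as it
 * fails the bit/byte-size checks.
 */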
  744. /*
  745. * Check that given struct member is a regular int with expected
  746. * offset and size.
  747. */
  748. bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
  749. const struct btf_member *m,
  750. u32 expected_offset, u32 expected_size)
  751. {
  752. const struct btf_type *t;
  753. u32 id, int_data;
  754. u8 nr_bits;
  755. id = m->type;
  756. t = btf_type_id_size(btf, &id, NULL);
  757. if (!t || !btf_type_is_int(t))
  758. return false;
  759. int_data = btf_type_int(t);
  760. nr_bits = BTF_INT_BITS(int_data);
  761. if (btf_type_kflag(s)) {
  762. u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
  763. u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
  764. /* if kflag set, int should be a regular int and
  765. * bit offset should be at byte boundary.
  766. */
  767. return !bitfield_size &&
  768. BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
  769. BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
  770. }
  771. if (BTF_INT_OFFSET(int_data) ||
  772. BITS_PER_BYTE_MASKED(m->offset) ||
  773. BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
  774. BITS_PER_BYTE_MASKED(nr_bits) ||
  775. BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
  776. return false;
  777. return true;
  778. }
  779. /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
  780. static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
  781. u32 id)
  782. {
  783. const struct btf_type *t = btf_type_by_id(btf, id);
  784. while (btf_type_is_modifier(t) &&
  785. BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
  786. t = btf_type_by_id(btf, t->type);
  787. }
  788. return t;
  789. }
  790. #define BTF_SHOW_MAX_ITER 10
  791. #define BTF_KIND_BIT(kind) (1ULL << kind)
  792. /*
  793. * Populate show->state.name with type name information.
  794. * Format of type name is
  795. *
  796. * [.member_name = ] (type_name)
  797. */
  798. static const char *btf_show_name(struct btf_show *show)
  799. {
800. /* BTF_SHOW_MAX_ITER array suffixes "[]" */

  801. const char *array_suffixes = "[][][][][][][][][][]";
  802. const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
803. /* BTF_SHOW_MAX_ITER pointer suffixes "*" */
  804. const char *ptr_suffixes = "**********";
  805. const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
  806. const char *name = NULL, *prefix = "", *parens = "";
  807. const struct btf_member *m = show->state.member;
  808. const struct btf_type *t;
  809. const struct btf_array *array;
  810. u32 id = show->state.type_id;
  811. const char *member = NULL;
  812. bool show_member = false;
  813. u64 kinds = 0;
  814. int i;
  815. show->state.name[0] = '\0';
  816. /*
  817. * Don't show type name if we're showing an array member;
  818. * in that case we show the array type so don't need to repeat
  819. * ourselves for each member.
  820. */
  821. if (show->state.array_member)
  822. return "";
  823. /* Retrieve member name, if any. */
  824. if (m) {
  825. member = btf_name_by_offset(show->btf, m->name_off);
  826. show_member = strlen(member) > 0;
  827. id = m->type;
  828. }
  829. /*
  830. * Start with type_id, as we have resolved the struct btf_type *
  831. * via btf_modifier_show() past the parent typedef to the child
  832. * struct, int etc it is defined as. In such cases, the type_id
  833. * still represents the starting type while the struct btf_type *
  834. * in our show->state points at the resolved type of the typedef.
  835. */
  836. t = btf_type_by_id(show->btf, id);
  837. if (!t)
  838. return "";
  839. /*
  840. * The goal here is to build up the right number of pointer and
  841. * array suffixes while ensuring the type name for a typedef
  842. * is represented. Along the way we accumulate a list of
  843. * BTF kinds we have encountered, since these will inform later
  844. * display; for example, pointer types will not require an
  845. * opening "{" for struct, we will just display the pointer value.
  846. *
  847. * We also want to accumulate the right number of pointer or array
  848. * indices in the format string while iterating until we get to
  849. * the typedef/pointee/array member target type.
  850. *
  851. * We start by pointing at the end of pointer and array suffix
  852. * strings; as we accumulate pointers and arrays we move the pointer
  853. * or array string backwards so it will show the expected number of
854. * '*' or '[]' for the type. Up to BTF_SHOW_MAX_ITER levels of nesting of
855. * pointers and/or arrays and typedefs are supported as a precaution.
  856. *
857. * We also want to get the typedef name while proceeding to resolve the
858. * type it points to, so that we can add parentheses if it is a
859. * "typedef struct" etc.
  860. */
  861. for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
  862. switch (BTF_INFO_KIND(t->info)) {
  863. case BTF_KIND_TYPEDEF:
  864. if (!name)
  865. name = btf_name_by_offset(show->btf,
  866. t->name_off);
  867. kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
  868. id = t->type;
  869. break;
  870. case BTF_KIND_ARRAY:
  871. kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
  872. parens = "[";
  873. if (!t)
  874. return "";
  875. array = btf_type_array(t);
  876. if (array_suffix > array_suffixes)
  877. array_suffix -= 2;
  878. id = array->type;
  879. break;
  880. case BTF_KIND_PTR:
  881. kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
  882. if (ptr_suffix > ptr_suffixes)
  883. ptr_suffix -= 1;
  884. id = t->type;
  885. break;
  886. default:
  887. id = 0;
  888. break;
  889. }
  890. if (!id)
  891. break;
  892. t = btf_type_skip_qualifiers(show->btf, id);
  893. }
  894. /* We may not be able to represent this type; bail to be safe */
  895. if (i == BTF_SHOW_MAX_ITER)
  896. return "";
  897. if (!name)
  898. name = btf_name_by_offset(show->btf, t->name_off);
  899. switch (BTF_INFO_KIND(t->info)) {
  900. case BTF_KIND_STRUCT:
  901. case BTF_KIND_UNION:
  902. prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
  903. "struct" : "union";
  904. /* if it's an array of struct/union, parens is already set */
  905. if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
  906. parens = "{";
  907. break;
  908. case BTF_KIND_ENUM:
  909. case BTF_KIND_ENUM64:
  910. prefix = "enum";
  911. break;
  912. default:
  913. break;
  914. }
  915. /* pointer does not require parens */
  916. if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
  917. parens = "";
  918. /* typedef does not require struct/union/enum prefix */
  919. if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
  920. prefix = "";
  921. if (!name)
  922. name = "";
  923. /* Even if we don't want type name info, we want parentheses etc */
  924. if (show->flags & BTF_SHOW_NONAME)
  925. snprintf(show->state.name, sizeof(show->state.name), "%s",
  926. parens);
  927. else
  928. snprintf(show->state.name, sizeof(show->state.name),
  929. "%s%s%s(%s%s%s%s%s%s)%s",
  930. /* first 3 strings comprise ".member = " */
  931. show_member ? "." : "",
  932. show_member ? member : "",
  933. show_member ? " = " : "",
  934. /* ...next is our prefix (struct, enum, etc) */
  935. prefix,
  936. strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
  937. /* ...this is the type name itself */
  938. name,
  939. /* ...suffixed by the appropriate '*', '[]' suffixes */
  940. strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
  941. array_suffix, parens);
  942. return show->state.name;
  943. }
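/*
 * Illustrative output of the above (member and type names are
 * hypothetical): a struct member "cfg" of type struct cfg is shown as
 * ".cfg = (struct cfg){", while a pointer member yields e.g.
 * ".next = (struct foo *)" with no opening brace. With BTF_SHOW_NONAME
 * only the parens survive, e.g. "{".
 */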
  944. static const char *__btf_show_indent(struct btf_show *show)
  945. {
946. const char *indents = "                                ";
  947. const char *indent = &indents[strlen(indents)];
  948. if ((indent - show->state.depth) >= indents)
  949. return indent - show->state.depth;
  950. return indents;
  951. }
  952. static const char *btf_show_indent(struct btf_show *show)
  953. {
  954. return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
  955. }
  956. static const char *btf_show_newline(struct btf_show *show)
  957. {
  958. return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
  959. }
  960. static const char *btf_show_delim(struct btf_show *show)
  961. {
  962. if (show->state.depth == 0)
  963. return "";
  964. if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
  965. BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
  966. return "|";
  967. return ",";
  968. }
  969. __printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
  970. {
  971. va_list args;
  972. if (!show->state.depth_check) {
  973. va_start(args, fmt);
  974. show->showfn(show, fmt, args);
  975. va_end(args);
  976. }
  977. }
  978. /* Macros are used here as btf_show_type_value[s]() prepends and appends
  979. * format specifiers to the format specifier passed in; these do the work of
  980. * adding indentation, delimiters etc while the caller simply has to specify
  981. * the type value(s) in the format specifier + value(s).
  982. */
  983. #define btf_show_type_value(show, fmt, value) \
  984. do { \
  985. if ((value) != (__typeof__(value))0 || \
  986. (show->flags & BTF_SHOW_ZERO) || \
  987. show->state.depth == 0) { \
  988. btf_show(show, "%s%s" fmt "%s%s", \
  989. btf_show_indent(show), \
  990. btf_show_name(show), \
  991. value, btf_show_delim(show), \
  992. btf_show_newline(show)); \
  993. if (show->state.depth > show->state.depth_to_show) \
  994. show->state.depth_to_show = show->state.depth; \
  995. } \
  996. } while (0)
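/*
 * Note the zero-suppression above: a value comparing equal to 0 is
 * skipped entirely unless BTF_SHOW_ZERO was requested or we are at
 * depth 0 (i.e. the value is the whole top-level object). This keeps
 * the default output of large, mostly-zero structs compact.
 */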
  997. #define btf_show_type_values(show, fmt, ...) \
  998. do { \
  999. btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
  1000. btf_show_name(show), \
  1001. __VA_ARGS__, btf_show_delim(show), \
  1002. btf_show_newline(show)); \
  1003. if (show->state.depth > show->state.depth_to_show) \
  1004. show->state.depth_to_show = show->state.depth; \
  1005. } while (0)
  1006. /* How much is left to copy to safe buffer after @data? */
  1007. static int btf_show_obj_size_left(struct btf_show *show, void *data)
  1008. {
  1009. return show->obj.head + show->obj.size - data;
  1010. }
  1011. /* Is object pointed to by @data of @size already copied to our safe buffer? */
  1012. static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
  1013. {
  1014. return data >= show->obj.data &&
  1015. (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
  1016. }
  1017. /*
  1018. * If object pointed to by @data of @size falls within our safe buffer, return
  1019. * the equivalent pointer to the same safe data. Assumes
  1020. * copy_from_kernel_nofault() has already happened and our safe buffer is
  1021. * populated.
  1022. */
  1023. static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
  1024. {
  1025. if (btf_show_obj_is_safe(show, data, size))
  1026. return show->obj.safe + (data - show->obj.data);
  1027. return NULL;
  1028. }
  1029. /*
  1030. * Return a safe-to-access version of data pointed to by @data.
  1031. * We do this by copying the relevant amount of information
  1032. * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
  1033. *
  1034. * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
  1035. * safe copy is needed.
  1036. *
  1037. * Otherwise we need to determine if we have the required amount
  1038. * of data (determined by the @data pointer and the size of the
  1039. * largest base type we can encounter (represented by
  1040. * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
  1041. * that we will be able to print some of the current object,
  1042. * and if more is needed a copy will be triggered.
  1043. * Some objects such as structs will not fit into the buffer;
  1044. * in such cases additional copies when we iterate over their
  1045. * members may be needed.
  1046. *
  1047. * btf_show_obj_safe() is used to return a safe buffer for
  1048. * btf_show_start_type(); this ensures that as we recurse into
  1049. * nested types we always have safe data for the given type.
  1050. * This approach is somewhat wasteful; it's possible for example
  1051. * that when iterating over a large union we'll end up copying the
  1052. * same data repeatedly, but the goal is safety not performance.
  1053. * We use stack data as opposed to per-CPU buffers because the
  1054. * iteration over a type can take some time, and preemption handling
  1055. * would greatly complicate use of the safe buffer.
  1056. */
  1057. static void *btf_show_obj_safe(struct btf_show *show,
  1058. const struct btf_type *t,
  1059. void *data)
  1060. {
  1061. const struct btf_type *rt;
  1062. int size_left, size;
  1063. void *safe = NULL;
  1064. if (show->flags & BTF_SHOW_UNSAFE)
  1065. return data;
  1066. rt = btf_resolve_size(show->btf, t, &size);
  1067. if (IS_ERR(rt)) {
  1068. show->state.status = PTR_ERR(rt);
  1069. return NULL;
  1070. }
  1071. /*
  1072. * Is this toplevel object? If so, set total object size and
  1073. * initialize pointers. Otherwise check if we still fall within
  1074. * our safe object data.
  1075. */
  1076. if (show->state.depth == 0) {
  1077. show->obj.size = size;
  1078. show->obj.head = data;
  1079. } else {
  1080. /*
  1081. * If the size of the current object is > our remaining
1082. * safe buffer we _may_ need to do a new copy. However,
1083. * consider the case of a nested struct; its size pushes
  1084. * us over the safe buffer limit, but showing any individual
  1085. * struct members does not. In such cases, we don't need
  1086. * to initiate a fresh copy yet; however we definitely need
  1087. * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
  1088. * in our buffer, regardless of the current object size.
  1089. * The logic here is that as we resolve types we will
  1090. * hit a base type at some point, and we need to be sure
  1091. * the next chunk of data is safely available to display
  1092. * that type info safely. We cannot rely on the size of
  1093. * the current object here because it may be much larger
  1094. * than our current buffer (e.g. task_struct is 8k).
  1095. * All we want to do here is ensure that we can print the
  1096. * next basic type, which we can if either
  1097. * - the current type size is within the safe buffer; or
  1098. * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
  1099. * the safe buffer.
  1100. */
  1101. safe = __btf_show_obj_safe(show, data,
  1102. min(size,
  1103. BTF_SHOW_OBJ_BASE_TYPE_SIZE));
  1104. }
  1105. /*
  1106. * We need a new copy to our safe object, either because we haven't
  1107. * yet copied and are initializing safe data, or because the data
  1108. * we want falls outside the boundaries of the safe object.
  1109. */
  1110. if (!safe) {
  1111. size_left = btf_show_obj_size_left(show, data);
  1112. if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
  1113. size_left = BTF_SHOW_OBJ_SAFE_SIZE;
  1114. show->state.status = copy_from_kernel_nofault(show->obj.safe,
  1115. data, size_left);
  1116. if (!show->state.status) {
  1117. show->obj.data = data;
  1118. safe = show->obj.safe;
  1119. }
  1120. }
  1121. return safe;
  1122. }
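/*
 * Rough example of the above: when showing a very large object (the
 * comment above mentions task_struct at ~8k) the whole object cannot fit
 * in the BTF_SHOW_OBJ_SAFE_SIZE buffer. The first copy covers the start
 * of the object; once we descend into members that fall outside the
 * copied window, __btf_show_obj_safe() returns NULL and a fresh
 * copy_from_kernel_nofault() is done from the member's address.
 */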
  1123. /*
  1124. * Set the type we are starting to show and return a safe data pointer
  1125. * to be used for showing the associated data.
  1126. */
  1127. static void *btf_show_start_type(struct btf_show *show,
  1128. const struct btf_type *t,
  1129. u32 type_id, void *data)
  1130. {
  1131. show->state.type = t;
  1132. show->state.type_id = type_id;
  1133. show->state.name[0] = '\0';
  1134. return btf_show_obj_safe(show, t, data);
  1135. }
  1136. static void btf_show_end_type(struct btf_show *show)
  1137. {
  1138. show->state.type = NULL;
  1139. show->state.type_id = 0;
  1140. show->state.name[0] = '\0';
  1141. }
  1142. static void *btf_show_start_aggr_type(struct btf_show *show,
  1143. const struct btf_type *t,
  1144. u32 type_id, void *data)
  1145. {
  1146. void *safe_data = btf_show_start_type(show, t, type_id, data);
  1147. if (!safe_data)
  1148. return safe_data;
  1149. btf_show(show, "%s%s%s", btf_show_indent(show),
  1150. btf_show_name(show),
  1151. btf_show_newline(show));
  1152. show->state.depth++;
  1153. return safe_data;
  1154. }
  1155. static void btf_show_end_aggr_type(struct btf_show *show,
  1156. const char *suffix)
  1157. {
  1158. show->state.depth--;
  1159. btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
  1160. btf_show_delim(show), btf_show_newline(show));
  1161. btf_show_end_type(show);
  1162. }
  1163. static void btf_show_start_member(struct btf_show *show,
  1164. const struct btf_member *m)
  1165. {
  1166. show->state.member = m;
  1167. }
  1168. static void btf_show_start_array_member(struct btf_show *show)
  1169. {
  1170. show->state.array_member = 1;
  1171. btf_show_start_member(show, NULL);
  1172. }
  1173. static void btf_show_end_member(struct btf_show *show)
  1174. {
  1175. show->state.member = NULL;
  1176. }
  1177. static void btf_show_end_array_member(struct btf_show *show)
  1178. {
  1179. show->state.array_member = 0;
  1180. btf_show_end_member(show);
  1181. }
  1182. static void *btf_show_start_array_type(struct btf_show *show,
  1183. const struct btf_type *t,
  1184. u32 type_id,
  1185. u16 array_encoding,
  1186. void *data)
  1187. {
  1188. show->state.array_encoding = array_encoding;
  1189. show->state.array_terminated = 0;
  1190. return btf_show_start_aggr_type(show, t, type_id, data);
  1191. }
  1192. static void btf_show_end_array_type(struct btf_show *show)
  1193. {
  1194. show->state.array_encoding = 0;
  1195. show->state.array_terminated = 0;
  1196. btf_show_end_aggr_type(show, "]");
  1197. }
  1198. static void *btf_show_start_struct_type(struct btf_show *show,
  1199. const struct btf_type *t,
  1200. u32 type_id,
  1201. void *data)
  1202. {
  1203. return btf_show_start_aggr_type(show, t, type_id, data);
  1204. }
  1205. static void btf_show_end_struct_type(struct btf_show *show)
  1206. {
  1207. btf_show_end_aggr_type(show, "}");
  1208. }
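/*
 * The start/end helpers above pair up to produce the aggregate layout:
 * btf_show_start_aggr_type() prints the type name (ending in "{" or "["
 * via btf_show_name()) and bumps depth; members are then shown at the
 * new indent; btf_show_end_aggr_type() drops depth and prints the
 * matching suffix ("}" or "]") followed by delimiter and newline.
 */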
  1209. __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
  1210. const char *fmt, ...)
  1211. {
  1212. va_list args;
  1213. va_start(args, fmt);
  1214. bpf_verifier_vlog(log, fmt, args);
  1215. va_end(args);
  1216. }
  1217. __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
  1218. const char *fmt, ...)
  1219. {
  1220. struct bpf_verifier_log *log = &env->log;
  1221. va_list args;
  1222. if (!bpf_verifier_log_needed(log))
  1223. return;
  1224. va_start(args, fmt);
  1225. bpf_verifier_vlog(log, fmt, args);
  1226. va_end(args);
  1227. }
  1228. __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
  1229. const struct btf_type *t,
  1230. bool log_details,
  1231. const char *fmt, ...)
  1232. {
  1233. struct bpf_verifier_log *log = &env->log;
  1234. struct btf *btf = env->btf;
  1235. va_list args;
  1236. if (!bpf_verifier_log_needed(log))
  1237. return;
  1238. if (log->level == BPF_LOG_KERNEL) {
  1239. /* btf verifier prints all types it is processing via
  1240. * btf_verifier_log_type(..., fmt = NULL).
  1241. * Skip those prints for in-kernel BTF verification.
  1242. */
  1243. if (!fmt)
  1244. return;
  1245. /* Skip logging when loading module BTF with mismatches permitted */
  1246. if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
  1247. return;
  1248. }
  1249. __btf_verifier_log(log, "[%u] %s %s%s",
  1250. env->log_type_id,
  1251. btf_type_str(t),
  1252. __btf_name_by_offset(btf, t->name_off),
  1253. log_details ? " " : "");
  1254. if (log_details)
  1255. btf_type_ops(t)->log_details(env, t);
  1256. if (fmt && *fmt) {
  1257. __btf_verifier_log(log, " ");
  1258. va_start(args, fmt);
  1259. bpf_verifier_vlog(log, fmt, args);
  1260. va_end(args);
  1261. }
  1262. __btf_verifier_log(log, "\n");
  1263. }
  1264. #define btf_verifier_log_type(env, t, ...) \
  1265. __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
  1266. #define btf_verifier_log_basic(env, t, ...) \
  1267. __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
  1268. __printf(4, 5)
  1269. static void btf_verifier_log_member(struct btf_verifier_env *env,
  1270. const struct btf_type *struct_type,
  1271. const struct btf_member *member,
  1272. const char *fmt, ...)
  1273. {
  1274. struct bpf_verifier_log *log = &env->log;
  1275. struct btf *btf = env->btf;
  1276. va_list args;
  1277. if (!bpf_verifier_log_needed(log))
  1278. return;
  1279. if (log->level == BPF_LOG_KERNEL) {
  1280. if (!fmt)
  1281. return;
  1282. /* Skip logging when loading module BTF with mismatches permitted */
  1283. if (env->btf->base_btf && IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
  1284. return;
  1285. }
  1286. /* The CHECK_META phase already did a btf dump.
  1287. *
  1288. * If member is logged again, it must hit an error in
  1289. * parsing this member. It is useful to print out which
  1290. * struct this member belongs to.
  1291. */
  1292. if (env->phase != CHECK_META)
  1293. btf_verifier_log_type(env, struct_type, NULL);
  1294. if (btf_type_kflag(struct_type))
  1295. __btf_verifier_log(log,
  1296. "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
  1297. __btf_name_by_offset(btf, member->name_off),
  1298. member->type,
  1299. BTF_MEMBER_BITFIELD_SIZE(member->offset),
  1300. BTF_MEMBER_BIT_OFFSET(member->offset));
  1301. else
  1302. __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
  1303. __btf_name_by_offset(btf, member->name_off),
  1304. member->type, member->offset);
  1305. if (fmt && *fmt) {
  1306. __btf_verifier_log(log, " ");
  1307. va_start(args, fmt);
  1308. bpf_verifier_vlog(log, fmt, args);
  1309. va_end(args);
  1310. }
  1311. __btf_verifier_log(log, "\n");
  1312. }
  1313. __printf(4, 5)
  1314. static void btf_verifier_log_vsi(struct btf_verifier_env *env,
  1315. const struct btf_type *datasec_type,
  1316. const struct btf_var_secinfo *vsi,
  1317. const char *fmt, ...)
  1318. {
  1319. struct bpf_verifier_log *log = &env->log;
  1320. va_list args;
  1321. if (!bpf_verifier_log_needed(log))
  1322. return;
  1323. if (log->level == BPF_LOG_KERNEL && !fmt)
  1324. return;
  1325. if (env->phase != CHECK_META)
  1326. btf_verifier_log_type(env, datasec_type, NULL);
  1327. __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
  1328. vsi->type, vsi->offset, vsi->size);
  1329. if (fmt && *fmt) {
  1330. __btf_verifier_log(log, " ");
  1331. va_start(args, fmt);
  1332. bpf_verifier_vlog(log, fmt, args);
  1333. va_end(args);
  1334. }
  1335. __btf_verifier_log(log, "\n");
  1336. }
  1337. static void btf_verifier_log_hdr(struct btf_verifier_env *env,
  1338. u32 btf_data_size)
  1339. {
  1340. struct bpf_verifier_log *log = &env->log;
  1341. const struct btf *btf = env->btf;
  1342. const struct btf_header *hdr;
  1343. if (!bpf_verifier_log_needed(log))
  1344. return;
  1345. if (log->level == BPF_LOG_KERNEL)
  1346. return;
  1347. hdr = &btf->hdr;
  1348. __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
  1349. __btf_verifier_log(log, "version: %u\n", hdr->version);
  1350. __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
  1351. __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
  1352. __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
  1353. __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
  1354. __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
  1355. __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
  1356. __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
  1357. }
  1358. static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
  1359. {
  1360. struct btf *btf = env->btf;
  1361. if (btf->types_size == btf->nr_types) {
  1362. /* Expand 'types' array */
  1363. struct btf_type **new_types;
  1364. u32 expand_by, new_size;
  1365. if (btf->start_id + btf->types_size == BTF_MAX_TYPE) {
  1366. btf_verifier_log(env, "Exceeded max num of types");
  1367. return -E2BIG;
  1368. }
  1369. expand_by = max_t(u32, btf->types_size >> 2, 16);
  1370. new_size = min_t(u32, BTF_MAX_TYPE,
  1371. btf->types_size + expand_by);
  1372. new_types = kvcalloc(new_size, sizeof(*new_types),
  1373. GFP_KERNEL | __GFP_NOWARN);
  1374. if (!new_types)
  1375. return -ENOMEM;
  1376. if (btf->nr_types == 0) {
  1377. if (!btf->base_btf) {
  1378. /* lazily init VOID type */
  1379. new_types[0] = &btf_void;
  1380. btf->nr_types++;
  1381. }
  1382. } else {
  1383. memcpy(new_types, btf->types,
  1384. sizeof(*btf->types) * btf->nr_types);
  1385. }
  1386. kvfree(btf->types);
  1387. btf->types = new_types;
  1388. btf->types_size = new_size;
  1389. }
  1390. btf->types[btf->nr_types++] = t;
  1391. return 0;
  1392. }
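/*
 * Growth policy of the types array above: it is extended by a quarter of
 * its current size, with a minimum step of 16 entries and an overall cap
 * of BTF_MAX_TYPE ids. For non-split BTF the very first expansion also
 * installs the implicit VOID type at index 0, which is why type id 0 is
 * always valid.
 */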
  1393. static int btf_alloc_id(struct btf *btf)
  1394. {
  1395. int id;
  1396. idr_preload(GFP_KERNEL);
  1397. spin_lock_bh(&btf_idr_lock);
  1398. id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
  1399. if (id > 0)
  1400. btf->id = id;
  1401. spin_unlock_bh(&btf_idr_lock);
  1402. idr_preload_end();
  1403. if (WARN_ON_ONCE(!id))
  1404. return -ENOSPC;
  1405. return id > 0 ? 0 : id;
  1406. }
  1407. static void btf_free_id(struct btf *btf)
  1408. {
  1409. unsigned long flags;
  1410. /*
1411. * In map-in-map, calling map_delete_elem() on the outer
1412. * map will call bpf_map_put() on the inner map.
1413. * That will then eventually call btf_free_id()
1414. * on the inner map. Some map_delete_elem()
1415. * implementations may run with IRQs disabled, so
1416. * we need to use the _irqsave() version instead
1417. * of the _bh() version.
  1418. */
  1419. spin_lock_irqsave(&btf_idr_lock, flags);
  1420. idr_remove(&btf_idr, btf->id);
  1421. spin_unlock_irqrestore(&btf_idr_lock, flags);
  1422. }
  1423. static void btf_free_kfunc_set_tab(struct btf *btf)
  1424. {
  1425. struct btf_kfunc_set_tab *tab = btf->kfunc_set_tab;
  1426. int hook;
  1427. if (!tab)
  1428. return;
  1429. /* For module BTF, we directly assign the sets being registered, so
  1430. * there is nothing to free except kfunc_set_tab.
  1431. */
  1432. if (btf_is_module(btf))
  1433. goto free_tab;
  1434. for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++)
  1435. kfree(tab->sets[hook]);
  1436. free_tab:
  1437. kfree(tab);
  1438. btf->kfunc_set_tab = NULL;
  1439. }
  1440. static void btf_free_dtor_kfunc_tab(struct btf *btf)
  1441. {
  1442. struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
  1443. if (!tab)
  1444. return;
  1445. kfree(tab);
  1446. btf->dtor_kfunc_tab = NULL;
  1447. }
  1448. static void btf_free(struct btf *btf)
  1449. {
  1450. btf_free_dtor_kfunc_tab(btf);
  1451. btf_free_kfunc_set_tab(btf);
  1452. kvfree(btf->types);
  1453. kvfree(btf->resolved_sizes);
  1454. kvfree(btf->resolved_ids);
  1455. kvfree(btf->data);
  1456. kfree(btf);
  1457. }
  1458. static void btf_free_rcu(struct rcu_head *rcu)
  1459. {
  1460. struct btf *btf = container_of(rcu, struct btf, rcu);
  1461. btf_free(btf);
  1462. }
  1463. void btf_get(struct btf *btf)
  1464. {
  1465. refcount_inc(&btf->refcnt);
  1466. }
  1467. void btf_put(struct btf *btf)
  1468. {
  1469. if (btf && refcount_dec_and_test(&btf->refcnt)) {
  1470. btf_free_id(btf);
  1471. call_rcu(&btf->rcu, btf_free_rcu);
  1472. }
  1473. }
  1474. static int env_resolve_init(struct btf_verifier_env *env)
  1475. {
  1476. struct btf *btf = env->btf;
  1477. u32 nr_types = btf->nr_types;
  1478. u32 *resolved_sizes = NULL;
  1479. u32 *resolved_ids = NULL;
  1480. u8 *visit_states = NULL;
  1481. resolved_sizes = kvcalloc(nr_types, sizeof(*resolved_sizes),
  1482. GFP_KERNEL | __GFP_NOWARN);
  1483. if (!resolved_sizes)
  1484. goto nomem;
  1485. resolved_ids = kvcalloc(nr_types, sizeof(*resolved_ids),
  1486. GFP_KERNEL | __GFP_NOWARN);
  1487. if (!resolved_ids)
  1488. goto nomem;
  1489. visit_states = kvcalloc(nr_types, sizeof(*visit_states),
  1490. GFP_KERNEL | __GFP_NOWARN);
  1491. if (!visit_states)
  1492. goto nomem;
  1493. btf->resolved_sizes = resolved_sizes;
  1494. btf->resolved_ids = resolved_ids;
  1495. env->visit_states = visit_states;
  1496. return 0;
  1497. nomem:
  1498. kvfree(resolved_sizes);
  1499. kvfree(resolved_ids);
  1500. kvfree(visit_states);
  1501. return -ENOMEM;
  1502. }
  1503. static void btf_verifier_env_free(struct btf_verifier_env *env)
  1504. {
  1505. kvfree(env->visit_states);
  1506. kfree(env);
  1507. }
  1508. static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
  1509. const struct btf_type *next_type)
  1510. {
  1511. switch (env->resolve_mode) {
  1512. case RESOLVE_TBD:
  1513. /* int, enum or void is a sink */
  1514. return !btf_type_needs_resolve(next_type);
  1515. case RESOLVE_PTR:
  1516. /* int, enum, void, struct, array, func or func_proto is a sink
  1517. * for ptr
  1518. */
  1519. return !btf_type_is_modifier(next_type) &&
  1520. !btf_type_is_ptr(next_type);
  1521. case RESOLVE_STRUCT_OR_ARRAY:
  1522. /* int, enum, void, ptr, func or func_proto is a sink
  1523. * for struct and array
  1524. */
  1525. return !btf_type_is_modifier(next_type) &&
  1526. !btf_type_is_array(next_type) &&
  1527. !btf_type_is_struct(next_type);
  1528. default:
  1529. BUG();
  1530. }
  1531. }
  1532. static bool env_type_is_resolved(const struct btf_verifier_env *env,
  1533. u32 type_id)
  1534. {
  1535. /* base BTF types should be resolved by now */
  1536. if (type_id < env->btf->start_id)
  1537. return true;
  1538. return env->visit_states[type_id - env->btf->start_id] == RESOLVED;
  1539. }
  1540. static int env_stack_push(struct btf_verifier_env *env,
  1541. const struct btf_type *t, u32 type_id)
  1542. {
  1543. const struct btf *btf = env->btf;
  1544. struct resolve_vertex *v;
  1545. if (env->top_stack == MAX_RESOLVE_DEPTH)
  1546. return -E2BIG;
  1547. if (type_id < btf->start_id
  1548. || env->visit_states[type_id - btf->start_id] != NOT_VISITED)
  1549. return -EEXIST;
  1550. env->visit_states[type_id - btf->start_id] = VISITED;
  1551. v = &env->stack[env->top_stack++];
  1552. v->t = t;
  1553. v->type_id = type_id;
  1554. v->next_member = 0;
  1555. if (env->resolve_mode == RESOLVE_TBD) {
  1556. if (btf_type_is_ptr(t))
  1557. env->resolve_mode = RESOLVE_PTR;
  1558. else if (btf_type_is_struct(t) || btf_type_is_array(t))
  1559. env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
  1560. }
  1561. return 0;
  1562. }
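/*
 * Sketch of the resolve walk driven by the stack above: each type starts
 * NOT_VISITED; pushing it marks it VISITED, and popping it via
 * env_stack_pop_resolved() marks it RESOLVED and records its resolved id
 * and size. Pushing a type that is already VISITED returns -EEXIST, which
 * is how malformed reference cycles (e.g. a modifier chain that loops
 * back on itself) get rejected.
 */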
  1563. static void env_stack_set_next_member(struct btf_verifier_env *env,
  1564. u16 next_member)
  1565. {
  1566. env->stack[env->top_stack - 1].next_member = next_member;
  1567. }
  1568. static void env_stack_pop_resolved(struct btf_verifier_env *env,
  1569. u32 resolved_type_id,
  1570. u32 resolved_size)
  1571. {
  1572. u32 type_id = env->stack[--(env->top_stack)].type_id;
  1573. struct btf *btf = env->btf;
  1574. type_id -= btf->start_id; /* adjust to local type id */
  1575. btf->resolved_sizes[type_id] = resolved_size;
  1576. btf->resolved_ids[type_id] = resolved_type_id;
  1577. env->visit_states[type_id] = RESOLVED;
  1578. }
  1579. static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
  1580. {
  1581. return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
  1582. }
  1583. /* Resolve the size of a passed-in "type"
  1584. *
  1585. * type: is an array (e.g. u32 array[x][y])
  1586. * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
  1587. * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
  1588. * corresponds to the return type.
  1589. * *elem_type: u32
  1590. * *elem_id: id of u32
  1591. * *total_nelems: (x * y). Hence, individual elem size is
  1592. * (*type_size / *total_nelems)
  1593. * *type_id: id of type if it's changed within the function, 0 if not
  1594. *
  1595. * type: is not an array (e.g. const struct X)
  1596. * return type: type "struct X"
  1597. * *type_size: sizeof(struct X)
  1598. * *elem_type: same as return type ("struct X")
  1599. * *elem_id: 0
  1600. * *total_nelems: 1
  1601. * *type_id: id of type if it's changed within the function, 0 if not
  1602. */
  1603. static const struct btf_type *
  1604. __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
  1605. u32 *type_size, const struct btf_type **elem_type,
  1606. u32 *elem_id, u32 *total_nelems, u32 *type_id)
  1607. {
  1608. const struct btf_type *array_type = NULL;
  1609. const struct btf_array *array = NULL;
  1610. u32 i, size, nelems = 1, id = 0;
  1611. for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
  1612. switch (BTF_INFO_KIND(type->info)) {
  1613. /* type->size can be used */
  1614. case BTF_KIND_INT:
  1615. case BTF_KIND_STRUCT:
  1616. case BTF_KIND_UNION:
  1617. case BTF_KIND_ENUM:
  1618. case BTF_KIND_FLOAT:
  1619. case BTF_KIND_ENUM64:
  1620. size = type->size;
  1621. goto resolved;
  1622. case BTF_KIND_PTR:
  1623. size = sizeof(void *);
  1624. goto resolved;
  1625. /* Modifiers */
  1626. case BTF_KIND_TYPEDEF:
  1627. case BTF_KIND_VOLATILE:
  1628. case BTF_KIND_CONST:
  1629. case BTF_KIND_RESTRICT:
  1630. case BTF_KIND_TYPE_TAG:
  1631. id = type->type;
  1632. type = btf_type_by_id(btf, type->type);
  1633. break;
  1634. case BTF_KIND_ARRAY:
  1635. if (!array_type)
  1636. array_type = type;
  1637. array = btf_type_array(type);
  1638. if (nelems && array->nelems > U32_MAX / nelems)
  1639. return ERR_PTR(-EINVAL);
  1640. nelems *= array->nelems;
  1641. type = btf_type_by_id(btf, array->type);
  1642. break;
  1643. /* type without size */
  1644. default:
  1645. return ERR_PTR(-EINVAL);
  1646. }
  1647. }
  1648. return ERR_PTR(-EINVAL);
  1649. resolved:
  1650. if (nelems && size > U32_MAX / nelems)
  1651. return ERR_PTR(-EINVAL);
  1652. *type_size = nelems * size;
  1653. if (total_nelems)
  1654. *total_nelems = nelems;
  1655. if (elem_type)
  1656. *elem_type = type;
  1657. if (elem_id)
  1658. *elem_id = array ? array->type : 0;
  1659. if (type_id && id)
  1660. *type_id = id;
  1661. return array_type ? : type;
  1662. }
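/*
 * Worked example for the helper above, assuming a hypothetical
 * "const u32 a[2][3]": the two BTF_KIND_ARRAY levels multiply nelems to
 * 6, the element resolves to a 4-byte int, so *type_size becomes 24,
 * *total_nelems 6, *elem_type the u32 int type, and the returned type is
 * the outermost array.
 */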
  1663. const struct btf_type *
  1664. btf_resolve_size(const struct btf *btf, const struct btf_type *type,
  1665. u32 *type_size)
  1666. {
  1667. return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
  1668. }
  1669. static u32 btf_resolved_type_id(const struct btf *btf, u32 type_id)
  1670. {
  1671. while (type_id < btf->start_id)
  1672. btf = btf->base_btf;
  1673. return btf->resolved_ids[type_id - btf->start_id];
  1674. }
  1675. /* The input param "type_id" must point to a needs_resolve type */
  1676. static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
  1677. u32 *type_id)
  1678. {
  1679. *type_id = btf_resolved_type_id(btf, *type_id);
  1680. return btf_type_by_id(btf, *type_id);
  1681. }
  1682. static u32 btf_resolved_type_size(const struct btf *btf, u32 type_id)
  1683. {
  1684. while (type_id < btf->start_id)
  1685. btf = btf->base_btf;
  1686. return btf->resolved_sizes[type_id - btf->start_id];
  1687. }
  1688. const struct btf_type *btf_type_id_size(const struct btf *btf,
  1689. u32 *type_id, u32 *ret_size)
  1690. {
  1691. const struct btf_type *size_type;
  1692. u32 size_type_id = *type_id;
  1693. u32 size = 0;
  1694. size_type = btf_type_by_id(btf, size_type_id);
  1695. if (btf_type_nosize_or_null(size_type))
  1696. return NULL;
  1697. if (btf_type_has_size(size_type)) {
  1698. size = size_type->size;
  1699. } else if (btf_type_is_array(size_type)) {
  1700. size = btf_resolved_type_size(btf, size_type_id);
  1701. } else if (btf_type_is_ptr(size_type)) {
  1702. size = sizeof(void *);
  1703. } else {
  1704. if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
  1705. !btf_type_is_var(size_type)))
  1706. return NULL;
  1707. size_type_id = btf_resolved_type_id(btf, size_type_id);
  1708. size_type = btf_type_by_id(btf, size_type_id);
  1709. if (btf_type_nosize_or_null(size_type))
  1710. return NULL;
  1711. else if (btf_type_has_size(size_type))
  1712. size = size_type->size;
  1713. else if (btf_type_is_array(size_type))
  1714. size = btf_resolved_type_size(btf, size_type_id);
  1715. else if (btf_type_is_ptr(size_type))
  1716. size = sizeof(void *);
  1717. else
  1718. return NULL;
  1719. }
  1720. *type_id = size_type_id;
  1721. if (ret_size)
  1722. *ret_size = size;
  1723. return size_type;
  1724. }
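/*
 * In short, the function above maps *type_id to the type that actually
 * carries a size: ints/structs/enums/floats report their own size, arrays
 * and modifier/var chains report the size recorded during resolution, and
 * pointers report sizeof(void *). E.g. for a hypothetical "const int x",
 * *type_id is rewritten to the underlying int and 4 is returned through
 * *ret_size.
 */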
  1725. static int btf_df_check_member(struct btf_verifier_env *env,
  1726. const struct btf_type *struct_type,
  1727. const struct btf_member *member,
  1728. const struct btf_type *member_type)
  1729. {
  1730. btf_verifier_log_basic(env, struct_type,
  1731. "Unsupported check_member");
  1732. return -EINVAL;
  1733. }
  1734. static int btf_df_check_kflag_member(struct btf_verifier_env *env,
  1735. const struct btf_type *struct_type,
  1736. const struct btf_member *member,
  1737. const struct btf_type *member_type)
  1738. {
  1739. btf_verifier_log_basic(env, struct_type,
  1740. "Unsupported check_kflag_member");
  1741. return -EINVAL;
  1742. }
1743. /* Used for ptr, array, struct/union and float type members.
  1744. * int, enum and modifier types have their specific callback functions.
  1745. */
  1746. static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
  1747. const struct btf_type *struct_type,
  1748. const struct btf_member *member,
  1749. const struct btf_type *member_type)
  1750. {
  1751. if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
  1752. btf_verifier_log_member(env, struct_type, member,
  1753. "Invalid member bitfield_size");
  1754. return -EINVAL;
  1755. }
  1756. /* bitfield size is 0, so member->offset represents bit offset only.
  1757. * It is safe to call non kflag check_member variants.
  1758. */
  1759. return btf_type_ops(member_type)->check_member(env, struct_type,
  1760. member,
  1761. member_type);
  1762. }
  1763. static int btf_df_resolve(struct btf_verifier_env *env,
  1764. const struct resolve_vertex *v)
  1765. {
  1766. btf_verifier_log_basic(env, v->t, "Unsupported resolve");
  1767. return -EINVAL;
  1768. }
  1769. static void btf_df_show(const struct btf *btf, const struct btf_type *t,
  1770. u32 type_id, void *data, u8 bits_offsets,
  1771. struct btf_show *show)
  1772. {
  1773. btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
  1774. }
  1775. static int btf_int_check_member(struct btf_verifier_env *env,
  1776. const struct btf_type *struct_type,
  1777. const struct btf_member *member,
  1778. const struct btf_type *member_type)
  1779. {
  1780. u32 int_data = btf_type_int(member_type);
  1781. u32 struct_bits_off = member->offset;
  1782. u32 struct_size = struct_type->size;
  1783. u32 nr_copy_bits;
  1784. u32 bytes_offset;
  1785. if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
  1786. btf_verifier_log_member(env, struct_type, member,
  1787. "bits_offset exceeds U32_MAX");
  1788. return -EINVAL;
  1789. }
  1790. struct_bits_off += BTF_INT_OFFSET(int_data);
  1791. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  1792. nr_copy_bits = BTF_INT_BITS(int_data) +
  1793. BITS_PER_BYTE_MASKED(struct_bits_off);
  1794. if (nr_copy_bits > BITS_PER_U128) {
  1795. btf_verifier_log_member(env, struct_type, member,
  1796. "nr_copy_bits exceeds 128");
  1797. return -EINVAL;
  1798. }
  1799. if (struct_size < bytes_offset ||
  1800. struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
  1801. btf_verifier_log_member(env, struct_type, member,
  1802. "Member exceeds struct_size");
  1803. return -EINVAL;
  1804. }
  1805. return 0;
  1806. }
  1807. static int btf_int_check_kflag_member(struct btf_verifier_env *env,
  1808. const struct btf_type *struct_type,
  1809. const struct btf_member *member,
  1810. const struct btf_type *member_type)
  1811. {
  1812. u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
  1813. u32 int_data = btf_type_int(member_type);
  1814. u32 struct_size = struct_type->size;
  1815. u32 nr_copy_bits;
  1816. /* a regular int type is required for the kflag int member */
  1817. if (!btf_type_int_is_regular(member_type)) {
  1818. btf_verifier_log_member(env, struct_type, member,
  1819. "Invalid member base type");
  1820. return -EINVAL;
  1821. }
  1822. /* check sanity of bitfield size */
  1823. nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
  1824. struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
  1825. nr_int_data_bits = BTF_INT_BITS(int_data);
  1826. if (!nr_bits) {
  1827. /* Not a bitfield member, member offset must be at byte
  1828. * boundary.
  1829. */
  1830. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  1831. btf_verifier_log_member(env, struct_type, member,
  1832. "Invalid member offset");
  1833. return -EINVAL;
  1834. }
  1835. nr_bits = nr_int_data_bits;
  1836. } else if (nr_bits > nr_int_data_bits) {
  1837. btf_verifier_log_member(env, struct_type, member,
  1838. "Invalid member bitfield_size");
  1839. return -EINVAL;
  1840. }
  1841. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  1842. nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
  1843. if (nr_copy_bits > BITS_PER_U128) {
  1844. btf_verifier_log_member(env, struct_type, member,
  1845. "nr_copy_bits exceeds 128");
  1846. return -EINVAL;
  1847. }
  1848. if (struct_size < bytes_offset ||
  1849. struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
  1850. btf_verifier_log_member(env, struct_type, member,
  1851. "Member exceeds struct_size");
  1852. return -EINVAL;
  1853. }
  1854. return 0;
  1855. }
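/*
 * Illustration of the kflag layout checked above: when a struct has
 * kind_flag set, member->offset packs BTF_MEMBER_BITFIELD_SIZE() in the
 * upper bits and BTF_MEMBER_BIT_OFFSET() in the lower bits. A
 * hypothetical "unsigned int x:3" placed 5 bits into its byte would have
 * bitfield_size == 3 and bit_offset == 5; the checks ensure the bitfield
 * width fits in the underlying int and that the bytes covering
 * bit_offset + nr_bits stay inside the struct.
 */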
  1856. static s32 btf_int_check_meta(struct btf_verifier_env *env,
  1857. const struct btf_type *t,
  1858. u32 meta_left)
  1859. {
  1860. u32 int_data, nr_bits, meta_needed = sizeof(int_data);
  1861. u16 encoding;
  1862. if (meta_left < meta_needed) {
  1863. btf_verifier_log_basic(env, t,
  1864. "meta_left:%u meta_needed:%u",
  1865. meta_left, meta_needed);
  1866. return -EINVAL;
  1867. }
  1868. if (btf_type_vlen(t)) {
  1869. btf_verifier_log_type(env, t, "vlen != 0");
  1870. return -EINVAL;
  1871. }
  1872. if (btf_type_kflag(t)) {
  1873. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  1874. return -EINVAL;
  1875. }
  1876. int_data = btf_type_int(t);
  1877. if (int_data & ~BTF_INT_MASK) {
  1878. btf_verifier_log_basic(env, t, "Invalid int_data:%x",
  1879. int_data);
  1880. return -EINVAL;
  1881. }
  1882. nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
  1883. if (nr_bits > BITS_PER_U128) {
  1884. btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
  1885. BITS_PER_U128);
  1886. return -EINVAL;
  1887. }
  1888. if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
  1889. btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
  1890. return -EINVAL;
  1891. }
  1892. /*
  1893. * Only one of the encoding bits is allowed and it
  1894. * should be sufficient for the pretty print purpose (i.e. decoding).
  1895. * Multiple bits can be allowed later if it is found
  1896. * to be insufficient.
  1897. */
  1898. encoding = BTF_INT_ENCODING(int_data);
  1899. if (encoding &&
  1900. encoding != BTF_INT_SIGNED &&
  1901. encoding != BTF_INT_CHAR &&
  1902. encoding != BTF_INT_BOOL) {
  1903. btf_verifier_log_type(env, t, "Unsupported encoding");
  1904. return -ENOTSUPP;
  1905. }
  1906. btf_verifier_log_type(env, t, NULL);
  1907. return meta_needed;
  1908. }
  1909. static void btf_int_log(struct btf_verifier_env *env,
  1910. const struct btf_type *t)
  1911. {
  1912. int int_data = btf_type_int(t);
  1913. btf_verifier_log(env,
  1914. "size=%u bits_offset=%u nr_bits=%u encoding=%s",
  1915. t->size, BTF_INT_OFFSET(int_data),
  1916. BTF_INT_BITS(int_data),
  1917. btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
  1918. }
  1919. static void btf_int128_print(struct btf_show *show, void *data)
  1920. {
  1921. /* data points to a __int128 number.
  1922. * Suppose
  1923. * int128_num = *(__int128 *)data;
1924. * The formulas below show what upper_num and lower_num represent:
  1925. * upper_num = int128_num >> 64;
  1926. * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
  1927. */
  1928. u64 upper_num, lower_num;
  1929. #ifdef __BIG_ENDIAN_BITFIELD
  1930. upper_num = *(u64 *)data;
  1931. lower_num = *(u64 *)(data + 8);
  1932. #else
  1933. upper_num = *(u64 *)(data + 8);
  1934. lower_num = *(u64 *)data;
  1935. #endif
  1936. if (upper_num == 0)
  1937. btf_show_type_value(show, "0x%llx", lower_num);
  1938. else
  1939. btf_show_type_values(show, "0x%llx%016llx", upper_num,
  1940. lower_num);
  1941. }
  1942. static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
  1943. u16 right_shift_bits)
  1944. {
  1945. u64 upper_num, lower_num;
  1946. #ifdef __BIG_ENDIAN_BITFIELD
  1947. upper_num = print_num[0];
  1948. lower_num = print_num[1];
  1949. #else
  1950. upper_num = print_num[1];
  1951. lower_num = print_num[0];
  1952. #endif
  1953. /* shake out un-needed bits by shift/or operations */
  1954. if (left_shift_bits >= 64) {
  1955. upper_num = lower_num << (left_shift_bits - 64);
  1956. lower_num = 0;
  1957. } else {
  1958. upper_num = (upper_num << left_shift_bits) |
  1959. (lower_num >> (64 - left_shift_bits));
  1960. lower_num = lower_num << left_shift_bits;
  1961. }
  1962. if (right_shift_bits >= 64) {
  1963. lower_num = upper_num >> (right_shift_bits - 64);
  1964. upper_num = 0;
  1965. } else {
  1966. lower_num = (lower_num >> right_shift_bits) |
  1967. (upper_num << (64 - right_shift_bits));
  1968. upper_num = upper_num >> right_shift_bits;
  1969. }
  1970. #ifdef __BIG_ENDIAN_BITFIELD
  1971. print_num[0] = upper_num;
  1972. print_num[1] = lower_num;
  1973. #else
  1974. print_num[0] = lower_num;
  1975. print_num[1] = upper_num;
  1976. #endif
  1977. }
  1978. static void btf_bitfield_show(void *data, u8 bits_offset,
  1979. u8 nr_bits, struct btf_show *show)
  1980. {
  1981. u16 left_shift_bits, right_shift_bits;
  1982. u8 nr_copy_bytes;
  1983. u8 nr_copy_bits;
  1984. u64 print_num[2] = {};
  1985. nr_copy_bits = nr_bits + bits_offset;
  1986. nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
  1987. memcpy(print_num, data, nr_copy_bytes);
  1988. #ifdef __BIG_ENDIAN_BITFIELD
  1989. left_shift_bits = bits_offset;
  1990. #else
  1991. left_shift_bits = BITS_PER_U128 - nr_copy_bits;
  1992. #endif
  1993. right_shift_bits = BITS_PER_U128 - nr_bits;
  1994. btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
  1995. btf_int128_print(show, print_num);
  1996. }
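/*
 * Sketch of the bitfield extraction above (little-endian case): the bytes
 * covering the field are copied into print_num, shifted left so the
 * field's most significant bit reaches bit 127, then shifted right by
 * 128 - nr_bits so the field ends up right-aligned in the 128-bit value
 * before printing. The big-endian branch computes the left shift from
 * bits_offset instead.
 */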
  1997. static void btf_int_bits_show(const struct btf *btf,
  1998. const struct btf_type *t,
  1999. void *data, u8 bits_offset,
  2000. struct btf_show *show)
  2001. {
  2002. u32 int_data = btf_type_int(t);
  2003. u8 nr_bits = BTF_INT_BITS(int_data);
  2004. u8 total_bits_offset;
  2005. /*
  2006. * bits_offset is at most 7.
  2007. * BTF_INT_OFFSET() cannot exceed 128 bits.
  2008. */
  2009. total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
  2010. data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
  2011. bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
  2012. btf_bitfield_show(data, bits_offset, nr_bits, show);
  2013. }
  2014. static void btf_int_show(const struct btf *btf, const struct btf_type *t,
  2015. u32 type_id, void *data, u8 bits_offset,
  2016. struct btf_show *show)
  2017. {
  2018. u32 int_data = btf_type_int(t);
  2019. u8 encoding = BTF_INT_ENCODING(int_data);
  2020. bool sign = encoding & BTF_INT_SIGNED;
  2021. u8 nr_bits = BTF_INT_BITS(int_data);
  2022. void *safe_data;
  2023. safe_data = btf_show_start_type(show, t, type_id, data);
  2024. if (!safe_data)
  2025. return;
  2026. if (bits_offset || BTF_INT_OFFSET(int_data) ||
  2027. BITS_PER_BYTE_MASKED(nr_bits)) {
  2028. btf_int_bits_show(btf, t, safe_data, bits_offset, show);
  2029. goto out;
  2030. }
  2031. switch (nr_bits) {
  2032. case 128:
  2033. btf_int128_print(show, safe_data);
  2034. break;
  2035. case 64:
  2036. if (sign)
  2037. btf_show_type_value(show, "%lld", *(s64 *)safe_data);
  2038. else
  2039. btf_show_type_value(show, "%llu", *(u64 *)safe_data);
  2040. break;
  2041. case 32:
  2042. if (sign)
  2043. btf_show_type_value(show, "%d", *(s32 *)safe_data);
  2044. else
  2045. btf_show_type_value(show, "%u", *(u32 *)safe_data);
  2046. break;
  2047. case 16:
  2048. if (sign)
  2049. btf_show_type_value(show, "%d", *(s16 *)safe_data);
  2050. else
  2051. btf_show_type_value(show, "%u", *(u16 *)safe_data);
  2052. break;
  2053. case 8:
  2054. if (show->state.array_encoding == BTF_INT_CHAR) {
  2055. /* check for null terminator */
  2056. if (show->state.array_terminated)
  2057. break;
  2058. if (*(char *)data == '\0') {
  2059. show->state.array_terminated = 1;
  2060. break;
  2061. }
  2062. if (isprint(*(char *)data)) {
  2063. btf_show_type_value(show, "'%c'",
  2064. *(char *)safe_data);
  2065. break;
  2066. }
  2067. }
  2068. if (sign)
  2069. btf_show_type_value(show, "%d", *(s8 *)safe_data);
  2070. else
  2071. btf_show_type_value(show, "%u", *(u8 *)safe_data);
  2072. break;
  2073. default:
  2074. btf_int_bits_show(btf, t, safe_data, bits_offset, show);
  2075. break;
  2076. }
  2077. out:
  2078. btf_show_end_type(show);
  2079. }
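/*
 * Note the 8-bit case above: when the surrounding array was marked with
 * the CHAR encoding, printable bytes are shown as characters ('a', 'b',
 * ...) and printing stops at the first NUL by setting array_terminated,
 * so a char[] buffer is rendered like a C string rather than as
 * individual integers.
 */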
  2080. static const struct btf_kind_operations int_ops = {
  2081. .check_meta = btf_int_check_meta,
  2082. .resolve = btf_df_resolve,
  2083. .check_member = btf_int_check_member,
  2084. .check_kflag_member = btf_int_check_kflag_member,
  2085. .log_details = btf_int_log,
  2086. .show = btf_int_show,
  2087. };
  2088. static int btf_modifier_check_member(struct btf_verifier_env *env,
  2089. const struct btf_type *struct_type,
  2090. const struct btf_member *member,
  2091. const struct btf_type *member_type)
  2092. {
  2093. const struct btf_type *resolved_type;
  2094. u32 resolved_type_id = member->type;
  2095. struct btf_member resolved_member;
  2096. struct btf *btf = env->btf;
  2097. resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
  2098. if (!resolved_type) {
  2099. btf_verifier_log_member(env, struct_type, member,
  2100. "Invalid member");
  2101. return -EINVAL;
  2102. }
  2103. resolved_member = *member;
  2104. resolved_member.type = resolved_type_id;
  2105. return btf_type_ops(resolved_type)->check_member(env, struct_type,
  2106. &resolved_member,
  2107. resolved_type);
  2108. }
  2109. static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
  2110. const struct btf_type *struct_type,
  2111. const struct btf_member *member,
  2112. const struct btf_type *member_type)
  2113. {
  2114. const struct btf_type *resolved_type;
  2115. u32 resolved_type_id = member->type;
  2116. struct btf_member resolved_member;
  2117. struct btf *btf = env->btf;
  2118. resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
  2119. if (!resolved_type) {
  2120. btf_verifier_log_member(env, struct_type, member,
  2121. "Invalid member");
  2122. return -EINVAL;
  2123. }
  2124. resolved_member = *member;
  2125. resolved_member.type = resolved_type_id;
  2126. return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
  2127. &resolved_member,
  2128. resolved_type);
  2129. }
  2130. static int btf_ptr_check_member(struct btf_verifier_env *env,
  2131. const struct btf_type *struct_type,
  2132. const struct btf_member *member,
  2133. const struct btf_type *member_type)
  2134. {
  2135. u32 struct_size, struct_bits_off, bytes_offset;
  2136. struct_size = struct_type->size;
  2137. struct_bits_off = member->offset;
  2138. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  2139. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  2140. btf_verifier_log_member(env, struct_type, member,
  2141. "Member is not byte aligned");
  2142. return -EINVAL;
  2143. }
  2144. if (struct_size - bytes_offset < sizeof(void *)) {
  2145. btf_verifier_log_member(env, struct_type, member,
  2146. "Member exceeds struct_size");
  2147. return -EINVAL;
  2148. }
  2149. return 0;
  2150. }
  2151. static int btf_ref_type_check_meta(struct btf_verifier_env *env,
  2152. const struct btf_type *t,
  2153. u32 meta_left)
  2154. {
  2155. const char *value;
  2156. if (btf_type_vlen(t)) {
  2157. btf_verifier_log_type(env, t, "vlen != 0");
  2158. return -EINVAL;
  2159. }
  2160. if (btf_type_kflag(t)) {
  2161. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  2162. return -EINVAL;
  2163. }
  2164. if (!BTF_TYPE_ID_VALID(t->type)) {
  2165. btf_verifier_log_type(env, t, "Invalid type_id");
  2166. return -EINVAL;
  2167. }
  2168. /* typedef/type_tag type must have a valid name, and other ref types,
  2169. * volatile, const, restrict, should have a null name.
  2170. */
  2171. if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
  2172. if (!t->name_off ||
  2173. !btf_name_valid_identifier(env->btf, t->name_off)) {
  2174. btf_verifier_log_type(env, t, "Invalid name");
  2175. return -EINVAL;
  2176. }
  2177. } else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
  2178. value = btf_name_by_offset(env->btf, t->name_off);
  2179. if (!value || !value[0]) {
  2180. btf_verifier_log_type(env, t, "Invalid name");
  2181. return -EINVAL;
  2182. }
  2183. } else {
  2184. if (t->name_off) {
  2185. btf_verifier_log_type(env, t, "Invalid name");
  2186. return -EINVAL;
  2187. }
  2188. }
  2189. btf_verifier_log_type(env, t, NULL);
  2190. return 0;
  2191. }
  2192. static int btf_modifier_resolve(struct btf_verifier_env *env,
  2193. const struct resolve_vertex *v)
  2194. {
  2195. const struct btf_type *t = v->t;
  2196. const struct btf_type *next_type;
  2197. u32 next_type_id = t->type;
  2198. struct btf *btf = env->btf;
  2199. next_type = btf_type_by_id(btf, next_type_id);
  2200. if (!next_type || btf_type_is_resolve_source_only(next_type)) {
  2201. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2202. return -EINVAL;
  2203. }
  2204. if (!env_type_is_resolve_sink(env, next_type) &&
  2205. !env_type_is_resolved(env, next_type_id))
  2206. return env_stack_push(env, next_type, next_type_id);
  2207. /* Figure out the resolved next_type_id with size.
  2208. * They will be stored in the current modifier's
  2209. * resolved_ids and resolved_sizes such that it can
2210. * save us a few type-following steps when we use it later (e.g. in
2211. * pretty print).
  2212. */
  2213. if (!btf_type_id_size(btf, &next_type_id, NULL)) {
  2214. if (env_type_is_resolved(env, next_type_id))
  2215. next_type = btf_type_id_resolve(btf, &next_type_id);
  2216. /* "typedef void new_void", "const void"...etc */
  2217. if (!btf_type_is_void(next_type) &&
  2218. !btf_type_is_fwd(next_type) &&
  2219. !btf_type_is_func_proto(next_type)) {
  2220. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2221. return -EINVAL;
  2222. }
  2223. }
  2224. env_stack_pop_resolved(env, next_type_id, 0);
  2225. return 0;
  2226. }
  2227. static int btf_var_resolve(struct btf_verifier_env *env,
  2228. const struct resolve_vertex *v)
  2229. {
  2230. const struct btf_type *next_type;
  2231. const struct btf_type *t = v->t;
  2232. u32 next_type_id = t->type;
  2233. struct btf *btf = env->btf;
  2234. next_type = btf_type_by_id(btf, next_type_id);
  2235. if (!next_type || btf_type_is_resolve_source_only(next_type)) {
  2236. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2237. return -EINVAL;
  2238. }
  2239. if (!env_type_is_resolve_sink(env, next_type) &&
  2240. !env_type_is_resolved(env, next_type_id))
  2241. return env_stack_push(env, next_type, next_type_id);
  2242. if (btf_type_is_modifier(next_type)) {
  2243. const struct btf_type *resolved_type;
  2244. u32 resolved_type_id;
  2245. resolved_type_id = next_type_id;
  2246. resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
  2247. if (btf_type_is_ptr(resolved_type) &&
  2248. !env_type_is_resolve_sink(env, resolved_type) &&
  2249. !env_type_is_resolved(env, resolved_type_id))
  2250. return env_stack_push(env, resolved_type,
  2251. resolved_type_id);
  2252. }
2253. /* We must resolve to something concrete at this point;
2254. * forward types or anything similar that would resolve to a
2255. * size of zero is not allowed.
  2256. */
  2257. if (!btf_type_id_size(btf, &next_type_id, NULL)) {
  2258. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2259. return -EINVAL;
  2260. }
  2261. env_stack_pop_resolved(env, next_type_id, 0);
  2262. return 0;
  2263. }
  2264. static int btf_ptr_resolve(struct btf_verifier_env *env,
  2265. const struct resolve_vertex *v)
  2266. {
  2267. const struct btf_type *next_type;
  2268. const struct btf_type *t = v->t;
  2269. u32 next_type_id = t->type;
  2270. struct btf *btf = env->btf;
  2271. next_type = btf_type_by_id(btf, next_type_id);
  2272. if (!next_type || btf_type_is_resolve_source_only(next_type)) {
  2273. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2274. return -EINVAL;
  2275. }
  2276. if (!env_type_is_resolve_sink(env, next_type) &&
  2277. !env_type_is_resolved(env, next_type_id))
  2278. return env_stack_push(env, next_type, next_type_id);
  2279. /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
  2280. * the modifier may have stopped resolving when it was resolved
  2281. * to a ptr (last-resolved-ptr).
  2282. *
  2283. * We now need to continue from the last-resolved-ptr to
2284. * ensure the last-resolved-ptr does not refer back to
  2285. * the current ptr (t).
  2286. */
  2287. if (btf_type_is_modifier(next_type)) {
  2288. const struct btf_type *resolved_type;
  2289. u32 resolved_type_id;
  2290. resolved_type_id = next_type_id;
  2291. resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
  2292. if (btf_type_is_ptr(resolved_type) &&
  2293. !env_type_is_resolve_sink(env, resolved_type) &&
  2294. !env_type_is_resolved(env, resolved_type_id))
  2295. return env_stack_push(env, resolved_type,
  2296. resolved_type_id);
  2297. }
  2298. if (!btf_type_id_size(btf, &next_type_id, NULL)) {
  2299. if (env_type_is_resolved(env, next_type_id))
  2300. next_type = btf_type_id_resolve(btf, &next_type_id);
  2301. if (!btf_type_is_void(next_type) &&
  2302. !btf_type_is_fwd(next_type) &&
  2303. !btf_type_is_func_proto(next_type)) {
  2304. btf_verifier_log_type(env, v->t, "Invalid type_id");
  2305. return -EINVAL;
  2306. }
  2307. }
  2308. env_stack_pop_resolved(env, next_type_id, 0);
  2309. return 0;
  2310. }
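/* The show callbacks below print a value of the given type: modifiers and
 * vars are first resolved to the underlying type and then delegate to that
 * type's own show op; pointers are printed as hex (hashed with %p unless
 * BTF_SHOW_PTR_RAW requests %px).
 */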
  2311. static void btf_modifier_show(const struct btf *btf,
  2312. const struct btf_type *t,
  2313. u32 type_id, void *data,
  2314. u8 bits_offset, struct btf_show *show)
  2315. {
  2316. if (btf->resolved_ids)
  2317. t = btf_type_id_resolve(btf, &type_id);
  2318. else
  2319. t = btf_type_skip_modifiers(btf, type_id, NULL);
  2320. btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
  2321. }
  2322. static void btf_var_show(const struct btf *btf, const struct btf_type *t,
  2323. u32 type_id, void *data, u8 bits_offset,
  2324. struct btf_show *show)
  2325. {
  2326. t = btf_type_id_resolve(btf, &type_id);
  2327. btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
  2328. }
  2329. static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
  2330. u32 type_id, void *data, u8 bits_offset,
  2331. struct btf_show *show)
  2332. {
  2333. void *safe_data;
  2334. safe_data = btf_show_start_type(show, t, type_id, data);
  2335. if (!safe_data)
  2336. return;
  2337. /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
  2338. if (show->flags & BTF_SHOW_PTR_RAW)
  2339. btf_show_type_value(show, "0x%px", *(void **)safe_data);
  2340. else
  2341. btf_show_type_value(show, "0x%p", *(void **)safe_data);
  2342. btf_show_end_type(show);
  2343. }
  2344. static void btf_ref_type_log(struct btf_verifier_env *env,
  2345. const struct btf_type *t)
  2346. {
  2347. btf_verifier_log(env, "type_id=%u", t->type);
  2348. }
  2349. static struct btf_kind_operations modifier_ops = {
  2350. .check_meta = btf_ref_type_check_meta,
  2351. .resolve = btf_modifier_resolve,
  2352. .check_member = btf_modifier_check_member,
  2353. .check_kflag_member = btf_modifier_check_kflag_member,
  2354. .log_details = btf_ref_type_log,
  2355. .show = btf_modifier_show,
  2356. };
  2357. static struct btf_kind_operations ptr_ops = {
  2358. .check_meta = btf_ref_type_check_meta,
  2359. .resolve = btf_ptr_resolve,
  2360. .check_member = btf_ptr_check_member,
  2361. .check_kflag_member = btf_generic_check_kflag_member,
  2362. .log_details = btf_ref_type_log,
  2363. .show = btf_ptr_show,
  2364. };
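/* A forward declaration carries only a name plus a kind_flag selecting
 * struct (0) or union (1); it must have vlen == 0 and type == 0, and since
 * it has no size it cannot be resolved or used as a member (btf_df_* ops).
 */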
  2365. static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
  2366. const struct btf_type *t,
  2367. u32 meta_left)
  2368. {
  2369. if (btf_type_vlen(t)) {
  2370. btf_verifier_log_type(env, t, "vlen != 0");
  2371. return -EINVAL;
  2372. }
  2373. if (t->type) {
  2374. btf_verifier_log_type(env, t, "type != 0");
  2375. return -EINVAL;
  2376. }
  2377. /* fwd type must have a valid name */
  2378. if (!t->name_off ||
  2379. !btf_name_valid_identifier(env->btf, t->name_off)) {
  2380. btf_verifier_log_type(env, t, "Invalid name");
  2381. return -EINVAL;
  2382. }
  2383. btf_verifier_log_type(env, t, NULL);
  2384. return 0;
  2385. }
  2386. static void btf_fwd_type_log(struct btf_verifier_env *env,
  2387. const struct btf_type *t)
  2388. {
  2389. btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
  2390. }
  2391. static struct btf_kind_operations fwd_ops = {
  2392. .check_meta = btf_fwd_check_meta,
  2393. .resolve = btf_df_resolve,
  2394. .check_member = btf_df_check_member,
  2395. .check_kflag_member = btf_df_check_kflag_member,
  2396. .log_details = btf_fwd_type_log,
  2397. .show = btf_df_show,
  2398. };
  2399. static int btf_array_check_member(struct btf_verifier_env *env,
  2400. const struct btf_type *struct_type,
  2401. const struct btf_member *member,
  2402. const struct btf_type *member_type)
  2403. {
  2404. u32 struct_bits_off = member->offset;
  2405. u32 struct_size, bytes_offset;
  2406. u32 array_type_id, array_size;
  2407. struct btf *btf = env->btf;
  2408. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  2409. btf_verifier_log_member(env, struct_type, member,
  2410. "Member is not byte aligned");
  2411. return -EINVAL;
  2412. }
  2413. array_type_id = member->type;
  2414. btf_type_id_size(btf, &array_type_id, &array_size);
  2415. struct_size = struct_type->size;
  2416. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  2417. if (struct_size - bytes_offset < array_size) {
  2418. btf_verifier_log_member(env, struct_type, member,
  2419. "Member exceeds struct_size");
  2420. return -EINVAL;
  2421. }
  2422. return 0;
  2423. }
  2424. static s32 btf_array_check_meta(struct btf_verifier_env *env,
  2425. const struct btf_type *t,
  2426. u32 meta_left)
  2427. {
  2428. const struct btf_array *array = btf_type_array(t);
  2429. u32 meta_needed = sizeof(*array);
  2430. if (meta_left < meta_needed) {
  2431. btf_verifier_log_basic(env, t,
  2432. "meta_left:%u meta_needed:%u",
  2433. meta_left, meta_needed);
  2434. return -EINVAL;
  2435. }
  2436. /* array type should not have a name */
  2437. if (t->name_off) {
  2438. btf_verifier_log_type(env, t, "Invalid name");
  2439. return -EINVAL;
  2440. }
  2441. if (btf_type_vlen(t)) {
  2442. btf_verifier_log_type(env, t, "vlen != 0");
  2443. return -EINVAL;
  2444. }
  2445. if (btf_type_kflag(t)) {
  2446. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  2447. return -EINVAL;
  2448. }
  2449. if (t->size) {
  2450. btf_verifier_log_type(env, t, "size != 0");
  2451. return -EINVAL;
  2452. }
2453. /* Array elem type and index type cannot be of type void,
  2454. * so !array->type and !array->index_type are not allowed.
  2455. */
  2456. if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
  2457. btf_verifier_log_type(env, t, "Invalid elem");
  2458. return -EINVAL;
  2459. }
  2460. if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
  2461. btf_verifier_log_type(env, t, "Invalid index");
  2462. return -EINVAL;
  2463. }
  2464. btf_verifier_log_type(env, t, NULL);
  2465. return meta_needed;
  2466. }
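/* btf_array_resolve() verifies that the index type is a regular-sized
 * integer, that the element type resolves to something with a size, and
 * that nelems * elem_size does not overflow U32_MAX before recording the
 * total array size.
 */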
  2467. static int btf_array_resolve(struct btf_verifier_env *env,
  2468. const struct resolve_vertex *v)
  2469. {
  2470. const struct btf_array *array = btf_type_array(v->t);
  2471. const struct btf_type *elem_type, *index_type;
  2472. u32 elem_type_id, index_type_id;
  2473. struct btf *btf = env->btf;
  2474. u32 elem_size;
  2475. /* Check array->index_type */
  2476. index_type_id = array->index_type;
  2477. index_type = btf_type_by_id(btf, index_type_id);
  2478. if (btf_type_nosize_or_null(index_type) ||
  2479. btf_type_is_resolve_source_only(index_type)) {
  2480. btf_verifier_log_type(env, v->t, "Invalid index");
  2481. return -EINVAL;
  2482. }
  2483. if (!env_type_is_resolve_sink(env, index_type) &&
  2484. !env_type_is_resolved(env, index_type_id))
  2485. return env_stack_push(env, index_type, index_type_id);
  2486. index_type = btf_type_id_size(btf, &index_type_id, NULL);
  2487. if (!index_type || !btf_type_is_int(index_type) ||
  2488. !btf_type_int_is_regular(index_type)) {
  2489. btf_verifier_log_type(env, v->t, "Invalid index");
  2490. return -EINVAL;
  2491. }
  2492. /* Check array->type */
  2493. elem_type_id = array->type;
  2494. elem_type = btf_type_by_id(btf, elem_type_id);
  2495. if (btf_type_nosize_or_null(elem_type) ||
  2496. btf_type_is_resolve_source_only(elem_type)) {
  2497. btf_verifier_log_type(env, v->t,
  2498. "Invalid elem");
  2499. return -EINVAL;
  2500. }
  2501. if (!env_type_is_resolve_sink(env, elem_type) &&
  2502. !env_type_is_resolved(env, elem_type_id))
  2503. return env_stack_push(env, elem_type, elem_type_id);
  2504. elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
  2505. if (!elem_type) {
  2506. btf_verifier_log_type(env, v->t, "Invalid elem");
  2507. return -EINVAL;
  2508. }
  2509. if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
  2510. btf_verifier_log_type(env, v->t, "Invalid array of int");
  2511. return -EINVAL;
  2512. }
  2513. if (array->nelems && elem_size > U32_MAX / array->nelems) {
  2514. btf_verifier_log_type(env, v->t,
  2515. "Array size overflows U32_MAX");
  2516. return -EINVAL;
  2517. }
  2518. env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
  2519. return 0;
  2520. }
  2521. static void btf_array_log(struct btf_verifier_env *env,
  2522. const struct btf_type *t)
  2523. {
  2524. const struct btf_array *array = btf_type_array(t);
  2525. btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
  2526. array->type, array->index_type, array->nelems);
  2527. }
  2528. static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
  2529. u32 type_id, void *data, u8 bits_offset,
  2530. struct btf_show *show)
  2531. {
  2532. const struct btf_array *array = btf_type_array(t);
  2533. const struct btf_kind_operations *elem_ops;
  2534. const struct btf_type *elem_type;
  2535. u32 i, elem_size = 0, elem_type_id;
  2536. u16 encoding = 0;
  2537. elem_type_id = array->type;
  2538. elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
  2539. if (elem_type && btf_type_has_size(elem_type))
  2540. elem_size = elem_type->size;
  2541. if (elem_type && btf_type_is_int(elem_type)) {
  2542. u32 int_type = btf_type_int(elem_type);
  2543. encoding = BTF_INT_ENCODING(int_type);
  2544. /*
  2545. * BTF_INT_CHAR encoding never seems to be set for
2546. * char arrays, so if the element size is 1 and the element
2547. * is printable as a char, we show it as a char.
  2548. */
  2549. if (elem_size == 1)
  2550. encoding = BTF_INT_CHAR;
  2551. }
  2552. if (!btf_show_start_array_type(show, t, type_id, encoding, data))
  2553. return;
  2554. if (!elem_type)
  2555. goto out;
  2556. elem_ops = btf_type_ops(elem_type);
  2557. for (i = 0; i < array->nelems; i++) {
  2558. btf_show_start_array_member(show);
  2559. elem_ops->show(btf, elem_type, elem_type_id, data,
  2560. bits_offset, show);
  2561. data += elem_size;
  2562. btf_show_end_array_member(show);
  2563. if (show->state.array_terminated)
  2564. break;
  2565. }
  2566. out:
  2567. btf_show_end_array_type(show);
  2568. }
  2569. static void btf_array_show(const struct btf *btf, const struct btf_type *t,
  2570. u32 type_id, void *data, u8 bits_offset,
  2571. struct btf_show *show)
  2572. {
  2573. const struct btf_member *m = show->state.member;
  2574. /*
  2575. * First check if any members would be shown (are non-zero).
  2576. * See comments above "struct btf_show" definition for more
  2577. * details on how this works at a high-level.
  2578. */
  2579. if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
  2580. if (!show->state.depth_check) {
  2581. show->state.depth_check = show->state.depth + 1;
  2582. show->state.depth_to_show = 0;
  2583. }
  2584. __btf_array_show(btf, t, type_id, data, bits_offset, show);
  2585. show->state.member = m;
  2586. if (show->state.depth_check != show->state.depth + 1)
  2587. return;
  2588. show->state.depth_check = 0;
  2589. if (show->state.depth_to_show <= show->state.depth)
  2590. return;
  2591. /*
  2592. * Reaching here indicates we have recursed and found
  2593. * non-zero array member(s).
  2594. */
  2595. }
  2596. __btf_array_show(btf, t, type_id, data, bits_offset, show);
  2597. }
  2598. static struct btf_kind_operations array_ops = {
  2599. .check_meta = btf_array_check_meta,
  2600. .resolve = btf_array_resolve,
  2601. .check_member = btf_array_check_member,
  2602. .check_kflag_member = btf_generic_check_kflag_member,
  2603. .log_details = btf_array_log,
  2604. .show = btf_array_show,
  2605. };
  2606. static int btf_struct_check_member(struct btf_verifier_env *env,
  2607. const struct btf_type *struct_type,
  2608. const struct btf_member *member,
  2609. const struct btf_type *member_type)
  2610. {
  2611. u32 struct_bits_off = member->offset;
  2612. u32 struct_size, bytes_offset;
  2613. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  2614. btf_verifier_log_member(env, struct_type, member,
  2615. "Member is not byte aligned");
  2616. return -EINVAL;
  2617. }
  2618. struct_size = struct_type->size;
  2619. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  2620. if (struct_size - bytes_offset < member_type->size) {
  2621. btf_verifier_log_member(env, struct_type, member,
  2622. "Member exceeds struct_size");
  2623. return -EINVAL;
  2624. }
  2625. return 0;
  2626. }
  2627. static s32 btf_struct_check_meta(struct btf_verifier_env *env,
  2628. const struct btf_type *t,
  2629. u32 meta_left)
  2630. {
  2631. bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
  2632. const struct btf_member *member;
  2633. u32 meta_needed, last_offset;
  2634. struct btf *btf = env->btf;
  2635. u32 struct_size = t->size;
  2636. u32 offset;
  2637. u16 i;
  2638. meta_needed = btf_type_vlen(t) * sizeof(*member);
  2639. if (meta_left < meta_needed) {
  2640. btf_verifier_log_basic(env, t,
  2641. "meta_left:%u meta_needed:%u",
  2642. meta_left, meta_needed);
  2643. return -EINVAL;
  2644. }
  2645. /* struct type either no name or a valid one */
  2646. if (t->name_off &&
  2647. !btf_name_valid_identifier(env->btf, t->name_off)) {
  2648. btf_verifier_log_type(env, t, "Invalid name");
  2649. return -EINVAL;
  2650. }
  2651. btf_verifier_log_type(env, t, NULL);
  2652. last_offset = 0;
  2653. for_each_member(i, t, member) {
  2654. if (!btf_name_offset_valid(btf, member->name_off)) {
  2655. btf_verifier_log_member(env, t, member,
  2656. "Invalid member name_offset:%u",
  2657. member->name_off);
  2658. return -EINVAL;
  2659. }
  2660. /* struct member either no name or a valid one */
  2661. if (member->name_off &&
  2662. !btf_name_valid_identifier(btf, member->name_off)) {
  2663. btf_verifier_log_member(env, t, member, "Invalid name");
  2664. return -EINVAL;
  2665. }
2666. /* A member cannot be of type void */
  2667. if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
  2668. btf_verifier_log_member(env, t, member,
  2669. "Invalid type_id");
  2670. return -EINVAL;
  2671. }
  2672. offset = __btf_member_bit_offset(t, member);
  2673. if (is_union && offset) {
  2674. btf_verifier_log_member(env, t, member,
  2675. "Invalid member bits_offset");
  2676. return -EINVAL;
  2677. }
  2678. /*
  2679. * ">" instead of ">=" because the last member could be
  2680. * "char a[0];"
  2681. */
  2682. if (last_offset > offset) {
  2683. btf_verifier_log_member(env, t, member,
  2684. "Invalid member bits_offset");
  2685. return -EINVAL;
  2686. }
  2687. if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
  2688. btf_verifier_log_member(env, t, member,
  2689. "Member bits_offset exceeds its struct size");
  2690. return -EINVAL;
  2691. }
  2692. btf_verifier_log_member(env, t, member, NULL);
  2693. last_offset = offset;
  2694. }
  2695. return meta_needed;
  2696. }
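/* Struct/union resolution may be suspended whenever a member's type still
 * needs resolving: v->next_member remembers where to resume, and the
 * member that triggered the suspension is re-checked (check_member or
 * check_kflag_member) once its type has been resolved.
 */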
  2697. static int btf_struct_resolve(struct btf_verifier_env *env,
  2698. const struct resolve_vertex *v)
  2699. {
  2700. const struct btf_member *member;
  2701. int err;
  2702. u16 i;
2703. /* Before continuing to resolve the next_member,
  2704. * ensure the last member is indeed resolved to a
  2705. * type with size info.
  2706. */
  2707. if (v->next_member) {
  2708. const struct btf_type *last_member_type;
  2709. const struct btf_member *last_member;
  2710. u32 last_member_type_id;
  2711. last_member = btf_type_member(v->t) + v->next_member - 1;
  2712. last_member_type_id = last_member->type;
  2713. if (WARN_ON_ONCE(!env_type_is_resolved(env,
  2714. last_member_type_id)))
  2715. return -EINVAL;
  2716. last_member_type = btf_type_by_id(env->btf,
  2717. last_member_type_id);
  2718. if (btf_type_kflag(v->t))
  2719. err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
  2720. last_member,
  2721. last_member_type);
  2722. else
  2723. err = btf_type_ops(last_member_type)->check_member(env, v->t,
  2724. last_member,
  2725. last_member_type);
  2726. if (err)
  2727. return err;
  2728. }
  2729. for_each_member_from(i, v->next_member, v->t, member) {
  2730. u32 member_type_id = member->type;
  2731. const struct btf_type *member_type = btf_type_by_id(env->btf,
  2732. member_type_id);
  2733. if (btf_type_nosize_or_null(member_type) ||
  2734. btf_type_is_resolve_source_only(member_type)) {
  2735. btf_verifier_log_member(env, v->t, member,
  2736. "Invalid member");
  2737. return -EINVAL;
  2738. }
  2739. if (!env_type_is_resolve_sink(env, member_type) &&
  2740. !env_type_is_resolved(env, member_type_id)) {
  2741. env_stack_set_next_member(env, i + 1);
  2742. return env_stack_push(env, member_type, member_type_id);
  2743. }
  2744. if (btf_type_kflag(v->t))
  2745. err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
  2746. member,
  2747. member_type);
  2748. else
  2749. err = btf_type_ops(member_type)->check_member(env, v->t,
  2750. member,
  2751. member_type);
  2752. if (err)
  2753. return err;
  2754. }
  2755. env_stack_pop_resolved(env, 0, 0);
  2756. return 0;
  2757. }
  2758. static void btf_struct_log(struct btf_verifier_env *env,
  2759. const struct btf_type *t)
  2760. {
  2761. btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
  2762. }
  2763. enum btf_field_type {
  2764. BTF_FIELD_SPIN_LOCK,
  2765. BTF_FIELD_TIMER,
  2766. BTF_FIELD_KPTR,
  2767. };
  2768. enum {
  2769. BTF_FIELD_IGNORE = 0,
  2770. BTF_FIELD_FOUND = 1,
  2771. };
  2772. struct btf_field_info {
  2773. u32 type_id;
  2774. u32 off;
  2775. enum bpf_kptr_type type;
  2776. };
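/* The helpers below search a map value type for special fields (spin
 * locks, timers, kptrs). Each matcher returns BTF_FIELD_FOUND with the
 * btf_field_info filled in, BTF_FIELD_IGNORE when the member is not a
 * match, or a negative error for malformed BTF.
 */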
  2777. static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
  2778. u32 off, int sz, struct btf_field_info *info)
  2779. {
  2780. if (!__btf_type_is_struct(t))
  2781. return BTF_FIELD_IGNORE;
  2782. if (t->size != sz)
  2783. return BTF_FIELD_IGNORE;
  2784. info->off = off;
  2785. return BTF_FIELD_FOUND;
  2786. }
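/* A kptr field is a pointer member whose pointee is wrapped in a "kptr"
 * or "kptr_ref" type tag. On the BPF program side this is typically
 * spelled with helper macros, roughly (sketch, macro names assumed):
 *
 *   struct map_value {
 *           struct task_struct __kptr *t;        /+ unreferenced kptr +/
 *           struct task_struct __kptr_ref *rt;   /+ referenced kptr   +/
 *   };
 *
 * where __kptr/__kptr_ref expand to btf_type_tag("kptr") and
 * btf_type_tag("kptr_ref") attributes, which is what btf_find_kptr()
 * matches below.
 */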
  2787. static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
  2788. u32 off, int sz, struct btf_field_info *info)
  2789. {
  2790. enum bpf_kptr_type type;
  2791. u32 res_id;
  2792. /* For PTR, sz is always == 8 */
  2793. if (!btf_type_is_ptr(t))
  2794. return BTF_FIELD_IGNORE;
  2795. t = btf_type_by_id(btf, t->type);
  2796. if (!btf_type_is_type_tag(t))
  2797. return BTF_FIELD_IGNORE;
  2798. /* Reject extra tags */
  2799. if (btf_type_is_type_tag(btf_type_by_id(btf, t->type)))
  2800. return -EINVAL;
  2801. if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off)))
  2802. type = BPF_KPTR_UNREF;
  2803. else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off)))
  2804. type = BPF_KPTR_REF;
  2805. else
  2806. return -EINVAL;
  2807. /* Get the base type */
  2808. t = btf_type_skip_modifiers(btf, t->type, &res_id);
  2809. /* Only pointer to struct is allowed */
  2810. if (!__btf_type_is_struct(t))
  2811. return -EINVAL;
  2812. info->type_id = res_id;
  2813. info->off = off;
  2814. info->type = type;
  2815. return BTF_FIELD_FOUND;
  2816. }
  2817. static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t,
  2818. const char *name, int sz, int align,
  2819. enum btf_field_type field_type,
  2820. struct btf_field_info *info, int info_cnt)
  2821. {
  2822. const struct btf_member *member;
  2823. struct btf_field_info tmp;
  2824. int ret, idx = 0;
  2825. u32 i, off;
  2826. for_each_member(i, t, member) {
  2827. const struct btf_type *member_type = btf_type_by_id(btf,
  2828. member->type);
  2829. if (name && strcmp(__btf_name_by_offset(btf, member_type->name_off), name))
  2830. continue;
  2831. off = __btf_member_bit_offset(t, member);
  2832. if (off % 8)
  2833. /* valid C code cannot generate such BTF */
  2834. return -EINVAL;
  2835. off /= 8;
  2836. if (off % align)
  2837. return -EINVAL;
  2838. switch (field_type) {
  2839. case BTF_FIELD_SPIN_LOCK:
  2840. case BTF_FIELD_TIMER:
  2841. ret = btf_find_struct(btf, member_type, off, sz,
  2842. idx < info_cnt ? &info[idx] : &tmp);
  2843. if (ret < 0)
  2844. return ret;
  2845. break;
  2846. case BTF_FIELD_KPTR:
  2847. ret = btf_find_kptr(btf, member_type, off, sz,
  2848. idx < info_cnt ? &info[idx] : &tmp);
  2849. if (ret < 0)
  2850. return ret;
  2851. break;
  2852. default:
  2853. return -EFAULT;
  2854. }
  2855. if (ret == BTF_FIELD_IGNORE)
  2856. continue;
  2857. if (idx >= info_cnt)
  2858. return -E2BIG;
  2859. ++idx;
  2860. }
  2861. return idx;
  2862. }
  2863. static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
  2864. const char *name, int sz, int align,
  2865. enum btf_field_type field_type,
  2866. struct btf_field_info *info, int info_cnt)
  2867. {
  2868. const struct btf_var_secinfo *vsi;
  2869. struct btf_field_info tmp;
  2870. int ret, idx = 0;
  2871. u32 i, off;
  2872. for_each_vsi(i, t, vsi) {
  2873. const struct btf_type *var = btf_type_by_id(btf, vsi->type);
  2874. const struct btf_type *var_type = btf_type_by_id(btf, var->type);
  2875. off = vsi->offset;
  2876. if (name && strcmp(__btf_name_by_offset(btf, var_type->name_off), name))
  2877. continue;
  2878. if (vsi->size != sz)
  2879. continue;
  2880. if (off % align)
  2881. return -EINVAL;
  2882. switch (field_type) {
  2883. case BTF_FIELD_SPIN_LOCK:
  2884. case BTF_FIELD_TIMER:
  2885. ret = btf_find_struct(btf, var_type, off, sz,
  2886. idx < info_cnt ? &info[idx] : &tmp);
  2887. if (ret < 0)
  2888. return ret;
  2889. break;
  2890. case BTF_FIELD_KPTR:
  2891. ret = btf_find_kptr(btf, var_type, off, sz,
  2892. idx < info_cnt ? &info[idx] : &tmp);
  2893. if (ret < 0)
  2894. return ret;
  2895. break;
  2896. default:
  2897. return -EFAULT;
  2898. }
  2899. if (ret == BTF_FIELD_IGNORE)
  2900. continue;
  2901. if (idx >= info_cnt)
  2902. return -E2BIG;
  2903. ++idx;
  2904. }
  2905. return idx;
  2906. }
  2907. static int btf_find_field(const struct btf *btf, const struct btf_type *t,
  2908. enum btf_field_type field_type,
  2909. struct btf_field_info *info, int info_cnt)
  2910. {
  2911. const char *name;
  2912. int sz, align;
  2913. switch (field_type) {
  2914. case BTF_FIELD_SPIN_LOCK:
  2915. name = "bpf_spin_lock";
  2916. sz = sizeof(struct bpf_spin_lock);
  2917. align = __alignof__(struct bpf_spin_lock);
  2918. break;
  2919. case BTF_FIELD_TIMER:
  2920. name = "bpf_timer";
  2921. sz = sizeof(struct bpf_timer);
  2922. align = __alignof__(struct bpf_timer);
  2923. break;
  2924. case BTF_FIELD_KPTR:
  2925. name = NULL;
  2926. sz = sizeof(u64);
  2927. align = 8;
  2928. break;
  2929. default:
  2930. return -EFAULT;
  2931. }
  2932. if (__btf_type_is_struct(t))
  2933. return btf_find_struct_field(btf, t, name, sz, align, field_type, info, info_cnt);
  2934. else if (btf_type_is_datasec(t))
  2935. return btf_find_datasec_var(btf, t, name, sz, align, field_type, info, info_cnt);
  2936. return -EINVAL;
  2937. }
2938. /* Find 'struct bpf_spin_lock' in the map value type.
2939. * Return its offset (>= 0) if found, -ENOENT if not present,
2940. * and another negative errno in case of error.
  2941. */
  2942. int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
  2943. {
  2944. struct btf_field_info info;
  2945. int ret;
  2946. ret = btf_find_field(btf, t, BTF_FIELD_SPIN_LOCK, &info, 1);
  2947. if (ret < 0)
  2948. return ret;
  2949. if (!ret)
  2950. return -ENOENT;
  2951. return info.off;
  2952. }
  2953. int btf_find_timer(const struct btf *btf, const struct btf_type *t)
  2954. {
  2955. struct btf_field_info info;
  2956. int ret;
  2957. ret = btf_find_field(btf, t, BTF_FIELD_TIMER, &info, 1);
  2958. if (ret < 0)
  2959. return ret;
  2960. if (!ret)
  2961. return -ENOENT;
  2962. return info.off;
  2963. }
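/* btf_parse_kptrs() collects every kptr field of a map value type into a
 * bpf_map_value_off table: for each field it resolves the target struct in
 * vmlinux or module BTF by name and kind and, for referenced kptrs, looks
 * up and stashes the destructor kfunc so it can be invoked when the map is
 * freed. On failure all acquired BTF and module references are dropped.
 */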
  2964. struct bpf_map_value_off *btf_parse_kptrs(const struct btf *btf,
  2965. const struct btf_type *t)
  2966. {
  2967. struct btf_field_info info_arr[BPF_MAP_VALUE_OFF_MAX];
  2968. struct bpf_map_value_off *tab;
  2969. struct btf *kernel_btf = NULL;
  2970. struct module *mod = NULL;
  2971. int ret, i, nr_off;
  2972. ret = btf_find_field(btf, t, BTF_FIELD_KPTR, info_arr, ARRAY_SIZE(info_arr));
  2973. if (ret < 0)
  2974. return ERR_PTR(ret);
  2975. if (!ret)
  2976. return NULL;
  2977. nr_off = ret;
  2978. tab = kzalloc(offsetof(struct bpf_map_value_off, off[nr_off]), GFP_KERNEL | __GFP_NOWARN);
  2979. if (!tab)
  2980. return ERR_PTR(-ENOMEM);
  2981. for (i = 0; i < nr_off; i++) {
  2982. const struct btf_type *t;
  2983. s32 id;
  2984. /* Find type in map BTF, and use it to look up the matching type
  2985. * in vmlinux or module BTFs, by name and kind.
  2986. */
  2987. t = btf_type_by_id(btf, info_arr[i].type_id);
  2988. id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
  2989. &kernel_btf);
  2990. if (id < 0) {
  2991. ret = id;
  2992. goto end;
  2993. }
  2994. /* Find and stash the function pointer for the destruction function that
  2995. * needs to be eventually invoked from the map free path.
  2996. */
  2997. if (info_arr[i].type == BPF_KPTR_REF) {
  2998. const struct btf_type *dtor_func;
  2999. const char *dtor_func_name;
  3000. unsigned long addr;
  3001. s32 dtor_btf_id;
  3002. /* This call also serves as a whitelist of allowed objects that
  3003. * can be used as a referenced pointer and be stored in a map at
  3004. * the same time.
  3005. */
  3006. dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id);
  3007. if (dtor_btf_id < 0) {
  3008. ret = dtor_btf_id;
  3009. goto end_btf;
  3010. }
  3011. dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id);
  3012. if (!dtor_func) {
  3013. ret = -ENOENT;
  3014. goto end_btf;
  3015. }
  3016. if (btf_is_module(kernel_btf)) {
  3017. mod = btf_try_get_module(kernel_btf);
  3018. if (!mod) {
  3019. ret = -ENXIO;
  3020. goto end_btf;
  3021. }
  3022. }
  3023. /* We already verified dtor_func to be btf_type_is_func
  3024. * in register_btf_id_dtor_kfuncs.
  3025. */
  3026. dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off);
  3027. addr = kallsyms_lookup_name(dtor_func_name);
  3028. if (!addr) {
  3029. ret = -EINVAL;
  3030. goto end_mod;
  3031. }
  3032. tab->off[i].kptr.dtor = (void *)addr;
  3033. }
  3034. tab->off[i].offset = info_arr[i].off;
  3035. tab->off[i].type = info_arr[i].type;
  3036. tab->off[i].kptr.btf_id = id;
  3037. tab->off[i].kptr.btf = kernel_btf;
  3038. tab->off[i].kptr.module = mod;
  3039. }
  3040. tab->nr_off = nr_off;
  3041. return tab;
  3042. end_mod:
  3043. module_put(mod);
  3044. end_btf:
  3045. btf_put(kernel_btf);
  3046. end:
  3047. while (i--) {
  3048. btf_put(tab->off[i].kptr.btf);
  3049. if (tab->off[i].kptr.module)
  3050. module_put(tab->off[i].kptr.module);
  3051. }
  3052. kfree(tab);
  3053. return ERR_PTR(ret);
  3054. }
  3055. static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
  3056. u32 type_id, void *data, u8 bits_offset,
  3057. struct btf_show *show)
  3058. {
  3059. const struct btf_member *member;
  3060. void *safe_data;
  3061. u32 i;
  3062. safe_data = btf_show_start_struct_type(show, t, type_id, data);
  3063. if (!safe_data)
  3064. return;
  3065. for_each_member(i, t, member) {
  3066. const struct btf_type *member_type = btf_type_by_id(btf,
  3067. member->type);
  3068. const struct btf_kind_operations *ops;
  3069. u32 member_offset, bitfield_size;
  3070. u32 bytes_offset;
  3071. u8 bits8_offset;
  3072. btf_show_start_member(show, member);
  3073. member_offset = __btf_member_bit_offset(t, member);
  3074. bitfield_size = __btf_member_bitfield_size(t, member);
  3075. bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
  3076. bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
  3077. if (bitfield_size) {
  3078. safe_data = btf_show_start_type(show, member_type,
  3079. member->type,
  3080. data + bytes_offset);
  3081. if (safe_data)
  3082. btf_bitfield_show(safe_data,
  3083. bits8_offset,
  3084. bitfield_size, show);
  3085. btf_show_end_type(show);
  3086. } else {
  3087. ops = btf_type_ops(member_type);
  3088. ops->show(btf, member_type, member->type,
  3089. data + bytes_offset, bits8_offset, show);
  3090. }
  3091. btf_show_end_member(show);
  3092. }
  3093. btf_show_end_struct_type(show);
  3094. }
  3095. static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
  3096. u32 type_id, void *data, u8 bits_offset,
  3097. struct btf_show *show)
  3098. {
  3099. const struct btf_member *m = show->state.member;
  3100. /*
  3101. * First check if any members would be shown (are non-zero).
  3102. * See comments above "struct btf_show" definition for more
  3103. * details on how this works at a high-level.
  3104. */
  3105. if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
  3106. if (!show->state.depth_check) {
  3107. show->state.depth_check = show->state.depth + 1;
  3108. show->state.depth_to_show = 0;
  3109. }
  3110. __btf_struct_show(btf, t, type_id, data, bits_offset, show);
  3111. /* Restore saved member data here */
  3112. show->state.member = m;
  3113. if (show->state.depth_check != show->state.depth + 1)
  3114. return;
  3115. show->state.depth_check = 0;
  3116. if (show->state.depth_to_show <= show->state.depth)
  3117. return;
  3118. /*
  3119. * Reaching here indicates we have recursed and found
  3120. * non-zero child values.
  3121. */
  3122. }
  3123. __btf_struct_show(btf, t, type_id, data, bits_offset, show);
  3124. }
  3125. static struct btf_kind_operations struct_ops = {
  3126. .check_meta = btf_struct_check_meta,
  3127. .resolve = btf_struct_resolve,
  3128. .check_member = btf_struct_check_member,
  3129. .check_kflag_member = btf_generic_check_kflag_member,
  3130. .log_details = btf_struct_log,
  3131. .show = btf_struct_show,
  3132. };
  3133. static int btf_enum_check_member(struct btf_verifier_env *env,
  3134. const struct btf_type *struct_type,
  3135. const struct btf_member *member,
  3136. const struct btf_type *member_type)
  3137. {
  3138. u32 struct_bits_off = member->offset;
  3139. u32 struct_size, bytes_offset;
  3140. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  3141. btf_verifier_log_member(env, struct_type, member,
  3142. "Member is not byte aligned");
  3143. return -EINVAL;
  3144. }
  3145. struct_size = struct_type->size;
  3146. bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
  3147. if (struct_size - bytes_offset < member_type->size) {
  3148. btf_verifier_log_member(env, struct_type, member,
  3149. "Member exceeds struct_size");
  3150. return -EINVAL;
  3151. }
  3152. return 0;
  3153. }
  3154. static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
  3155. const struct btf_type *struct_type,
  3156. const struct btf_member *member,
  3157. const struct btf_type *member_type)
  3158. {
  3159. u32 struct_bits_off, nr_bits, bytes_end, struct_size;
  3160. u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
  3161. struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
  3162. nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
  3163. if (!nr_bits) {
  3164. if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
  3165. btf_verifier_log_member(env, struct_type, member,
  3166. "Member is not byte aligned");
  3167. return -EINVAL;
  3168. }
  3169. nr_bits = int_bitsize;
  3170. } else if (nr_bits > int_bitsize) {
  3171. btf_verifier_log_member(env, struct_type, member,
  3172. "Invalid member bitfield_size");
  3173. return -EINVAL;
  3174. }
  3175. struct_size = struct_type->size;
  3176. bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
  3177. if (struct_size < bytes_end) {
  3178. btf_verifier_log_member(env, struct_type, member,
  3179. "Member exceeds struct_size");
  3180. return -EINVAL;
  3181. }
  3182. return 0;
  3183. }
  3184. static s32 btf_enum_check_meta(struct btf_verifier_env *env,
  3185. const struct btf_type *t,
  3186. u32 meta_left)
  3187. {
  3188. const struct btf_enum *enums = btf_type_enum(t);
  3189. struct btf *btf = env->btf;
  3190. const char *fmt_str;
  3191. u16 i, nr_enums;
  3192. u32 meta_needed;
  3193. nr_enums = btf_type_vlen(t);
  3194. meta_needed = nr_enums * sizeof(*enums);
  3195. if (meta_left < meta_needed) {
  3196. btf_verifier_log_basic(env, t,
  3197. "meta_left:%u meta_needed:%u",
  3198. meta_left, meta_needed);
  3199. return -EINVAL;
  3200. }
  3201. if (t->size > 8 || !is_power_of_2(t->size)) {
  3202. btf_verifier_log_type(env, t, "Unexpected size");
  3203. return -EINVAL;
  3204. }
  3205. /* enum type either no name or a valid one */
  3206. if (t->name_off &&
  3207. !btf_name_valid_identifier(env->btf, t->name_off)) {
  3208. btf_verifier_log_type(env, t, "Invalid name");
  3209. return -EINVAL;
  3210. }
  3211. btf_verifier_log_type(env, t, NULL);
  3212. for (i = 0; i < nr_enums; i++) {
  3213. if (!btf_name_offset_valid(btf, enums[i].name_off)) {
  3214. btf_verifier_log(env, "\tInvalid name_offset:%u",
  3215. enums[i].name_off);
  3216. return -EINVAL;
  3217. }
  3218. /* enum member must have a valid name */
  3219. if (!enums[i].name_off ||
  3220. !btf_name_valid_identifier(btf, enums[i].name_off)) {
  3221. btf_verifier_log_type(env, t, "Invalid name");
  3222. return -EINVAL;
  3223. }
  3224. if (env->log.level == BPF_LOG_KERNEL)
  3225. continue;
  3226. fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
  3227. btf_verifier_log(env, fmt_str,
  3228. __btf_name_by_offset(btf, enums[i].name_off),
  3229. enums[i].val);
  3230. }
  3231. return meta_needed;
  3232. }
  3233. static void btf_enum_log(struct btf_verifier_env *env,
  3234. const struct btf_type *t)
  3235. {
  3236. btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
  3237. }
  3238. static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
  3239. u32 type_id, void *data, u8 bits_offset,
  3240. struct btf_show *show)
  3241. {
  3242. const struct btf_enum *enums = btf_type_enum(t);
  3243. u32 i, nr_enums = btf_type_vlen(t);
  3244. void *safe_data;
  3245. int v;
  3246. safe_data = btf_show_start_type(show, t, type_id, data);
  3247. if (!safe_data)
  3248. return;
  3249. v = *(int *)safe_data;
  3250. for (i = 0; i < nr_enums; i++) {
  3251. if (v != enums[i].val)
  3252. continue;
  3253. btf_show_type_value(show, "%s",
  3254. __btf_name_by_offset(btf,
  3255. enums[i].name_off));
  3256. btf_show_end_type(show);
  3257. return;
  3258. }
  3259. if (btf_type_kflag(t))
  3260. btf_show_type_value(show, "%d", v);
  3261. else
  3262. btf_show_type_value(show, "%u", v);
  3263. btf_show_end_type(show);
  3264. }
  3265. static struct btf_kind_operations enum_ops = {
  3266. .check_meta = btf_enum_check_meta,
  3267. .resolve = btf_df_resolve,
  3268. .check_member = btf_enum_check_member,
  3269. .check_kflag_member = btf_enum_check_kflag_member,
  3270. .log_details = btf_enum_log,
  3271. .show = btf_enum_show,
  3272. };
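/* BTF_KIND_ENUM64 mirrors BTF_KIND_ENUM, but each enumerator carries a
 * 64-bit value read via btf_enum64_value(); kind_flag again selects
 * signed vs. unsigned formatting.
 */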
  3273. static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
  3274. const struct btf_type *t,
  3275. u32 meta_left)
  3276. {
  3277. const struct btf_enum64 *enums = btf_type_enum64(t);
  3278. struct btf *btf = env->btf;
  3279. const char *fmt_str;
  3280. u16 i, nr_enums;
  3281. u32 meta_needed;
  3282. nr_enums = btf_type_vlen(t);
  3283. meta_needed = nr_enums * sizeof(*enums);
  3284. if (meta_left < meta_needed) {
  3285. btf_verifier_log_basic(env, t,
  3286. "meta_left:%u meta_needed:%u",
  3287. meta_left, meta_needed);
  3288. return -EINVAL;
  3289. }
  3290. if (t->size > 8 || !is_power_of_2(t->size)) {
  3291. btf_verifier_log_type(env, t, "Unexpected size");
  3292. return -EINVAL;
  3293. }
  3294. /* enum type either no name or a valid one */
  3295. if (t->name_off &&
  3296. !btf_name_valid_identifier(env->btf, t->name_off)) {
  3297. btf_verifier_log_type(env, t, "Invalid name");
  3298. return -EINVAL;
  3299. }
  3300. btf_verifier_log_type(env, t, NULL);
  3301. for (i = 0; i < nr_enums; i++) {
  3302. if (!btf_name_offset_valid(btf, enums[i].name_off)) {
  3303. btf_verifier_log(env, "\tInvalid name_offset:%u",
  3304. enums[i].name_off);
  3305. return -EINVAL;
  3306. }
  3307. /* enum member must have a valid name */
  3308. if (!enums[i].name_off ||
  3309. !btf_name_valid_identifier(btf, enums[i].name_off)) {
  3310. btf_verifier_log_type(env, t, "Invalid name");
  3311. return -EINVAL;
  3312. }
  3313. if (env->log.level == BPF_LOG_KERNEL)
  3314. continue;
  3315. fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
  3316. btf_verifier_log(env, fmt_str,
  3317. __btf_name_by_offset(btf, enums[i].name_off),
  3318. btf_enum64_value(enums + i));
  3319. }
  3320. return meta_needed;
  3321. }
  3322. static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
  3323. u32 type_id, void *data, u8 bits_offset,
  3324. struct btf_show *show)
  3325. {
  3326. const struct btf_enum64 *enums = btf_type_enum64(t);
  3327. u32 i, nr_enums = btf_type_vlen(t);
  3328. void *safe_data;
  3329. s64 v;
  3330. safe_data = btf_show_start_type(show, t, type_id, data);
  3331. if (!safe_data)
  3332. return;
  3333. v = *(u64 *)safe_data;
  3334. for (i = 0; i < nr_enums; i++) {
  3335. if (v != btf_enum64_value(enums + i))
  3336. continue;
  3337. btf_show_type_value(show, "%s",
  3338. __btf_name_by_offset(btf,
  3339. enums[i].name_off));
  3340. btf_show_end_type(show);
  3341. return;
  3342. }
  3343. if (btf_type_kflag(t))
  3344. btf_show_type_value(show, "%lld", v);
  3345. else
  3346. btf_show_type_value(show, "%llu", v);
  3347. btf_show_end_type(show);
  3348. }
  3349. static struct btf_kind_operations enum64_ops = {
  3350. .check_meta = btf_enum64_check_meta,
  3351. .resolve = btf_df_resolve,
  3352. .check_member = btf_enum_check_member,
  3353. .check_kflag_member = btf_enum_check_kflag_member,
  3354. .log_details = btf_enum_log,
  3355. .show = btf_enum64_show,
  3356. };
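/* For BTF_KIND_FUNC_PROTO only the metadata (vlen btf_param entries, no
 * name, no kind_flag) is validated here; the return and argument types are
 * validated separately in btf_func_proto_check().
 */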
  3357. static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
  3358. const struct btf_type *t,
  3359. u32 meta_left)
  3360. {
  3361. u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
  3362. if (meta_left < meta_needed) {
  3363. btf_verifier_log_basic(env, t,
  3364. "meta_left:%u meta_needed:%u",
  3365. meta_left, meta_needed);
  3366. return -EINVAL;
  3367. }
  3368. if (t->name_off) {
  3369. btf_verifier_log_type(env, t, "Invalid name");
  3370. return -EINVAL;
  3371. }
  3372. if (btf_type_kflag(t)) {
  3373. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3374. return -EINVAL;
  3375. }
  3376. btf_verifier_log_type(env, t, NULL);
  3377. return meta_needed;
  3378. }
  3379. static void btf_func_proto_log(struct btf_verifier_env *env,
  3380. const struct btf_type *t)
  3381. {
  3382. const struct btf_param *args = (const struct btf_param *)(t + 1);
  3383. u16 nr_args = btf_type_vlen(t), i;
  3384. btf_verifier_log(env, "return=%u args=(", t->type);
  3385. if (!nr_args) {
  3386. btf_verifier_log(env, "void");
  3387. goto done;
  3388. }
  3389. if (nr_args == 1 && !args[0].type) {
  3390. /* Only one vararg */
  3391. btf_verifier_log(env, "vararg");
  3392. goto done;
  3393. }
  3394. btf_verifier_log(env, "%u %s", args[0].type,
  3395. __btf_name_by_offset(env->btf,
  3396. args[0].name_off));
  3397. for (i = 1; i < nr_args - 1; i++)
  3398. btf_verifier_log(env, ", %u %s", args[i].type,
  3399. __btf_name_by_offset(env->btf,
  3400. args[i].name_off));
  3401. if (nr_args > 1) {
  3402. const struct btf_param *last_arg = &args[nr_args - 1];
  3403. if (last_arg->type)
  3404. btf_verifier_log(env, ", %u %s", last_arg->type,
  3405. __btf_name_by_offset(env->btf,
  3406. last_arg->name_off));
  3407. else
  3408. btf_verifier_log(env, ", vararg");
  3409. }
  3410. done:
  3411. btf_verifier_log(env, ")");
  3412. }
  3413. static struct btf_kind_operations func_proto_ops = {
  3414. .check_meta = btf_func_proto_check_meta,
  3415. .resolve = btf_df_resolve,
  3416. /*
3417. * BTF_KIND_FUNC_PROTO cannot be directly referred to by
  3418. * a struct's member.
  3419. *
  3420. * It should be a function pointer instead.
  3421. * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
  3422. *
  3423. * Hence, there is no btf_func_check_member().
  3424. */
  3425. .check_member = btf_df_check_member,
  3426. .check_kflag_member = btf_df_check_kflag_member,
  3427. .log_details = btf_func_proto_log,
  3428. .show = btf_df_show,
  3429. };
  3430. static s32 btf_func_check_meta(struct btf_verifier_env *env,
  3431. const struct btf_type *t,
  3432. u32 meta_left)
  3433. {
  3434. if (!t->name_off ||
  3435. !btf_name_valid_identifier(env->btf, t->name_off)) {
  3436. btf_verifier_log_type(env, t, "Invalid name");
  3437. return -EINVAL;
  3438. }
  3439. if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
  3440. btf_verifier_log_type(env, t, "Invalid func linkage");
  3441. return -EINVAL;
  3442. }
  3443. if (btf_type_kflag(t)) {
  3444. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3445. return -EINVAL;
  3446. }
  3447. btf_verifier_log_type(env, t, NULL);
  3448. return 0;
  3449. }
  3450. static int btf_func_resolve(struct btf_verifier_env *env,
  3451. const struct resolve_vertex *v)
  3452. {
  3453. const struct btf_type *t = v->t;
  3454. u32 next_type_id = t->type;
  3455. int err;
  3456. err = btf_func_check(env, t);
  3457. if (err)
  3458. return err;
  3459. env_stack_pop_resolved(env, next_type_id, 0);
  3460. return 0;
  3461. }
  3462. static struct btf_kind_operations func_ops = {
  3463. .check_meta = btf_func_check_meta,
  3464. .resolve = btf_func_resolve,
  3465. .check_member = btf_df_check_member,
  3466. .check_kflag_member = btf_df_check_kflag_member,
  3467. .log_details = btf_ref_type_log,
  3468. .show = btf_df_show,
  3469. };
  3470. static s32 btf_var_check_meta(struct btf_verifier_env *env,
  3471. const struct btf_type *t,
  3472. u32 meta_left)
  3473. {
  3474. const struct btf_var *var;
  3475. u32 meta_needed = sizeof(*var);
  3476. if (meta_left < meta_needed) {
  3477. btf_verifier_log_basic(env, t,
  3478. "meta_left:%u meta_needed:%u",
  3479. meta_left, meta_needed);
  3480. return -EINVAL;
  3481. }
  3482. if (btf_type_vlen(t)) {
  3483. btf_verifier_log_type(env, t, "vlen != 0");
  3484. return -EINVAL;
  3485. }
  3486. if (btf_type_kflag(t)) {
  3487. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3488. return -EINVAL;
  3489. }
  3490. if (!t->name_off ||
  3491. !__btf_name_valid(env->btf, t->name_off)) {
  3492. btf_verifier_log_type(env, t, "Invalid name");
  3493. return -EINVAL;
  3494. }
3495. /* A var cannot be of type void */
  3496. if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
  3497. btf_verifier_log_type(env, t, "Invalid type_id");
  3498. return -EINVAL;
  3499. }
  3500. var = btf_type_var(t);
  3501. if (var->linkage != BTF_VAR_STATIC &&
  3502. var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
  3503. btf_verifier_log_type(env, t, "Linkage not supported");
  3504. return -EINVAL;
  3505. }
  3506. btf_verifier_log_type(env, t, NULL);
  3507. return meta_needed;
  3508. }
  3509. static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
  3510. {
  3511. const struct btf_var *var = btf_type_var(t);
  3512. btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
  3513. }
  3514. static const struct btf_kind_operations var_ops = {
  3515. .check_meta = btf_var_check_meta,
  3516. .resolve = btf_var_resolve,
  3517. .check_member = btf_df_check_member,
  3518. .check_kflag_member = btf_df_check_kflag_member,
  3519. .log_details = btf_var_log,
  3520. .show = btf_var_show,
  3521. };
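/* A DATASEC groups VAR entries (btf_var_secinfo) with their offset and
 * size inside the section; entries must be non-overlapping, in ascending
 * offset order, and must fit within the section's declared size.
 */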
  3522. static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
  3523. const struct btf_type *t,
  3524. u32 meta_left)
  3525. {
  3526. const struct btf_var_secinfo *vsi;
  3527. u64 last_vsi_end_off = 0, sum = 0;
  3528. u32 i, meta_needed;
  3529. meta_needed = btf_type_vlen(t) * sizeof(*vsi);
  3530. if (meta_left < meta_needed) {
  3531. btf_verifier_log_basic(env, t,
  3532. "meta_left:%u meta_needed:%u",
  3533. meta_left, meta_needed);
  3534. return -EINVAL;
  3535. }
  3536. if (!t->size) {
  3537. btf_verifier_log_type(env, t, "size == 0");
  3538. return -EINVAL;
  3539. }
  3540. if (btf_type_kflag(t)) {
  3541. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3542. return -EINVAL;
  3543. }
  3544. if (!t->name_off ||
  3545. !btf_name_valid_section(env->btf, t->name_off)) {
  3546. btf_verifier_log_type(env, t, "Invalid name");
  3547. return -EINVAL;
  3548. }
  3549. btf_verifier_log_type(env, t, NULL);
  3550. for_each_vsi(i, t, vsi) {
3551. /* A var cannot be of type void */
  3552. if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
  3553. btf_verifier_log_vsi(env, t, vsi,
  3554. "Invalid type_id");
  3555. return -EINVAL;
  3556. }
  3557. if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
  3558. btf_verifier_log_vsi(env, t, vsi,
  3559. "Invalid offset");
  3560. return -EINVAL;
  3561. }
  3562. if (!vsi->size || vsi->size > t->size) {
  3563. btf_verifier_log_vsi(env, t, vsi,
  3564. "Invalid size");
  3565. return -EINVAL;
  3566. }
  3567. last_vsi_end_off = vsi->offset + vsi->size;
  3568. if (last_vsi_end_off > t->size) {
  3569. btf_verifier_log_vsi(env, t, vsi,
  3570. "Invalid offset+size");
  3571. return -EINVAL;
  3572. }
  3573. btf_verifier_log_vsi(env, t, vsi, NULL);
  3574. sum += vsi->size;
  3575. }
  3576. if (t->size < sum) {
  3577. btf_verifier_log_type(env, t, "Invalid btf_info size");
  3578. return -EINVAL;
  3579. }
  3580. return meta_needed;
  3581. }
  3582. static int btf_datasec_resolve(struct btf_verifier_env *env,
  3583. const struct resolve_vertex *v)
  3584. {
  3585. const struct btf_var_secinfo *vsi;
  3586. struct btf *btf = env->btf;
  3587. u16 i;
  3588. env->resolve_mode = RESOLVE_TBD;
  3589. for_each_vsi_from(i, v->next_member, v->t, vsi) {
  3590. u32 var_type_id = vsi->type, type_id, type_size = 0;
  3591. const struct btf_type *var_type = btf_type_by_id(env->btf,
  3592. var_type_id);
  3593. if (!var_type || !btf_type_is_var(var_type)) {
  3594. btf_verifier_log_vsi(env, v->t, vsi,
  3595. "Not a VAR kind member");
  3596. return -EINVAL;
  3597. }
  3598. if (!env_type_is_resolve_sink(env, var_type) &&
  3599. !env_type_is_resolved(env, var_type_id)) {
  3600. env_stack_set_next_member(env, i + 1);
  3601. return env_stack_push(env, var_type, var_type_id);
  3602. }
  3603. type_id = var_type->type;
  3604. if (!btf_type_id_size(btf, &type_id, &type_size)) {
  3605. btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
  3606. return -EINVAL;
  3607. }
  3608. if (vsi->size < type_size) {
  3609. btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
  3610. return -EINVAL;
  3611. }
  3612. }
  3613. env_stack_pop_resolved(env, 0, 0);
  3614. return 0;
  3615. }
  3616. static void btf_datasec_log(struct btf_verifier_env *env,
  3617. const struct btf_type *t)
  3618. {
  3619. btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
  3620. }
  3621. static void btf_datasec_show(const struct btf *btf,
  3622. const struct btf_type *t, u32 type_id,
  3623. void *data, u8 bits_offset,
  3624. struct btf_show *show)
  3625. {
  3626. const struct btf_var_secinfo *vsi;
  3627. const struct btf_type *var;
  3628. u32 i;
  3629. if (!btf_show_start_type(show, t, type_id, data))
  3630. return;
  3631. btf_show_type_value(show, "section (\"%s\") = {",
  3632. __btf_name_by_offset(btf, t->name_off));
  3633. for_each_vsi(i, t, vsi) {
  3634. var = btf_type_by_id(btf, vsi->type);
  3635. if (i)
  3636. btf_show(show, ",");
  3637. btf_type_ops(var)->show(btf, var, vsi->type,
  3638. data + vsi->offset, bits_offset, show);
  3639. }
  3640. btf_show_end_type(show);
  3641. }
  3642. static const struct btf_kind_operations datasec_ops = {
  3643. .check_meta = btf_datasec_check_meta,
  3644. .resolve = btf_datasec_resolve,
  3645. .check_member = btf_df_check_member,
  3646. .check_kflag_member = btf_df_check_kflag_member,
  3647. .log_details = btf_datasec_log,
  3648. .show = btf_datasec_show,
  3649. };
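/* BTF_KIND_FLOAT has nothing to resolve; only the size (2, 4, 8, 12 or 16
 * bytes) and a minimal alignment requirement for struct members are
 * checked.
 */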
  3650. static s32 btf_float_check_meta(struct btf_verifier_env *env,
  3651. const struct btf_type *t,
  3652. u32 meta_left)
  3653. {
  3654. if (btf_type_vlen(t)) {
  3655. btf_verifier_log_type(env, t, "vlen != 0");
  3656. return -EINVAL;
  3657. }
  3658. if (btf_type_kflag(t)) {
  3659. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3660. return -EINVAL;
  3661. }
  3662. if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
  3663. t->size != 16) {
  3664. btf_verifier_log_type(env, t, "Invalid type_size");
  3665. return -EINVAL;
  3666. }
  3667. btf_verifier_log_type(env, t, NULL);
  3668. return 0;
  3669. }
  3670. static int btf_float_check_member(struct btf_verifier_env *env,
  3671. const struct btf_type *struct_type,
  3672. const struct btf_member *member,
  3673. const struct btf_type *member_type)
  3674. {
  3675. u64 start_offset_bytes;
  3676. u64 end_offset_bytes;
  3677. u64 misalign_bits;
  3678. u64 align_bytes;
  3679. u64 align_bits;
  3680. /* Different architectures have different alignment requirements, so
  3681. * here we check only for the reasonable minimum. This way we ensure
  3682. * that types after CO-RE can pass the kernel BTF verifier.
  3683. */
  3684. align_bytes = min_t(u64, sizeof(void *), member_type->size);
  3685. align_bits = align_bytes * BITS_PER_BYTE;
  3686. div64_u64_rem(member->offset, align_bits, &misalign_bits);
  3687. if (misalign_bits) {
  3688. btf_verifier_log_member(env, struct_type, member,
  3689. "Member is not properly aligned");
  3690. return -EINVAL;
  3691. }
  3692. start_offset_bytes = member->offset / BITS_PER_BYTE;
  3693. end_offset_bytes = start_offset_bytes + member_type->size;
  3694. if (end_offset_bytes > struct_type->size) {
  3695. btf_verifier_log_member(env, struct_type, member,
  3696. "Member exceeds struct_size");
  3697. return -EINVAL;
  3698. }
  3699. return 0;
  3700. }
  3701. static void btf_float_log(struct btf_verifier_env *env,
  3702. const struct btf_type *t)
  3703. {
  3704. btf_verifier_log(env, "size=%u", t->size);
  3705. }
  3706. static const struct btf_kind_operations float_ops = {
  3707. .check_meta = btf_float_check_meta,
  3708. .resolve = btf_df_resolve,
  3709. .check_member = btf_float_check_member,
  3710. .check_kflag_member = btf_generic_check_kflag_member,
  3711. .log_details = btf_float_log,
  3712. .show = btf_df_show,
  3713. };
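/* A decl_tag attaches an arbitrary string to another type. component_idx
 * is -1 when the tag applies to the type as a whole, otherwise it is the
 * index of the struct member or function argument being annotated.
 */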
  3714. static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
  3715. const struct btf_type *t,
  3716. u32 meta_left)
  3717. {
  3718. const struct btf_decl_tag *tag;
  3719. u32 meta_needed = sizeof(*tag);
  3720. s32 component_idx;
  3721. const char *value;
  3722. if (meta_left < meta_needed) {
  3723. btf_verifier_log_basic(env, t,
  3724. "meta_left:%u meta_needed:%u",
  3725. meta_left, meta_needed);
  3726. return -EINVAL;
  3727. }
  3728. value = btf_name_by_offset(env->btf, t->name_off);
  3729. if (!value || !value[0]) {
  3730. btf_verifier_log_type(env, t, "Invalid value");
  3731. return -EINVAL;
  3732. }
  3733. if (btf_type_vlen(t)) {
  3734. btf_verifier_log_type(env, t, "vlen != 0");
  3735. return -EINVAL;
  3736. }
  3737. if (btf_type_kflag(t)) {
  3738. btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
  3739. return -EINVAL;
  3740. }
  3741. component_idx = btf_type_decl_tag(t)->component_idx;
  3742. if (component_idx < -1) {
  3743. btf_verifier_log_type(env, t, "Invalid component_idx");
  3744. return -EINVAL;
  3745. }
  3746. btf_verifier_log_type(env, t, NULL);
  3747. return meta_needed;
  3748. }
  3749. static int btf_decl_tag_resolve(struct btf_verifier_env *env,
  3750. const struct resolve_vertex *v)
  3751. {
  3752. const struct btf_type *next_type;
  3753. const struct btf_type *t = v->t;
  3754. u32 next_type_id = t->type;
  3755. struct btf *btf = env->btf;
  3756. s32 component_idx;
  3757. u32 vlen;
  3758. next_type = btf_type_by_id(btf, next_type_id);
  3759. if (!next_type || !btf_type_is_decl_tag_target(next_type)) {
  3760. btf_verifier_log_type(env, v->t, "Invalid type_id");
  3761. return -EINVAL;
  3762. }
  3763. if (!env_type_is_resolve_sink(env, next_type) &&
  3764. !env_type_is_resolved(env, next_type_id))
  3765. return env_stack_push(env, next_type, next_type_id);
  3766. component_idx = btf_type_decl_tag(t)->component_idx;
  3767. if (component_idx != -1) {
  3768. if (btf_type_is_var(next_type) || btf_type_is_typedef(next_type)) {
  3769. btf_verifier_log_type(env, v->t, "Invalid component_idx");
  3770. return -EINVAL;
  3771. }
  3772. if (btf_type_is_struct(next_type)) {
  3773. vlen = btf_type_vlen(next_type);
  3774. } else {
  3775. /* next_type should be a function */
  3776. next_type = btf_type_by_id(btf, next_type->type);
  3777. vlen = btf_type_vlen(next_type);
  3778. }
  3779. if ((u32)component_idx >= vlen) {
  3780. btf_verifier_log_type(env, v->t, "Invalid component_idx");
  3781. return -EINVAL;
  3782. }
  3783. }
  3784. env_stack_pop_resolved(env, next_type_id, 0);
  3785. return 0;
  3786. }
  3787. static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
  3788. {
  3789. btf_verifier_log(env, "type=%u component_idx=%d", t->type,
  3790. btf_type_decl_tag(t)->component_idx);
  3791. }
  3792. static const struct btf_kind_operations decl_tag_ops = {
  3793. .check_meta = btf_decl_tag_check_meta,
  3794. .resolve = btf_decl_tag_resolve,
  3795. .check_member = btf_df_check_member,
  3796. .check_kflag_member = btf_df_check_kflag_member,
  3797. .log_details = btf_decl_tag_log,
  3798. .show = btf_df_show,
  3799. };
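/* btf_func_proto_check() validates a FUNC_PROTO in depth: the return type
 * (if not void) and every argument type must resolve to a sized type, and
 * only the last argument may have type 0 (a vararg), in which case it must
 * be unnamed.
 */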
  3800. static int btf_func_proto_check(struct btf_verifier_env *env,
  3801. const struct btf_type *t)
  3802. {
  3803. const struct btf_type *ret_type;
  3804. const struct btf_param *args;
  3805. const struct btf *btf;
  3806. u16 nr_args, i;
  3807. int err;
  3808. btf = env->btf;
  3809. args = (const struct btf_param *)(t + 1);
  3810. nr_args = btf_type_vlen(t);
  3811. /* Check func return type which could be "void" (t->type == 0) */
  3812. if (t->type) {
  3813. u32 ret_type_id = t->type;
  3814. ret_type = btf_type_by_id(btf, ret_type_id);
  3815. if (!ret_type) {
  3816. btf_verifier_log_type(env, t, "Invalid return type");
  3817. return -EINVAL;
  3818. }
  3819. if (btf_type_is_resolve_source_only(ret_type)) {
  3820. btf_verifier_log_type(env, t, "Invalid return type");
  3821. return -EINVAL;
  3822. }
  3823. if (btf_type_needs_resolve(ret_type) &&
  3824. !env_type_is_resolved(env, ret_type_id)) {
  3825. err = btf_resolve(env, ret_type, ret_type_id);
  3826. if (err)
  3827. return err;
  3828. }
  3829. /* Ensure the return type is a type that has a size */
  3830. if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
  3831. btf_verifier_log_type(env, t, "Invalid return type");
  3832. return -EINVAL;
  3833. }
  3834. }
  3835. if (!nr_args)
  3836. return 0;
  3837. /* Last func arg type_id could be 0 if it is a vararg */
  3838. if (!args[nr_args - 1].type) {
  3839. if (args[nr_args - 1].name_off) {
  3840. btf_verifier_log_type(env, t, "Invalid arg#%u",
  3841. nr_args);
  3842. return -EINVAL;
  3843. }
  3844. nr_args--;
  3845. }
  3846. err = 0;
  3847. for (i = 0; i < nr_args; i++) {
  3848. const struct btf_type *arg_type;
  3849. u32 arg_type_id;
  3850. arg_type_id = args[i].type;
  3851. arg_type = btf_type_by_id(btf, arg_type_id);
  3852. if (!arg_type) {
  3853. btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
  3854. err = -EINVAL;
  3855. break;
  3856. }
  3857. if (btf_type_is_resolve_source_only(arg_type)) {
  3858. btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
  3859. return -EINVAL;
  3860. }
  3861. if (args[i].name_off &&
  3862. (!btf_name_offset_valid(btf, args[i].name_off) ||
  3863. !btf_name_valid_identifier(btf, args[i].name_off))) {
  3864. btf_verifier_log_type(env, t,
  3865. "Invalid arg#%u", i + 1);
  3866. err = -EINVAL;
  3867. break;
  3868. }
  3869. if (btf_type_needs_resolve(arg_type) &&
  3870. !env_type_is_resolved(env, arg_type_id)) {
  3871. err = btf_resolve(env, arg_type, arg_type_id);
  3872. if (err)
  3873. break;
  3874. }
  3875. if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
  3876. btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
  3877. err = -EINVAL;
  3878. break;
  3879. }
  3880. }
  3881. return err;
  3882. }
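/* A BTF_KIND_FUNC must point at a FUNC_PROTO, and every non-vararg
 * parameter of that prototype must carry a name.
 */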
static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_type *proto_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;

	btf = env->btf;
	proto_type = btf_type_by_id(btf, t->type);

	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	args = (const struct btf_param *)(proto_type + 1);
	nr_args = btf_type_vlen(proto_type);
	for (i = 0; i < nr_args; i++) {
		if (!args[i].name_off && args[i].type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
	[BTF_KIND_FLOAT] = &float_ops,
	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
	[BTF_KIND_TYPE_TAG] = &modifier_ops,
	[BTF_KIND_ENUM64] = &enum64_ops,
};
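/* Common metadata checks shared by all kinds (room for struct btf_type,
 * info bits, kind range, name offset) before dispatching to the
 * kind-specific ->check_meta() handler. Returns the number of metadata
 * bytes consumed by this type, or a negative error.
 */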
  3927. static s32 btf_check_meta(struct btf_verifier_env *env,
  3928. const struct btf_type *t,
  3929. u32 meta_left)
  3930. {
  3931. u32 saved_meta_left = meta_left;
  3932. s32 var_meta_size;
  3933. if (meta_left < sizeof(*t)) {
  3934. btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
  3935. env->log_type_id, meta_left, sizeof(*t));
  3936. return -EINVAL;
  3937. }
  3938. meta_left -= sizeof(*t);
  3939. if (t->info & ~BTF_INFO_MASK) {
  3940. btf_verifier_log(env, "[%u] Invalid btf_info:%x",
  3941. env->log_type_id, t->info);
  3942. return -EINVAL;
  3943. }
  3944. if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
  3945. BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
  3946. btf_verifier_log(env, "[%u] Invalid kind:%u",
  3947. env->log_type_id, BTF_INFO_KIND(t->info));
  3948. return -EINVAL;
  3949. }
  3950. if (!btf_name_offset_valid(env->btf, t->name_off)) {
  3951. btf_verifier_log(env, "[%u] Invalid name_offset:%u",
  3952. env->log_type_id, t->name_off);
  3953. return -EINVAL;
  3954. }
  3955. var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
  3956. if (var_meta_size < 0)
  3957. return var_meta_size;
  3958. meta_left -= var_meta_size;
  3959. return saved_meta_left - meta_left;
  3960. }
static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = btf->base_btf ? btf->start_id : 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}
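/* Final consistency check after resolution. Expected state per kind:
 * struct/union and datasec keep neither a resolved id nor a resolved size,
 * decl_tag and func keep only a resolved id, modifiers/ptr/var must resolve
 * to a non-modifier, non-var, non-datasec type, and an array's resolved
 * size must equal nelems * elem_size.
 */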
  3982. static bool btf_resolve_valid(struct btf_verifier_env *env,
  3983. const struct btf_type *t,
  3984. u32 type_id)
  3985. {
  3986. struct btf *btf = env->btf;
  3987. if (!env_type_is_resolved(env, type_id))
  3988. return false;
  3989. if (btf_type_is_struct(t) || btf_type_is_datasec(t))
  3990. return !btf_resolved_type_id(btf, type_id) &&
  3991. !btf_resolved_type_size(btf, type_id);
  3992. if (btf_type_is_decl_tag(t) || btf_type_is_func(t))
  3993. return btf_resolved_type_id(btf, type_id) &&
  3994. !btf_resolved_type_size(btf, type_id);
  3995. if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
  3996. btf_type_is_var(t)) {
  3997. t = btf_type_id_resolve(btf, &type_id);
  3998. return t &&
  3999. !btf_type_is_modifier(t) &&
  4000. !btf_type_is_var(t) &&
  4001. !btf_type_is_datasec(t);
  4002. }
  4003. if (btf_type_is_array(t)) {
  4004. const struct btf_array *array = btf_type_array(t);
  4005. const struct btf_type *elem_type;
  4006. u32 elem_type_id = array->type;
  4007. u32 elem_size;
  4008. elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
  4009. return elem_type && !btf_type_is_modifier(elem_type) &&
  4010. (array->nelems * elem_size ==
  4011. btf_resolved_type_size(btf, type_id));
  4012. }
  4013. return false;
  4014. }
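/* Resolve a type iteratively using the env resolve stack instead of
 * recursion. err == -E2BIG is reported as exceeding MAX_RESOLVE_DEPTH and
 * -EEXIST as a detected loop; both are logged against the type that
 * started the resolution.
 */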
  4015. static int btf_resolve(struct btf_verifier_env *env,
  4016. const struct btf_type *t, u32 type_id)
  4017. {
  4018. u32 save_log_type_id = env->log_type_id;
  4019. const struct resolve_vertex *v;
  4020. int err = 0;
  4021. env->resolve_mode = RESOLVE_TBD;
  4022. env_stack_push(env, t, type_id);
  4023. while (!err && (v = env_stack_peak(env))) {
  4024. env->log_type_id = v->type_id;
  4025. err = btf_type_ops(v->t)->resolve(env, v);
  4026. }
  4027. env->log_type_id = type_id;
  4028. if (err == -E2BIG) {
  4029. btf_verifier_log_type(env, t,
  4030. "Exceeded max resolving depth:%u",
  4031. MAX_RESOLVE_DEPTH);
  4032. } else if (err == -EEXIST) {
  4033. btf_verifier_log_type(env, t, "Loop detected");
  4034. }
  4035. /* Final sanity check */
  4036. if (!err && !btf_resolve_valid(env, t, type_id)) {
  4037. btf_verifier_log_type(env, t, "Invalid resolve state");
  4038. err = -EINVAL;
  4039. }
  4040. env->log_type_id = save_log_type_id;
  4041. return err;
  4042. }
  4043. static int btf_check_all_types(struct btf_verifier_env *env)
  4044. {
  4045. struct btf *btf = env->btf;
  4046. const struct btf_type *t;
  4047. u32 type_id, i;
  4048. int err;
  4049. err = env_resolve_init(env);
  4050. if (err)
  4051. return err;
  4052. env->phase++;
  4053. for (i = btf->base_btf ? 0 : 1; i < btf->nr_types; i++) {
  4054. type_id = btf->start_id + i;
  4055. t = btf_type_by_id(btf, type_id);
  4056. env->log_type_id = type_id;
  4057. if (btf_type_needs_resolve(t) &&
  4058. !env_type_is_resolved(env, type_id)) {
  4059. err = btf_resolve(env, t, type_id);
  4060. if (err)
  4061. return err;
  4062. }
  4063. if (btf_type_is_func_proto(t)) {
  4064. err = btf_func_proto_check(env, t);
  4065. if (err)
  4066. return err;
  4067. }
  4068. }
  4069. return 0;
  4070. }
static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* Type section must align to 4 bytes */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!env->btf->base_btf && !hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	btf->strings = start;

	if (btf->base_btf && !hdr->str_len)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	return 0;
}
static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}
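/* The type and string sections must exactly tile the area that follows the
 * header: after sorting by offset there may be no gap, no overlap and no
 * trailing bytes beyond the last known section.
 */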
  4124. static int btf_check_sec_info(struct btf_verifier_env *env,
  4125. u32 btf_data_size)
  4126. {
  4127. struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
  4128. u32 total, expected_total, i;
  4129. const struct btf_header *hdr;
  4130. const struct btf *btf;
  4131. btf = env->btf;
  4132. hdr = &btf->hdr;
  4133. /* Populate the secs from hdr */
  4134. for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
  4135. secs[i] = *(struct btf_sec_info *)((void *)hdr +
  4136. btf_sec_info_offset[i]);
  4137. sort(secs, ARRAY_SIZE(btf_sec_info_offset),
  4138. sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
  4139. /* Check for gaps and overlap among sections */
  4140. total = 0;
  4141. expected_total = btf_data_size - hdr->hdr_len;
  4142. for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
  4143. if (expected_total < secs[i].off) {
  4144. btf_verifier_log(env, "Invalid section offset");
  4145. return -EINVAL;
  4146. }
  4147. if (total < secs[i].off) {
  4148. /* gap */
  4149. btf_verifier_log(env, "Unsupported section found");
  4150. return -EINVAL;
  4151. }
  4152. if (total > secs[i].off) {
  4153. btf_verifier_log(env, "Section overlap found");
  4154. return -EINVAL;
  4155. }
  4156. if (expected_total - total < secs[i].len) {
  4157. btf_verifier_log(env,
  4158. "Total section length too long");
  4159. return -EINVAL;
  4160. }
  4161. total += secs[i].len;
  4162. }
  4163. /* There is data other than hdr and known sections */
  4164. if (expected_total != total) {
  4165. btf_verifier_log(env, "Unsupported section found");
  4166. return -EINVAL;
  4167. }
  4168. return 0;
  4169. }
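/* Parse and validate the btf_header: the raw data must at least cover
 * hdr_len, any header bytes beyond the kernel's struct btf_header must be
 * zero (forward compatibility), and magic/version/flags must match what
 * this kernel supports before the section layout is checked.
 */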
  4170. static int btf_parse_hdr(struct btf_verifier_env *env)
  4171. {
  4172. u32 hdr_len, hdr_copy, btf_data_size;
  4173. const struct btf_header *hdr;
  4174. struct btf *btf;
  4175. btf = env->btf;
  4176. btf_data_size = btf->data_size;
  4177. if (btf_data_size < offsetofend(struct btf_header, hdr_len)) {
  4178. btf_verifier_log(env, "hdr_len not found");
  4179. return -EINVAL;
  4180. }
  4181. hdr = btf->data;
  4182. hdr_len = hdr->hdr_len;
  4183. if (btf_data_size < hdr_len) {
  4184. btf_verifier_log(env, "btf_header not found");
  4185. return -EINVAL;
  4186. }
  4187. /* Ensure the unsupported header fields are zero */
  4188. if (hdr_len > sizeof(btf->hdr)) {
  4189. u8 *expected_zero = btf->data + sizeof(btf->hdr);
  4190. u8 *end = btf->data + hdr_len;
  4191. for (; expected_zero < end; expected_zero++) {
  4192. if (*expected_zero) {
  4193. btf_verifier_log(env, "Unsupported btf_header");
  4194. return -E2BIG;
  4195. }
  4196. }
  4197. }
  4198. hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
  4199. memcpy(&btf->hdr, btf->data, hdr_copy);
  4200. hdr = &btf->hdr;
  4201. btf_verifier_log_hdr(env, btf_data_size);
  4202. if (hdr->magic != BTF_MAGIC) {
  4203. btf_verifier_log(env, "Invalid magic");
  4204. return -EINVAL;
  4205. }
  4206. if (hdr->version != BTF_VERSION) {
  4207. btf_verifier_log(env, "Unsupported version");
  4208. return -ENOTSUPP;
  4209. }
  4210. if (hdr->flags) {
  4211. btf_verifier_log(env, "Unsupported flags");
  4212. return -ENOTSUPP;
  4213. }
  4214. if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
  4215. btf_verifier_log(env, "No data");
  4216. return -EINVAL;
  4217. }
  4218. return btf_check_sec_info(env, btf_data_size);
  4219. }
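/* Within any chain of modifiers, BTF_KIND_TYPE_TAGs must all come before
 * the other modifiers (const/volatile/restrict/typedef). Chain length is
 * also bounded to guard against cycles in malformed BTF.
 */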
  4220. static int btf_check_type_tags(struct btf_verifier_env *env,
  4221. struct btf *btf, int start_id)
  4222. {
  4223. int i, n, good_id = start_id - 1;
  4224. bool in_tags;
  4225. n = btf_nr_types(btf);
  4226. for (i = start_id; i < n; i++) {
  4227. const struct btf_type *t;
  4228. int chain_limit = 32;
  4229. u32 cur_id = i;
  4230. t = btf_type_by_id(btf, i);
  4231. if (!t)
  4232. return -EINVAL;
  4233. if (!btf_type_is_modifier(t))
  4234. continue;
  4235. cond_resched();
  4236. in_tags = btf_type_is_type_tag(t);
  4237. while (btf_type_is_modifier(t)) {
  4238. if (!chain_limit--) {
  4239. btf_verifier_log(env, "Max chain length or cycle detected");
  4240. return -ELOOP;
  4241. }
  4242. if (btf_type_is_type_tag(t)) {
  4243. if (!in_tags) {
  4244. btf_verifier_log(env, "Type tags don't precede modifiers");
  4245. return -EINVAL;
  4246. }
  4247. } else if (in_tags) {
  4248. in_tags = false;
  4249. }
  4250. if (cur_id <= good_id)
  4251. break;
  4252. /* Move to next type */
  4253. cur_id = t->type;
  4254. t = btf_type_by_id(btf, cur_id);
  4255. if (!t)
  4256. return -EINVAL;
  4257. }
  4258. good_id = i;
  4259. }
  4260. return 0;
  4261. }
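/* Parse BTF supplied by user space: copy the data in, then validate the
 * header, string section, type section and type-tag ordering. An optional
 * verifier log can be requested via log_level/log_ubuf/log_size.
 */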
  4262. static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
  4263. u32 log_level, char __user *log_ubuf, u32 log_size)
  4264. {
  4265. struct btf_verifier_env *env = NULL;
  4266. struct bpf_verifier_log *log;
  4267. struct btf *btf = NULL;
  4268. u8 *data;
  4269. int err;
  4270. if (btf_data_size > BTF_MAX_SIZE)
  4271. return ERR_PTR(-E2BIG);
  4272. env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
  4273. if (!env)
  4274. return ERR_PTR(-ENOMEM);
  4275. log = &env->log;
  4276. if (log_level || log_ubuf || log_size) {
  4277. /* user requested verbose verifier output
  4278. * and supplied buffer to store the verification trace
  4279. */
  4280. log->level = log_level;
  4281. log->ubuf = log_ubuf;
  4282. log->len_total = log_size;
  4283. /* log attributes have to be sane */
  4284. if (!bpf_verifier_log_attr_valid(log)) {
  4285. err = -EINVAL;
  4286. goto errout;
  4287. }
  4288. }
  4289. btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
  4290. if (!btf) {
  4291. err = -ENOMEM;
  4292. goto errout;
  4293. }
  4294. env->btf = btf;
  4295. data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
  4296. if (!data) {
  4297. err = -ENOMEM;
  4298. goto errout;
  4299. }
  4300. btf->data = data;
  4301. btf->data_size = btf_data_size;
  4302. if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
  4303. err = -EFAULT;
  4304. goto errout;
  4305. }
  4306. err = btf_parse_hdr(env);
  4307. if (err)
  4308. goto errout;
  4309. btf->nohdr_data = btf->data + btf->hdr.hdr_len;
  4310. err = btf_parse_str_sec(env);
  4311. if (err)
  4312. goto errout;
  4313. err = btf_parse_type_sec(env);
  4314. if (err)
  4315. goto errout;
  4316. err = btf_check_type_tags(env, btf, 1);
  4317. if (err)
  4318. goto errout;
  4319. if (log->level && bpf_verifier_log_full(log)) {
  4320. err = -ENOSPC;
  4321. goto errout;
  4322. }
  4323. btf_verifier_env_free(env);
  4324. refcount_set(&btf->refcnt, 1);
  4325. return btf;
  4326. errout:
  4327. btf_verifier_env_free(env);
  4328. if (btf)
  4329. btf_free(btf);
  4330. return ERR_PTR(err);
  4331. }
extern char __weak __start_BTF[];
extern char __weak __stop_BTF[];
extern struct btf *btf_vmlinux;

#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
static union {
	struct bpf_ctx_convert {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	prog_ctx_type _id##_prog; \
	kern_ctx_type _id##_kern;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	} *__t;
	/* 't' is written once under lock. Read many times. */
	const struct btf_type *t;
} bpf_ctx_convert;
enum {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	__ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	__ctx_convert_unused, /* to avoid empty enum in extreme .config */
};
static u8 bpf_ctx_convert_map[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = __ctx_convert##_id,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
	0, /* avoid empty array */
};
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
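/* bpf_ctx_convert above lays out, for every program type, a pair of
 * members: the uapi context struct followed by the in-kernel context
 * struct. btf_get_prog_ctx_type() therefore indexes the members with
 * bpf_ctx_convert_map[prog_type] * 2 to find the uapi entry; the kernel
 * counterpart is the member right after it (see btf_translate_to_vmlinux()).
 */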
  4364. static const struct btf_member *
  4365. btf_get_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf,
  4366. const struct btf_type *t, enum bpf_prog_type prog_type,
  4367. int arg)
  4368. {
  4369. const struct btf_type *conv_struct;
  4370. const struct btf_type *ctx_struct;
  4371. const struct btf_member *ctx_type;
  4372. const char *tname, *ctx_tname;
  4373. conv_struct = bpf_ctx_convert.t;
  4374. if (!conv_struct) {
  4375. bpf_log(log, "btf_vmlinux is malformed\n");
  4376. return NULL;
  4377. }
  4378. t = btf_type_by_id(btf, t->type);
  4379. while (btf_type_is_modifier(t))
  4380. t = btf_type_by_id(btf, t->type);
  4381. if (!btf_type_is_struct(t)) {
  4382. /* Only pointer to struct is supported for now.
  4383. * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
  4384. * is not supported yet.
  4385. * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
  4386. */
  4387. return NULL;
  4388. }
  4389. tname = btf_name_by_offset(btf, t->name_off);
  4390. if (!tname) {
  4391. bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
  4392. return NULL;
  4393. }
  4394. /* prog_type is valid bpf program type. No need for bounds check. */
  4395. ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
  4396. /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
  4397. * Like 'struct __sk_buff'
  4398. */
  4399. ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
  4400. if (!ctx_struct)
  4401. /* should not happen */
  4402. return NULL;
  4403. again:
  4404. ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
  4405. if (!ctx_tname) {
  4406. /* should not happen */
  4407. bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
  4408. return NULL;
  4409. }
  4410. /* only compare that prog's ctx type name is the same as
  4411. * kernel expects. No need to compare field by field.
  4412. * It's ok for bpf prog to do:
  4413. * struct __sk_buff {};
  4414. * int socket_filter_bpf_prog(struct __sk_buff *skb)
  4415. * { // no fields of skb are ever used }
  4416. */
  4417. if (strcmp(ctx_tname, tname)) {
  4418. /* bpf_user_pt_regs_t is a typedef, so resolve it to
  4419. * underlying struct and check name again
  4420. */
  4421. if (!btf_type_is_modifier(ctx_struct))
  4422. return NULL;
  4423. while (btf_type_is_modifier(ctx_struct))
  4424. ctx_struct = btf_type_by_id(btf_vmlinux, ctx_struct->type);
  4425. goto again;
  4426. }
  4427. return ctx_type;
  4428. }
  4429. static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
  4430. struct btf *btf,
  4431. const struct btf_type *t,
  4432. enum bpf_prog_type prog_type,
  4433. int arg)
  4434. {
  4435. const struct btf_member *prog_ctx_type, *kern_ctx_type;
  4436. prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
  4437. if (!prog_ctx_type)
  4438. return -ENOENT;
  4439. kern_ctx_type = prog_ctx_type + 1;
  4440. return kern_ctx_type->type;
  4441. }
  4442. BTF_ID_LIST(bpf_ctx_convert_btf_id)
  4443. BTF_ID(struct, bpf_ctx_convert)
  4444. struct btf *btf_parse_vmlinux(void)
  4445. {
  4446. struct btf_verifier_env *env = NULL;
  4447. struct bpf_verifier_log *log;
  4448. struct btf *btf = NULL;
  4449. int err;
  4450. env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
  4451. if (!env)
  4452. return ERR_PTR(-ENOMEM);
  4453. log = &env->log;
  4454. log->level = BPF_LOG_KERNEL;
  4455. btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
  4456. if (!btf) {
  4457. err = -ENOMEM;
  4458. goto errout;
  4459. }
  4460. env->btf = btf;
  4461. btf->data = __start_BTF;
  4462. btf->data_size = __stop_BTF - __start_BTF;
  4463. btf->kernel_btf = true;
  4464. snprintf(btf->name, sizeof(btf->name), "vmlinux");
  4465. err = btf_parse_hdr(env);
  4466. if (err)
  4467. goto errout;
  4468. btf->nohdr_data = btf->data + btf->hdr.hdr_len;
  4469. err = btf_parse_str_sec(env);
  4470. if (err)
  4471. goto errout;
  4472. err = btf_check_all_metas(env);
  4473. if (err)
  4474. goto errout;
  4475. err = btf_check_type_tags(env, btf, 1);
  4476. if (err)
  4477. goto errout;
  4478. /* btf_parse_vmlinux() runs under bpf_verifier_lock */
  4479. bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
  4480. bpf_struct_ops_init(btf, log);
  4481. refcount_set(&btf->refcnt, 1);
  4482. err = btf_alloc_id(btf);
  4483. if (err)
  4484. goto errout;
  4485. btf_verifier_env_free(env);
  4486. return btf;
  4487. errout:
  4488. btf_verifier_env_free(env);
  4489. if (btf) {
  4490. kvfree(btf->types);
  4491. kfree(btf);
  4492. }
  4493. return ERR_PTR(err);
  4494. }
  4495. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
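/* Module BTF is split BTF: it is parsed on top of vmlinux BTF as its base,
 * so type IDs and string offsets continue from base_btf's nr_types and
 * str_len rather than starting from scratch.
 */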
  4496. static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size)
  4497. {
  4498. struct btf_verifier_env *env = NULL;
  4499. struct bpf_verifier_log *log;
  4500. struct btf *btf = NULL, *base_btf;
  4501. int err;
  4502. base_btf = bpf_get_btf_vmlinux();
  4503. if (IS_ERR(base_btf))
  4504. return base_btf;
  4505. if (!base_btf)
  4506. return ERR_PTR(-EINVAL);
  4507. env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
  4508. if (!env)
  4509. return ERR_PTR(-ENOMEM);
  4510. log = &env->log;
  4511. log->level = BPF_LOG_KERNEL;
  4512. btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
  4513. if (!btf) {
  4514. err = -ENOMEM;
  4515. goto errout;
  4516. }
  4517. env->btf = btf;
  4518. btf->base_btf = base_btf;
  4519. btf->start_id = base_btf->nr_types;
  4520. btf->start_str_off = base_btf->hdr.str_len;
  4521. btf->kernel_btf = true;
  4522. snprintf(btf->name, sizeof(btf->name), "%s", module_name);
  4523. btf->data = kvmalloc(data_size, GFP_KERNEL | __GFP_NOWARN);
  4524. if (!btf->data) {
  4525. err = -ENOMEM;
  4526. goto errout;
  4527. }
  4528. memcpy(btf->data, data, data_size);
  4529. btf->data_size = data_size;
  4530. err = btf_parse_hdr(env);
  4531. if (err)
  4532. goto errout;
  4533. btf->nohdr_data = btf->data + btf->hdr.hdr_len;
  4534. err = btf_parse_str_sec(env);
  4535. if (err)
  4536. goto errout;
  4537. err = btf_check_all_metas(env);
  4538. if (err)
  4539. goto errout;
  4540. err = btf_check_type_tags(env, btf, btf_nr_types(base_btf));
  4541. if (err)
  4542. goto errout;
  4543. btf_verifier_env_free(env);
  4544. refcount_set(&btf->refcnt, 1);
  4545. return btf;
  4546. errout:
  4547. btf_verifier_env_free(env);
  4548. if (btf) {
  4549. kvfree(btf->data);
  4550. kvfree(btf->types);
  4551. kfree(btf);
  4552. }
  4553. return ERR_PTR(err);
  4554. }
  4555. #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{
	struct bpf_prog *tgt_prog = prog->aux->dst_prog;

	if (tgt_prog)
		return tgt_prog->aux->btf;
	else
		return prog->aux->attach_btf;
}

static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
{
	/* skip modifiers */
	t = btf_type_skip_modifiers(btf, t->type, NULL);

	return btf_type_is_int(t);
}
static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
			   int off)
{
	const struct btf_param *args;
	const struct btf_type *t;
	u32 offset = 0, nr_args;
	int i;

	if (!func_proto)
		return off / 8;

	nr_args = btf_type_vlen(func_proto);
	args = (const struct btf_param *)(func_proto + 1);
	for (i = 0; i < nr_args; i++) {
		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
		if (off < offset)
			return i;
	}

	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
	if (off < offset)
		return nr_args;

	return nr_args + 1;
}
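/* Check a BPF program's access to its context at 'off'. Context offsets
 * are 8-byte aligned slots; the offset is mapped to an argument index of
 * the attach target's prototype (or to the slot just past the arguments,
 * used by fexit/fmod_ret/LSM return-value access) and the access is then
 * validated against the BTF of that argument.
 */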
  4593. bool btf_ctx_access(int off, int size, enum bpf_access_type type,
  4594. const struct bpf_prog *prog,
  4595. struct bpf_insn_access_aux *info)
  4596. {
  4597. const struct btf_type *t = prog->aux->attach_func_proto;
  4598. struct bpf_prog *tgt_prog = prog->aux->dst_prog;
  4599. struct btf *btf = bpf_prog_get_target_btf(prog);
  4600. const char *tname = prog->aux->attach_func_name;
  4601. struct bpf_verifier_log *log = info->log;
  4602. const struct btf_param *args;
  4603. const char *tag_value;
  4604. u32 nr_args, arg;
  4605. int i, ret;
  4606. if (off % 8) {
  4607. bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
  4608. tname, off);
  4609. return false;
  4610. }
  4611. arg = get_ctx_arg_idx(btf, t, off);
  4612. args = (const struct btf_param *)(t + 1);
  4613. /* if (t == NULL) Fall back to default BPF prog with
  4614. * MAX_BPF_FUNC_REG_ARGS u64 arguments.
  4615. */
  4616. nr_args = t ? btf_type_vlen(t) : MAX_BPF_FUNC_REG_ARGS;
  4617. if (prog->aux->attach_btf_trace) {
  4618. /* skip first 'void *__data' argument in btf_trace_##name typedef */
  4619. args++;
  4620. nr_args--;
  4621. }
  4622. if (arg > nr_args) {
  4623. bpf_log(log, "func '%s' doesn't have %d-th argument\n",
  4624. tname, arg + 1);
  4625. return false;
  4626. }
  4627. if (arg == nr_args) {
  4628. switch (prog->expected_attach_type) {
  4629. case BPF_LSM_CGROUP:
  4630. case BPF_LSM_MAC:
  4631. case BPF_TRACE_FEXIT:
  4632. /* When LSM programs are attached to void LSM hooks
  4633. * they use FEXIT trampolines and when attached to
  4634. * int LSM hooks, they use MODIFY_RETURN trampolines.
  4635. *
  4636. * While the LSM programs are BPF_MODIFY_RETURN-like
  4637. * the check:
  4638. *
  4639. * if (ret_type != 'int')
  4640. * return -EINVAL;
  4641. *
  4642. * is _not_ done here. This is still safe as LSM hooks
  4643. * have only void and int return types.
  4644. */
  4645. if (!t)
  4646. return true;
  4647. t = btf_type_by_id(btf, t->type);
  4648. break;
  4649. case BPF_MODIFY_RETURN:
  4650. /* For now the BPF_MODIFY_RETURN can only be attached to
  4651. * functions that return an int.
  4652. */
  4653. if (!t)
  4654. return false;
  4655. t = btf_type_skip_modifiers(btf, t->type, NULL);
  4656. if (!btf_type_is_small_int(t)) {
  4657. bpf_log(log,
  4658. "ret type %s not allowed for fmod_ret\n",
  4659. btf_type_str(t));
  4660. return false;
  4661. }
  4662. break;
  4663. default:
  4664. bpf_log(log, "func '%s' doesn't have %d-th argument\n",
  4665. tname, arg + 1);
  4666. return false;
  4667. }
  4668. } else {
  4669. if (!t)
  4670. /* Default prog with MAX_BPF_FUNC_REG_ARGS args */
  4671. return true;
  4672. t = btf_type_by_id(btf, args[arg].type);
  4673. }
  4674. /* skip modifiers */
  4675. while (btf_type_is_modifier(t))
  4676. t = btf_type_by_id(btf, t->type);
  4677. if (btf_type_is_small_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
  4678. /* accessing a scalar */
  4679. return true;
  4680. if (!btf_type_is_ptr(t)) {
  4681. bpf_log(log,
  4682. "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
  4683. tname, arg,
  4684. __btf_name_by_offset(btf, t->name_off),
  4685. btf_type_str(t));
  4686. return false;
  4687. }
  4688. /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
  4689. for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
  4690. const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
  4691. u32 type, flag;
  4692. type = base_type(ctx_arg_info->reg_type);
  4693. flag = type_flag(ctx_arg_info->reg_type);
  4694. if (ctx_arg_info->offset == off && type == PTR_TO_BUF &&
  4695. (flag & PTR_MAYBE_NULL)) {
  4696. info->reg_type = ctx_arg_info->reg_type;
  4697. return true;
  4698. }
  4699. }
  4700. if (t->type == 0)
  4701. /* This is a pointer to void.
  4702. * It is the same as scalar from the verifier safety pov.
  4703. * No further pointer walking is allowed.
  4704. */
  4705. return true;
  4706. if (is_int_ptr(btf, t))
  4707. return true;
  4708. /* this is a pointer to another type */
  4709. for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
  4710. const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
  4711. if (ctx_arg_info->offset == off) {
  4712. if (!ctx_arg_info->btf_id) {
  4713. bpf_log(log,"invalid btf_id for context argument offset %u\n", off);
  4714. return false;
  4715. }
  4716. info->reg_type = ctx_arg_info->reg_type;
  4717. info->btf = btf_vmlinux;
  4718. info->btf_id = ctx_arg_info->btf_id;
  4719. return true;
  4720. }
  4721. }
  4722. info->reg_type = PTR_TO_BTF_ID;
  4723. if (tgt_prog) {
  4724. enum bpf_prog_type tgt_type;
  4725. if (tgt_prog->type == BPF_PROG_TYPE_EXT)
  4726. tgt_type = tgt_prog->aux->saved_dst_prog_type;
  4727. else
  4728. tgt_type = tgt_prog->type;
  4729. ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
  4730. if (ret > 0) {
  4731. info->btf = btf_vmlinux;
  4732. info->btf_id = ret;
  4733. return true;
  4734. } else {
  4735. return false;
  4736. }
  4737. }
  4738. info->btf = btf;
  4739. info->btf_id = t->type;
  4740. t = btf_type_by_id(btf, t->type);
  4741. if (btf_type_is_type_tag(t)) {
  4742. tag_value = __btf_name_by_offset(btf, t->name_off);
  4743. if (strcmp(tag_value, "user") == 0)
  4744. info->reg_type |= MEM_USER;
  4745. if (strcmp(tag_value, "percpu") == 0)
  4746. info->reg_type |= MEM_PERCPU;
  4747. }
  4748. /* skip modifiers */
  4749. while (btf_type_is_modifier(t)) {
  4750. info->btf_id = t->type;
  4751. t = btf_type_by_id(btf, t->type);
  4752. }
  4753. if (!btf_type_is_struct(t)) {
  4754. bpf_log(log,
  4755. "func '%s' arg%d type %s is not a struct\n",
  4756. tname, arg, btf_type_str(t));
  4757. return false;
  4758. }
  4759. bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
  4760. tname, arg, info->btf_id, btf_type_str(t),
  4761. __btf_name_by_offset(btf, t->name_off));
  4762. return true;
  4763. }
enum bpf_struct_walk_result {
	/* < 0 error */
	WALK_SCALAR = 0,
	WALK_PTR,
	WALK_STRUCT,
};
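/* Walk a struct's BTF to find the member covering [off, off + size).
 * Returns WALK_SCALAR for scalar/bitfield accesses, WALK_PTR (with
 * *next_btf_id and *flag set) when a pointer to a struct is read,
 * WALK_STRUCT when the offset lands exactly on a nested struct that the
 * caller should descend into, or a negative error.
 */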
  4770. static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
  4771. const struct btf_type *t, int off, int size,
  4772. u32 *next_btf_id, enum bpf_type_flag *flag)
  4773. {
  4774. u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
  4775. const struct btf_type *mtype, *elem_type = NULL;
  4776. const struct btf_member *member;
  4777. const char *tname, *mname, *tag_value;
  4778. u32 vlen, elem_id, mid;
  4779. again:
  4780. tname = __btf_name_by_offset(btf, t->name_off);
  4781. if (!btf_type_is_struct(t)) {
  4782. bpf_log(log, "Type '%s' is not a struct\n", tname);
  4783. return -EINVAL;
  4784. }
  4785. vlen = btf_type_vlen(t);
  4786. if (off + size > t->size) {
  4787. /* If the last element is a variable size array, we may
  4788. * need to relax the rule.
  4789. */
  4790. struct btf_array *array_elem;
  4791. if (vlen == 0)
  4792. goto error;
  4793. member = btf_type_member(t) + vlen - 1;
  4794. mtype = btf_type_skip_modifiers(btf, member->type,
  4795. NULL);
  4796. if (!btf_type_is_array(mtype))
  4797. goto error;
  4798. array_elem = (struct btf_array *)(mtype + 1);
  4799. if (array_elem->nelems != 0)
  4800. goto error;
  4801. moff = __btf_member_bit_offset(t, member) / 8;
  4802. if (off < moff)
  4803. goto error;
  4804. /* Only allow structure for now, can be relaxed for
  4805. * other types later.
  4806. */
  4807. t = btf_type_skip_modifiers(btf, array_elem->type,
  4808. NULL);
  4809. if (!btf_type_is_struct(t))
  4810. goto error;
  4811. off = (off - moff) % t->size;
  4812. goto again;
  4813. error:
  4814. bpf_log(log, "access beyond struct %s at off %u size %u\n",
  4815. tname, off, size);
  4816. return -EACCES;
  4817. }
  4818. for_each_member(i, t, member) {
  4819. /* offset of the field in bytes */
  4820. moff = __btf_member_bit_offset(t, member) / 8;
  4821. if (off + size <= moff)
  4822. /* won't find anything, field is already too far */
  4823. break;
  4824. if (__btf_member_bitfield_size(t, member)) {
  4825. u32 end_bit = __btf_member_bit_offset(t, member) +
  4826. __btf_member_bitfield_size(t, member);
  4827. /* off <= moff instead of off == moff because clang
  4828. * does not generate a BTF member for anonymous
  4829. * bitfield like the ":16" here:
  4830. * struct {
  4831. * int :16;
  4832. * int x:8;
  4833. * };
  4834. */
  4835. if (off <= moff &&
  4836. BITS_ROUNDUP_BYTES(end_bit) <= off + size)
  4837. return WALK_SCALAR;
  4838. /* off may be accessing a following member
  4839. *
  4840. * or
  4841. *
  4842. * Doing partial access at either end of this
  4843. * bitfield. Continue on this case also to
  4844. * treat it as not accessing this bitfield
  4845. * and eventually error out as field not
  4846. * found to keep it simple.
  4847. * It could be relaxed if there was a legit
  4848. * partial access case later.
  4849. */
  4850. continue;
  4851. }
  4852. /* In case of "off" is pointing to holes of a struct */
  4853. if (off < moff)
  4854. break;
  4855. /* type of the field */
  4856. mid = member->type;
  4857. mtype = btf_type_by_id(btf, member->type);
  4858. mname = __btf_name_by_offset(btf, member->name_off);
  4859. mtype = __btf_resolve_size(btf, mtype, &msize,
  4860. &elem_type, &elem_id, &total_nelems,
  4861. &mid);
  4862. if (IS_ERR(mtype)) {
  4863. bpf_log(log, "field %s doesn't have size\n", mname);
  4864. return -EFAULT;
  4865. }
  4866. mtrue_end = moff + msize;
  4867. if (off >= mtrue_end)
  4868. /* no overlap with member, keep iterating */
  4869. continue;
  4870. if (btf_type_is_array(mtype)) {
  4871. u32 elem_idx;
  4872. /* __btf_resolve_size() above helps to
  4873. * linearize a multi-dimensional array.
  4874. *
  4875. * The logic here is treating an array
  4876. * in a struct as the following way:
  4877. *
  4878. * struct outer {
  4879. * struct inner array[2][2];
  4880. * };
  4881. *
  4882. * looks like:
  4883. *
  4884. * struct outer {
  4885. * struct inner array_elem0;
  4886. * struct inner array_elem1;
  4887. * struct inner array_elem2;
  4888. * struct inner array_elem3;
  4889. * };
  4890. *
  4891. * When accessing outer->array[1][0], it moves
  4892. * moff to "array_elem2", set mtype to
  4893. * "struct inner", and msize also becomes
  4894. * sizeof(struct inner). Then most of the
  4895. * remaining logic will fall through without
  4896. * caring the current member is an array or
  4897. * not.
  4898. *
  4899. * Unlike mtype/msize/moff, mtrue_end does not
  4900. * change. The naming difference ("_true") tells
  4901. * that it is not always corresponding to
  4902. * the current mtype/msize/moff.
  4903. * It is the true end of the current
  4904. * member (i.e. array in this case). That
  4905. * will allow an int array to be accessed like
  4906. * a scratch space,
  4907. * i.e. allow access beyond the size of
  4908. * the array's element as long as it is
  4909. * within the mtrue_end boundary.
  4910. */
  4911. /* skip empty array */
  4912. if (moff == mtrue_end)
  4913. continue;
  4914. msize /= total_nelems;
  4915. elem_idx = (off - moff) / msize;
  4916. moff += elem_idx * msize;
  4917. mtype = elem_type;
  4918. mid = elem_id;
  4919. }
  4920. /* the 'off' we're looking for is either equal to start
  4921. * of this field or inside of this struct
  4922. */
  4923. if (btf_type_is_struct(mtype)) {
  4924. /* our field must be inside that union or struct */
  4925. t = mtype;
  4926. /* return if the offset matches the member offset */
  4927. if (off == moff) {
  4928. *next_btf_id = mid;
  4929. return WALK_STRUCT;
  4930. }
  4931. /* adjust offset we're looking for */
  4932. off -= moff;
  4933. goto again;
  4934. }
  4935. if (btf_type_is_ptr(mtype)) {
  4936. const struct btf_type *stype, *t;
  4937. enum bpf_type_flag tmp_flag = 0;
  4938. u32 id;
  4939. if (msize != size || off != moff) {
  4940. bpf_log(log,
  4941. "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
  4942. mname, moff, tname, off, size);
  4943. return -EACCES;
  4944. }
  4945. /* check type tag */
  4946. t = btf_type_by_id(btf, mtype->type);
  4947. if (btf_type_is_type_tag(t)) {
  4948. tag_value = __btf_name_by_offset(btf, t->name_off);
  4949. /* check __user tag */
  4950. if (strcmp(tag_value, "user") == 0)
  4951. tmp_flag = MEM_USER;
  4952. /* check __percpu tag */
  4953. if (strcmp(tag_value, "percpu") == 0)
  4954. tmp_flag = MEM_PERCPU;
  4955. }
  4956. stype = btf_type_skip_modifiers(btf, mtype->type, &id);
  4957. if (btf_type_is_struct(stype)) {
  4958. *next_btf_id = id;
  4959. *flag = tmp_flag;
  4960. return WALK_PTR;
  4961. }
  4962. }
  4963. /* Allow more flexible access within an int as long as
  4964. * it is within mtrue_end.
  4965. * Since mtrue_end could be the end of an array,
  4966. * that also allows using an array of int as a scratch
  4967. * space. e.g. skb->cb[].
  4968. */
  4969. if (off + size > mtrue_end && !(*flag & PTR_UNTRUSTED)) {
  4970. bpf_log(log,
  4971. "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
  4972. mname, mtrue_end, tname, off, size);
  4973. return -EACCES;
  4974. }
  4975. return WALK_SCALAR;
  4976. }
  4977. bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
  4978. return -EINVAL;
  4979. }
  4980. int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
  4981. const struct btf_type *t, int off, int size,
  4982. enum bpf_access_type atype __maybe_unused,
  4983. u32 *next_btf_id, enum bpf_type_flag *flag)
  4984. {
  4985. enum bpf_type_flag tmp_flag = 0;
  4986. int err;
  4987. u32 id;
  4988. do {
  4989. err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
  4990. switch (err) {
  4991. case WALK_PTR:
  4992. /* If we found the pointer or scalar on t+off,
  4993. * we're done.
  4994. */
  4995. *next_btf_id = id;
  4996. *flag = tmp_flag;
  4997. return PTR_TO_BTF_ID;
  4998. case WALK_SCALAR:
  4999. return SCALAR_VALUE;
  5000. case WALK_STRUCT:
  5001. /* We found nested struct, so continue the search
  5002. * by diving in it. At this point the offset is
  5003. * aligned with the new type, so set it to 0.
  5004. */
  5005. t = btf_type_by_id(btf, id);
  5006. off = 0;
  5007. break;
  5008. default:
  5009. /* It's either error or unknown return value..
  5010. * scream and leave.
  5011. */
  5012. if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
  5013. return -EINVAL;
  5014. return err;
  5015. }
  5016. } while (t);
  5017. return -EINVAL;
  5018. }
/* Check that two BTF types, each specified as a BTF object + id, are exactly
 * the same. Trivial ID check is not enough due to module BTFs, because we can
 * end up with two different module BTFs, but IDs point to the common type in
 * vmlinux BTF.
 */
static bool btf_types_are_same(const struct btf *btf1, u32 id1,
			       const struct btf *btf2, u32 id2)
{
	if (id1 != id2)
		return false;
	if (btf1 == btf2)
		return true;
	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
}
  5033. bool btf_struct_ids_match(struct bpf_verifier_log *log,
  5034. const struct btf *btf, u32 id, int off,
  5035. const struct btf *need_btf, u32 need_type_id,
  5036. bool strict)
  5037. {
  5038. const struct btf_type *type;
  5039. enum bpf_type_flag flag;
  5040. int err;
  5041. /* Are we already done? */
  5042. if (off == 0 && btf_types_are_same(btf, id, need_btf, need_type_id))
  5043. return true;
  5044. /* In case of strict type match, we do not walk struct, the top level
  5045. * type match must succeed. When strict is true, off should have already
  5046. * been 0.
  5047. */
  5048. if (strict)
  5049. return false;
  5050. again:
  5051. type = btf_type_by_id(btf, id);
  5052. if (!type)
  5053. return false;
  5054. err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
  5055. if (err != WALK_STRUCT)
  5056. return false;
  5057. /* We found nested struct object. If it matches
  5058. * the requested ID, we're done. Otherwise let's
  5059. * continue the search with offset 0 in the new
  5060. * type.
  5061. */
  5062. if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
  5063. off = 0;
  5064. goto again;
  5065. }
  5066. return true;
  5067. }
static int __get_type_size(struct btf *btf, u32 btf_id,
			   const struct btf_type **ret_type)
{
	const struct btf_type *t;

	*ret_type = btf_type_by_id(btf, 0);
	if (!btf_id)
		/* void */
		return 0;
	t = btf_type_by_id(btf, btf_id);
	while (t && btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!t)
		return -EINVAL;
	*ret_type = t;
	if (btf_type_is_ptr(t))
		/* kernel size of pointer. Not BPF's size of pointer */
		return sizeof(void *);
	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
		return t->size;
	return -EINVAL;
}
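/* Distill a function's BTF prototype into a btf_func_model (number of
 * args, per-arg sizes/flags and return size), e.g. for trampoline setup.
 * Without BTF, fall back to MAX_BPF_FUNC_REG_ARGS 8-byte args; varargs are
 * rejected and struct arguments are only supported up to 16 bytes.
 */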
  5089. int btf_distill_func_proto(struct bpf_verifier_log *log,
  5090. struct btf *btf,
  5091. const struct btf_type *func,
  5092. const char *tname,
  5093. struct btf_func_model *m)
  5094. {
  5095. const struct btf_param *args;
  5096. const struct btf_type *t;
  5097. u32 i, nargs;
  5098. int ret;
  5099. if (!func) {
  5100. /* BTF function prototype doesn't match the verifier types.
  5101. * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
  5102. */
  5103. for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
  5104. m->arg_size[i] = 8;
  5105. m->arg_flags[i] = 0;
  5106. }
  5107. m->ret_size = 8;
  5108. m->nr_args = MAX_BPF_FUNC_REG_ARGS;
  5109. return 0;
  5110. }
  5111. args = (const struct btf_param *)(func + 1);
  5112. nargs = btf_type_vlen(func);
  5113. if (nargs > MAX_BPF_FUNC_ARGS) {
  5114. bpf_log(log,
  5115. "The function %s has %d arguments. Too many.\n",
  5116. tname, nargs);
  5117. return -EINVAL;
  5118. }
  5119. ret = __get_type_size(btf, func->type, &t);
  5120. if (ret < 0 || __btf_type_is_struct(t)) {
  5121. bpf_log(log,
  5122. "The function %s return type %s is unsupported.\n",
  5123. tname, btf_type_str(t));
  5124. return -EINVAL;
  5125. }
  5126. m->ret_size = ret;
  5127. for (i = 0; i < nargs; i++) {
  5128. if (i == nargs - 1 && args[i].type == 0) {
  5129. bpf_log(log,
  5130. "The function %s with variable args is unsupported.\n",
  5131. tname);
  5132. return -EINVAL;
  5133. }
  5134. ret = __get_type_size(btf, args[i].type, &t);
  5135. /* No support of struct argument size greater than 16 bytes */
  5136. if (ret < 0 || ret > 16) {
  5137. bpf_log(log,
  5138. "The function %s arg%d type %s is unsupported.\n",
  5139. tname, i, btf_type_str(t));
  5140. return -EINVAL;
  5141. }
  5142. if (ret == 0) {
  5143. bpf_log(log,
  5144. "The function %s has malformed void argument.\n",
  5145. tname);
  5146. return -EINVAL;
  5147. }
  5148. m->arg_size[i] = ret;
  5149. m->arg_flags[i] = __btf_type_is_struct(t) ? BTF_FMODEL_STRUCT_ARG : 0;
  5150. }
  5151. m->nr_args = nargs;
  5152. return 0;
  5153. }
  5154. /* Compare BTFs of two functions assuming only scalars and pointers to context.
  5155. * t1 points to BTF_KIND_FUNC in btf1
  5156. * t2 points to BTF_KIND_FUNC in btf2
  5157. * Returns:
  5158. * EINVAL - function prototype mismatch
  5159. * EFAULT - verifier bug
  5160. * 0 - 99% match. The last 1% is validated by the verifier.
  5161. */
  5162. static int btf_check_func_type_match(struct bpf_verifier_log *log,
  5163. struct btf *btf1, const struct btf_type *t1,
  5164. struct btf *btf2, const struct btf_type *t2)
  5165. {
  5166. const struct btf_param *args1, *args2;
  5167. const char *fn1, *fn2, *s1, *s2;
  5168. u32 nargs1, nargs2, i;
  5169. fn1 = btf_name_by_offset(btf1, t1->name_off);
  5170. fn2 = btf_name_by_offset(btf2, t2->name_off);
  5171. if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
  5172. bpf_log(log, "%s() is not a global function\n", fn1);
  5173. return -EINVAL;
  5174. }
  5175. if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
  5176. bpf_log(log, "%s() is not a global function\n", fn2);
  5177. return -EINVAL;
  5178. }
  5179. t1 = btf_type_by_id(btf1, t1->type);
  5180. if (!t1 || !btf_type_is_func_proto(t1))
  5181. return -EFAULT;
  5182. t2 = btf_type_by_id(btf2, t2->type);
  5183. if (!t2 || !btf_type_is_func_proto(t2))
  5184. return -EFAULT;
  5185. args1 = (const struct btf_param *)(t1 + 1);
  5186. nargs1 = btf_type_vlen(t1);
  5187. args2 = (const struct btf_param *)(t2 + 1);
  5188. nargs2 = btf_type_vlen(t2);
  5189. if (nargs1 != nargs2) {
  5190. bpf_log(log, "%s() has %d args while %s() has %d args\n",
  5191. fn1, nargs1, fn2, nargs2);
  5192. return -EINVAL;
  5193. }
  5194. t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
  5195. t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
  5196. if (t1->info != t2->info) {
  5197. bpf_log(log,
  5198. "Return type %s of %s() doesn't match type %s of %s()\n",
  5199. btf_type_str(t1), fn1,
  5200. btf_type_str(t2), fn2);
  5201. return -EINVAL;
  5202. }
  5203. for (i = 0; i < nargs1; i++) {
  5204. t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
  5205. t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
  5206. if (t1->info != t2->info) {
  5207. bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
  5208. i, fn1, btf_type_str(t1),
  5209. fn2, btf_type_str(t2));
  5210. return -EINVAL;
  5211. }
  5212. if (btf_type_has_size(t1) && t1->size != t2->size) {
  5213. bpf_log(log,
  5214. "arg%d in %s() has size %d while %s() has %d\n",
  5215. i, fn1, t1->size,
  5216. fn2, t2->size);
  5217. return -EINVAL;
  5218. }
  5219. /* global functions are validated with scalars and pointers
  5220. * to context only. And only global functions can be replaced.
  5221. * Hence type check only those types.
  5222. */
  5223. if (btf_type_is_int(t1) || btf_is_any_enum(t1))
  5224. continue;
  5225. if (!btf_type_is_ptr(t1)) {
  5226. bpf_log(log,
  5227. "arg%d in %s() has unrecognized type\n",
  5228. i, fn1);
  5229. return -EINVAL;
  5230. }
  5231. t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
  5232. t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
  5233. if (!btf_type_is_struct(t1)) {
  5234. bpf_log(log,
  5235. "arg%d in %s() is not a pointer to context\n",
  5236. i, fn1);
  5237. return -EINVAL;
  5238. }
  5239. if (!btf_type_is_struct(t2)) {
  5240. bpf_log(log,
  5241. "arg%d in %s() is not a pointer to context\n",
  5242. i, fn2);
  5243. return -EINVAL;
  5244. }
  5245. /* This is an optional check to make program writing easier.
  5246. * Compare names of structs and report an error to the user.
  5247. * btf_prepare_func_args() already checked that t2 struct
  5248. * is a context type. btf_prepare_func_args() will check
  5249. * later that t1 struct is a context type as well.
  5250. */
  5251. s1 = btf_name_by_offset(btf1, t1->name_off);
  5252. s2 = btf_name_by_offset(btf2, t2->name_off);
  5253. if (strcmp(s1, s2)) {
  5254. bpf_log(log,
  5255. "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
  5256. i, fn1, s1, fn2, s2);
  5257. return -EINVAL;
  5258. }
  5259. }
  5260. return 0;
  5261. }
  5262. /* Compare BTFs of given program with BTF of target program */
  5263. int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
  5264. struct btf *btf2, const struct btf_type *t2)
  5265. {
  5266. struct btf *btf1 = prog->aux->btf;
  5267. const struct btf_type *t1;
  5268. u32 btf_id = 0;
  5269. if (!prog->aux->func_info) {
  5270. bpf_log(log, "Program extension requires BTF\n");
  5271. return -EINVAL;
  5272. }
  5273. btf_id = prog->aux->func_info[0].type_id;
  5274. if (!btf_id)
  5275. return -EFAULT;
  5276. t1 = btf_type_by_id(btf1, btf_id);
  5277. if (!t1 || !btf_type_is_func(t1))
  5278. return -EFAULT;
  5279. return btf_check_func_type_match(log, btf1, t1, btf2, t2);
  5280. }
static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
#ifdef CONFIG_NET
	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
#endif
};
/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int rec)
{
	const struct btf_type *member_type;
	const struct btf_member *member;
	u32 i;

	if (!btf_type_is_struct(t))
		return false;

	for_each_member(i, t, member) {
		const struct btf_array *array;

		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
		if (btf_type_is_struct(member_type)) {
			if (rec >= 3) {
				bpf_log(log, "max struct nesting depth exceeded\n");
				return false;
			}
			if (!__btf_type_is_scalar_struct(log, btf, member_type, rec + 1))
				return false;
			continue;
		}
		if (btf_type_is_array(member_type)) {
			array = btf_type_array(member_type);
			if (!array->nelems)
				return false;
			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
			if (!btf_type_is_scalar(member_type))
				return false;
			continue;
		}
		if (!btf_type_is_scalar(member_type))
			return false;
	}
	return true;
}
static bool is_kfunc_arg_mem_size(const struct btf *btf,
				  const struct btf_param *arg,
				  const struct bpf_reg_state *reg)
{
	int len, sfx_len = sizeof("__sz") - 1;
	const struct btf_type *t;
	const char *param_name;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
		return false;

	/* In the future, this can be ported to use BTF tagging */
	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
		return false;
	len = strlen(param_name);
	if (len < sfx_len)
		return false;
	param_name += len - sfx_len;
	if (strncmp(param_name, "__sz", sfx_len))
		return false;

	return true;
}

static bool btf_is_kfunc_arg_mem_size(const struct btf *btf,
				      const struct btf_param *arg,
				      const struct bpf_reg_state *reg,
				      const char *name)
{
	int len, target_len = strlen(name);
	const struct btf_type *t;
	const char *param_name;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
		return false;

	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
		return false;
	len = strlen(param_name);
	if (len != target_len)
		return false;
	if (strcmp(param_name, name))
		return false;

	return true;
}
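/* Match the verifier's view of argument registers against a function's
 * BTF prototype (used for BPF subprogs and kfuncs). Scalar arguments must
 * be SCALAR_VALUE registers; pointer arguments are checked further below,
 * and for kfuncs the KF_RELEASE/KF_KPTR_GET/KF_TRUSTED_ARGS/KF_SLEEPABLE
 * flags add referenced-pointer and offset constraints.
 */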
  5367. static int btf_check_func_arg_match(struct bpf_verifier_env *env,
  5368. const struct btf *btf, u32 func_id,
  5369. struct bpf_reg_state *regs,
  5370. bool ptr_to_mem_ok,
  5371. struct bpf_kfunc_arg_meta *kfunc_meta,
  5372. bool processing_call)
  5373. {
  5374. enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
  5375. bool rel = false, kptr_get = false, trusted_args = false;
  5376. bool sleepable = false;
  5377. struct bpf_verifier_log *log = &env->log;
  5378. u32 i, nargs, ref_id, ref_obj_id = 0;
  5379. bool is_kfunc = btf_is_kernel(btf);
  5380. const char *func_name, *ref_tname;
  5381. const struct btf_type *t, *ref_t;
  5382. const struct btf_param *args;
  5383. int ref_regno = 0, ret;
  5384. t = btf_type_by_id(btf, func_id);
  5385. if (!t || !btf_type_is_func(t)) {
  5386. /* These checks were already done by the verifier while loading
  5387. * struct bpf_func_info or in add_kfunc_call().
  5388. */
  5389. bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
  5390. func_id);
  5391. return -EFAULT;
  5392. }
  5393. func_name = btf_name_by_offset(btf, t->name_off);
  5394. t = btf_type_by_id(btf, t->type);
  5395. if (!t || !btf_type_is_func_proto(t)) {
  5396. bpf_log(log, "Invalid BTF of func %s\n", func_name);
  5397. return -EFAULT;
  5398. }
  5399. args = (const struct btf_param *)(t + 1);
  5400. nargs = btf_type_vlen(t);
  5401. if (nargs > MAX_BPF_FUNC_REG_ARGS) {
  5402. bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
  5403. MAX_BPF_FUNC_REG_ARGS);
  5404. return -EINVAL;
  5405. }
  5406. if (is_kfunc && kfunc_meta) {
  5407. /* Only kfunc can be release func */
  5408. rel = kfunc_meta->flags & KF_RELEASE;
  5409. kptr_get = kfunc_meta->flags & KF_KPTR_GET;
  5410. trusted_args = kfunc_meta->flags & KF_TRUSTED_ARGS;
  5411. sleepable = kfunc_meta->flags & KF_SLEEPABLE;
  5412. }
  5413. /* check that BTF function arguments match actual types that the
  5414. * verifier sees.
  5415. */
  5416. for (i = 0; i < nargs; i++) {
  5417. enum bpf_arg_type arg_type = ARG_DONTCARE;
  5418. u32 regno = i + 1;
  5419. struct bpf_reg_state *reg = &regs[regno];
  5420. bool obj_ptr = false;
  5421. t = btf_type_skip_modifiers(btf, args[i].type, NULL);
  5422. if (btf_type_is_scalar(t)) {
  5423. if (is_kfunc && kfunc_meta) {
  5424. bool is_buf_size = false;
  5425. /* check for any const scalar parameter of name "rdonly_buf_size"
  5426. * or "rdwr_buf_size"
  5427. */
  5428. if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
  5429. "rdonly_buf_size")) {
  5430. kfunc_meta->r0_rdonly = true;
  5431. is_buf_size = true;
  5432. } else if (btf_is_kfunc_arg_mem_size(btf, &args[i], reg,
  5433. "rdwr_buf_size"))
  5434. is_buf_size = true;
  5435. if (is_buf_size) {
  5436. if (kfunc_meta->r0_size) {
  5437. bpf_log(log, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
  5438. return -EINVAL;
  5439. }
  5440. if (!tnum_is_const(reg->var_off)) {
  5441. bpf_log(log, "R%d is not a const\n", regno);
  5442. return -EINVAL;
  5443. }
  5444. kfunc_meta->r0_size = reg->var_off.value;
  5445. ret = mark_chain_precision(env, regno);
  5446. if (ret)
  5447. return ret;
  5448. }
  5449. }
  5450. if (reg->type == SCALAR_VALUE)
  5451. continue;
  5452. bpf_log(log, "R%d is not a scalar\n", regno);
  5453. return -EINVAL;
  5454. }
  5455. if (!btf_type_is_ptr(t)) {
  5456. bpf_log(log, "Unrecognized arg#%d type %s\n",
  5457. i, btf_type_str(t));
  5458. return -EINVAL;
  5459. }
  5460. /* These register types have special constraints wrt ref_obj_id
  5461. * and offset checks. The rest of trusted args don't.
  5462. */
  5463. obj_ptr = reg->type == PTR_TO_CTX || reg->type == PTR_TO_BTF_ID ||
  5464. reg2btf_ids[base_type(reg->type)];
  5465. /* Check if argument must be a referenced pointer, args + i has
  5466. * been verified to be a pointer (after skipping modifiers).
  5467. * PTR_TO_CTX is ok without having non-zero ref_obj_id.
  5468. */
  5469. if (is_kfunc && trusted_args && (obj_ptr && reg->type != PTR_TO_CTX) && !reg->ref_obj_id) {
  5470. bpf_log(log, "R%d must be referenced\n", regno);
  5471. return -EINVAL;
  5472. }
  5473. ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
  5474. ref_tname = btf_name_by_offset(btf, ref_t->name_off);
  5475. /* Trusted args have the same offset checks as release arguments */
  5476. if ((trusted_args && obj_ptr) || (rel && reg->ref_obj_id))
  5477. arg_type |= OBJ_RELEASE;
  5478. ret = check_func_arg_reg_off(env, reg, regno, arg_type);
  5479. if (ret < 0)
  5480. return ret;
  5481. if (is_kfunc && reg->ref_obj_id) {
  5482. /* Ensure only one argument is referenced PTR_TO_BTF_ID */
  5483. if (ref_obj_id) {
  5484. bpf_log(log, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
  5485. regno, reg->ref_obj_id, ref_obj_id);
  5486. return -EFAULT;
  5487. }
  5488. ref_regno = regno;
  5489. ref_obj_id = reg->ref_obj_id;
  5490. }
  5491. /* kptr_get is only true for kfunc */
  5492. if (i == 0 && kptr_get) {
  5493. struct bpf_map_value_off_desc *off_desc;
  5494. if (reg->type != PTR_TO_MAP_VALUE) {
  5495. bpf_log(log, "arg#0 expected pointer to map value\n");
  5496. return -EINVAL;
  5497. }
  5498. /* check_func_arg_reg_off allows var_off for
  5499. * PTR_TO_MAP_VALUE, but we need fixed offset to find
  5500. * off_desc.
  5501. */
  5502. if (!tnum_is_const(reg->var_off)) {
  5503. bpf_log(log, "arg#0 must have constant offset\n");
  5504. return -EINVAL;
  5505. }
  5506. off_desc = bpf_map_kptr_off_contains(reg->map_ptr, reg->off + reg->var_off.value);
  5507. if (!off_desc || off_desc->type != BPF_KPTR_REF) {
  5508. bpf_log(log, "arg#0 no referenced kptr at map value offset=%llu\n",
  5509. reg->off + reg->var_off.value);
  5510. return -EINVAL;
  5511. }
  5512. if (!btf_type_is_ptr(ref_t)) {
  5513. bpf_log(log, "arg#0 BTF type must be a double pointer\n");
  5514. return -EINVAL;
  5515. }
  5516. ref_t = btf_type_skip_modifiers(btf, ref_t->type, &ref_id);
  5517. ref_tname = btf_name_by_offset(btf, ref_t->name_off);
  5518. if (!btf_type_is_struct(ref_t)) {
  5519. bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
  5520. func_name, i, btf_type_str(ref_t), ref_tname);
  5521. return -EINVAL;
  5522. }
  5523. if (!btf_struct_ids_match(log, btf, ref_id, 0, off_desc->kptr.btf,
  5524. off_desc->kptr.btf_id, true)) {
  5525. bpf_log(log, "kernel function %s args#%d expected pointer to %s %s\n",
  5526. func_name, i, btf_type_str(ref_t), ref_tname);
  5527. return -EINVAL;
  5528. }
  5529. /* rest of the arguments can be anything, like normal kfunc */
  5530. } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
  5531. /* If function expects ctx type in BTF check that caller
  5532. * is passing PTR_TO_CTX.
  5533. */
  5534. if (reg->type != PTR_TO_CTX) {
  5535. bpf_log(log,
  5536. "arg#%d expected pointer to ctx, but got %s\n",
  5537. i, btf_type_str(t));
  5538. return -EINVAL;
  5539. }
  5540. } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
  5541. (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
  5542. const struct btf_type *reg_ref_t;
  5543. const struct btf *reg_btf;
  5544. const char *reg_ref_tname;
  5545. u32 reg_ref_id;
  5546. if (!btf_type_is_struct(ref_t)) {
  5547. bpf_log(log, "kernel function %s args#%d pointer type %s %s is not supported\n",
  5548. func_name, i, btf_type_str(ref_t),
  5549. ref_tname);
  5550. return -EINVAL;
  5551. }
  5552. if (reg->type == PTR_TO_BTF_ID) {
  5553. reg_btf = reg->btf;
  5554. reg_ref_id = reg->btf_id;
  5555. } else {
  5556. reg_btf = btf_vmlinux;
  5557. reg_ref_id = *reg2btf_ids[base_type(reg->type)];
  5558. }
  5559. reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
  5560. &reg_ref_id);
  5561. reg_ref_tname = btf_name_by_offset(reg_btf,
  5562. reg_ref_t->name_off);
  5563. if (!btf_struct_ids_match(log, reg_btf, reg_ref_id,
  5564. reg->off, btf, ref_id,
  5565. trusted_args || (rel && reg->ref_obj_id))) {
  5566. bpf_log(log, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
  5567. func_name, i,
  5568. btf_type_str(ref_t), ref_tname,
  5569. regno, btf_type_str(reg_ref_t),
  5570. reg_ref_tname);
  5571. return -EINVAL;
  5572. }
  5573. } else if (ptr_to_mem_ok && processing_call) {
  5574. const struct btf_type *resolve_ret;
  5575. u32 type_size;
  5576. if (is_kfunc) {
  5577. bool arg_mem_size = i + 1 < nargs && is_kfunc_arg_mem_size(btf, &args[i + 1], &regs[regno + 1]);
  5578. bool arg_dynptr = btf_type_is_struct(ref_t) &&
  5579. !strcmp(ref_tname,
  5580. stringify_struct(bpf_dynptr_kern));
  5581. /* Permit pointer to mem, but only when argument
  5582. * type is pointer to scalar, or struct composed
  5583. * (recursively) of scalars.
  5584. * When arg_mem_size is true, the pointer can be
  5585. * void *.
  5586. * Also permit initialized local dynamic pointers.
  5587. */
  5588. if (!btf_type_is_scalar(ref_t) &&
  5589. !__btf_type_is_scalar_struct(log, btf, ref_t, 0) &&
  5590. !arg_dynptr &&
  5591. (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
  5592. bpf_log(log,
  5593. "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
  5594. i, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
  5595. return -EINVAL;
  5596. }
  5597. if (arg_dynptr) {
  5598. if (reg->type != PTR_TO_STACK) {
  5599. bpf_log(log, "arg#%d pointer type %s %s not to stack\n",
  5600. i, btf_type_str(ref_t),
  5601. ref_tname);
  5602. return -EINVAL;
  5603. }
  5604. if (!is_dynptr_reg_valid_init(env, reg)) {
  5605. bpf_log(log,
  5606. "arg#%d pointer type %s %s must be valid and initialized\n",
  5607. i, btf_type_str(ref_t),
  5608. ref_tname);
  5609. return -EINVAL;
  5610. }
  5611. if (!is_dynptr_type_expected(env, reg,
  5612. ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL)) {
  5613. bpf_log(log,
  5614. "arg#%d pointer type %s %s points to unsupported dynamic pointer type\n",
  5615. i, btf_type_str(ref_t),
  5616. ref_tname);
  5617. return -EINVAL;
  5618. }
  5619. continue;
  5620. }
  5621. /* Check for mem, len pair */
  5622. if (arg_mem_size) {
  5623. if (check_kfunc_mem_size_reg(env, &regs[regno + 1], regno + 1)) {
  5624. bpf_log(log, "arg#%d arg#%d memory, len pair leads to invalid memory access\n",
  5625. i, i + 1);
  5626. return -EINVAL;
  5627. }
  5628. i++;
  5629. continue;
  5630. }
  5631. }
  5632. resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
  5633. if (IS_ERR(resolve_ret)) {
  5634. bpf_log(log,
  5635. "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
  5636. i, btf_type_str(ref_t), ref_tname,
  5637. PTR_ERR(resolve_ret));
  5638. return -EINVAL;
  5639. }
  5640. if (check_mem_reg(env, reg, regno, type_size))
  5641. return -EINVAL;
  5642. } else {
  5643. bpf_log(log, "reg type unsupported for arg#%d %sfunction %s#%d\n", i,
  5644. is_kfunc ? "kernel " : "", func_name, func_id);
  5645. return -EINVAL;
  5646. }
  5647. }
  5648. /* Either both are set, or neither */
  5649. WARN_ON_ONCE((ref_obj_id && !ref_regno) || (!ref_obj_id && ref_regno));
  5650. /* We already made sure ref_obj_id is set only for one argument. We do
  5651. * allow (!rel && ref_obj_id), so that passing such referenced
  5652. * PTR_TO_BTF_ID to other kfuncs works. Note that rel is only true when
  5653. * is_kfunc is true.
  5654. */
  5655. if (rel && !ref_obj_id) {
  5656. bpf_log(log, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
  5657. func_name);
  5658. return -EINVAL;
  5659. }
  5660. if (sleepable && !env->prog->aux->sleepable) {
  5661. bpf_log(log, "kernel function %s is sleepable but the program is not\n",
  5662. func_name);
  5663. return -EINVAL;
  5664. }
  5665. if (kfunc_meta && ref_obj_id)
  5666. kfunc_meta->ref_obj_id = ref_obj_id;
  5667. /* returns argument register number > 0 in case of reference release kfunc */
  5668. return rel ? ref_regno : 0;
  5669. }
  5670. /* Compare BTF of a function declaration with given bpf_reg_state.
  5671. * Returns:
  5672. * EFAULT - there is a verifier bug. Abort verification.
  5673. * EINVAL - there is a type mismatch or BTF is not available.
  5674. * 0 - BTF matches with what bpf_reg_state expects.
  5675. * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
  5676. */
  5677. int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
  5678. struct bpf_reg_state *regs)
  5679. {
  5680. struct bpf_prog *prog = env->prog;
  5681. struct btf *btf = prog->aux->btf;
  5682. bool is_global;
  5683. u32 btf_id;
  5684. int err;
  5685. if (!prog->aux->func_info)
  5686. return -EINVAL;
  5687. btf_id = prog->aux->func_info[subprog].type_id;
  5688. if (!btf_id)
  5689. return -EFAULT;
  5690. if (prog->aux->func_info_aux[subprog].unreliable)
  5691. return -EINVAL;
  5692. is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
  5693. err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, false);
  5694. /* Compiler optimizations can remove arguments from static functions
  5695. * or mismatched type can be passed into a global function.
  5696. * In such cases mark the function as unreliable from BTF point of view.
  5697. */
  5698. if (err)
  5699. prog->aux->func_info_aux[subprog].unreliable = true;
  5700. return err;
  5701. }
  5702. /* Compare BTF of a function call with given bpf_reg_state.
  5703. * Returns:
  5704. * EFAULT - there is a verifier bug. Abort verification.
  5705. * EINVAL - there is a type mismatch or BTF is not available.
  5706. * 0 - BTF matches with what bpf_reg_state expects.
  5707. * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
  5708. *
  5709. * NOTE: the code is duplicated from btf_check_subprog_arg_match()
  5710. * because btf_check_func_arg_match() is still doing both. Once that
  5711. * function is split in 2, we can call from here btf_check_subprog_arg_match()
  5712. * first, and then treat the calling part in a new code path.
  5713. */
  5714. int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
  5715. struct bpf_reg_state *regs)
  5716. {
  5717. struct bpf_prog *prog = env->prog;
  5718. struct btf *btf = prog->aux->btf;
  5719. bool is_global;
  5720. u32 btf_id;
  5721. int err;
  5722. if (!prog->aux->func_info)
  5723. return -EINVAL;
  5724. btf_id = prog->aux->func_info[subprog].type_id;
  5725. if (!btf_id)
  5726. return -EFAULT;
  5727. if (prog->aux->func_info_aux[subprog].unreliable)
  5728. return -EINVAL;
  5729. is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
  5730. err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, NULL, true);
  5731. /* Compiler optimizations can remove arguments from static functions
  5732. * or mismatched type can be passed into a global function.
  5733. * In such cases mark the function as unreliable from BTF point of view.
  5734. */
  5735. if (err)
  5736. prog->aux->func_info_aux[subprog].unreliable = true;
  5737. return err;
  5738. }
  5739. int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
  5740. const struct btf *btf, u32 func_id,
  5741. struct bpf_reg_state *regs,
  5742. struct bpf_kfunc_arg_meta *meta)
  5743. {
  5744. return btf_check_func_arg_match(env, btf, func_id, regs, true, meta, true);
  5745. }
  5746. /* Convert BTF of a function into bpf_reg_state if possible
  5747. * Returns:
  5748. * EFAULT - there is a verifier bug. Abort verification.
  5749. * EINVAL - cannot convert BTF.
  5750. * 0 - Successfully converted BTF into bpf_reg_state
  5751. * (either PTR_TO_CTX or SCALAR_VALUE).
  5752. */
  5753. int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
  5754. struct bpf_reg_state *regs)
  5755. {
  5756. struct bpf_verifier_log *log = &env->log;
  5757. struct bpf_prog *prog = env->prog;
  5758. enum bpf_prog_type prog_type = prog->type;
  5759. struct btf *btf = prog->aux->btf;
  5760. const struct btf_param *args;
  5761. const struct btf_type *t, *ref_t;
  5762. u32 i, nargs, btf_id;
  5763. const char *tname;
  5764. if (!prog->aux->func_info ||
  5765. prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
  5766. bpf_log(log, "Verifier bug\n");
  5767. return -EFAULT;
  5768. }
  5769. btf_id = prog->aux->func_info[subprog].type_id;
  5770. if (!btf_id) {
  5771. bpf_log(log, "Global functions need valid BTF\n");
  5772. return -EFAULT;
  5773. }
  5774. t = btf_type_by_id(btf, btf_id);
  5775. if (!t || !btf_type_is_func(t)) {
  5776. /* These checks were already done by the verifier while loading
  5777. * struct bpf_func_info
  5778. */
  5779. bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
  5780. subprog);
  5781. return -EFAULT;
  5782. }
  5783. tname = btf_name_by_offset(btf, t->name_off);
  5784. if (log->level & BPF_LOG_LEVEL)
  5785. bpf_log(log, "Validating %s() func#%d...\n",
  5786. tname, subprog);
  5787. if (prog->aux->func_info_aux[subprog].unreliable) {
  5788. bpf_log(log, "Verifier bug in function %s()\n", tname);
  5789. return -EFAULT;
  5790. }
  5791. if (prog_type == BPF_PROG_TYPE_EXT)
  5792. prog_type = prog->aux->dst_prog->type;
  5793. t = btf_type_by_id(btf, t->type);
  5794. if (!t || !btf_type_is_func_proto(t)) {
  5795. bpf_log(log, "Invalid type of function %s()\n", tname);
  5796. return -EFAULT;
  5797. }
  5798. args = (const struct btf_param *)(t + 1);
  5799. nargs = btf_type_vlen(t);
  5800. if (nargs > MAX_BPF_FUNC_REG_ARGS) {
  5801. bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
  5802. tname, nargs, MAX_BPF_FUNC_REG_ARGS);
  5803. return -EINVAL;
  5804. }
5805. /* check that the function returns a scalar (int or enum) */
  5806. t = btf_type_by_id(btf, t->type);
  5807. while (btf_type_is_modifier(t))
  5808. t = btf_type_by_id(btf, t->type);
  5809. if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
  5810. bpf_log(log,
  5811. "Global function %s() doesn't return scalar. Only those are supported.\n",
  5812. tname);
  5813. return -EINVAL;
  5814. }
  5815. /* Convert BTF function arguments into verifier types.
  5816. * Only PTR_TO_CTX and SCALAR are supported atm.
  5817. */
  5818. for (i = 0; i < nargs; i++) {
  5819. struct bpf_reg_state *reg = &regs[i + 1];
  5820. t = btf_type_by_id(btf, args[i].type);
  5821. while (btf_type_is_modifier(t))
  5822. t = btf_type_by_id(btf, t->type);
  5823. if (btf_type_is_int(t) || btf_is_any_enum(t)) {
  5824. reg->type = SCALAR_VALUE;
  5825. continue;
  5826. }
  5827. if (btf_type_is_ptr(t)) {
  5828. if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
  5829. reg->type = PTR_TO_CTX;
  5830. continue;
  5831. }
  5832. t = btf_type_skip_modifiers(btf, t->type, NULL);
  5833. ref_t = btf_resolve_size(btf, t, &reg->mem_size);
  5834. if (IS_ERR(ref_t)) {
  5835. bpf_log(log,
  5836. "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
  5837. i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
  5838. PTR_ERR(ref_t));
  5839. return -EINVAL;
  5840. }
  5841. reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
  5842. reg->id = ++env->id_gen;
  5843. continue;
  5844. }
  5845. bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
  5846. i, btf_type_str(t), tname);
  5847. return -EINVAL;
  5848. }
  5849. return 0;
  5850. }
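/* Illustrative sketch (not part of the original file; the struct and function
 * names are made up): for a global subprog in a BPF program such as
 *
 *	struct pkt_md { u32 off; u32 len; };
 *
 *	__noinline int handle(struct xdp_md *ctx, struct pkt_md *md, int n)
 *	{
 *		return md ? md->off + n : n;
 *	}
 *
 * the conversion above would seed the entry state roughly as:
 * R1 = PTR_TO_CTX (struct xdp_md matches the XDP prog ctx type),
 * R2 = PTR_TO_MEM | PTR_MAYBE_NULL with mem_size = sizeof(struct pkt_md),
 * R3 = SCALAR_VALUE, and the int return type passes the scalar-return check.
 */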
  5851. static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
  5852. struct btf_show *show)
  5853. {
  5854. const struct btf_type *t = btf_type_by_id(btf, type_id);
  5855. show->btf = btf;
  5856. memset(&show->state, 0, sizeof(show->state));
  5857. memset(&show->obj, 0, sizeof(show->obj));
  5858. btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
  5859. }
  5860. static void btf_seq_show(struct btf_show *show, const char *fmt,
  5861. va_list args)
  5862. {
  5863. seq_vprintf((struct seq_file *)show->target, fmt, args);
  5864. }
  5865. int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
  5866. void *obj, struct seq_file *m, u64 flags)
  5867. {
  5868. struct btf_show sseq;
  5869. sseq.target = m;
  5870. sseq.showfn = btf_seq_show;
  5871. sseq.flags = flags;
  5872. btf_type_show(btf, type_id, obj, &sseq);
  5873. return sseq.state.status;
  5874. }
  5875. void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
  5876. struct seq_file *m)
  5877. {
  5878. (void) btf_type_seq_show_flags(btf, type_id, obj, m,
  5879. BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
  5880. BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
  5881. }
  5882. struct btf_show_snprintf {
  5883. struct btf_show show;
  5884. int len_left; /* space left in string */
  5885. int len; /* length we would have written */
  5886. };
  5887. static void btf_snprintf_show(struct btf_show *show, const char *fmt,
  5888. va_list args)
  5889. {
  5890. struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
  5891. int len;
  5892. len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
  5893. if (len < 0) {
  5894. ssnprintf->len_left = 0;
  5895. ssnprintf->len = len;
  5896. } else if (len >= ssnprintf->len_left) {
  5897. /* no space, drive on to get length we would have written */
  5898. ssnprintf->len_left = 0;
  5899. ssnprintf->len += len;
  5900. } else {
  5901. ssnprintf->len_left -= len;
  5902. ssnprintf->len += len;
  5903. show->target += len;
  5904. }
  5905. }
  5906. int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
  5907. char *buf, int len, u64 flags)
  5908. {
  5909. struct btf_show_snprintf ssnprintf;
  5910. ssnprintf.show.target = buf;
  5911. ssnprintf.show.flags = flags;
  5912. ssnprintf.show.showfn = btf_snprintf_show;
  5913. ssnprintf.len_left = len;
  5914. ssnprintf.len = 0;
  5915. btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
  5916. /* If we encountered an error, return it. */
  5917. if (ssnprintf.show.state.status)
  5918. return ssnprintf.show.state.status;
  5919. /* Otherwise return length we would have written */
  5920. return ssnprintf.len;
  5921. }
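/* Usage sketch (illustrative only, not part of the original file; assumes the
 * caller already holds a reference on btf and a valid type_id for obj):
 *
 *	char buf[256];
 *	int ret;
 *
 *	ret = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
 *				     BTF_SHOW_COMPACT | BTF_SHOW_NONAME);
 *	if (ret < 0)
 *		return ret;
 *	if (ret >= sizeof(buf))
 *		...	(output was truncated, as with snprintf())
 *
 * The bpf_snprintf_btf() helper builds on this same snprintf-style contract.
 */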
  5922. #ifdef CONFIG_PROC_FS
  5923. static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
  5924. {
  5925. const struct btf *btf = filp->private_data;
  5926. seq_printf(m, "btf_id:\t%u\n", btf->id);
  5927. }
  5928. #endif
  5929. static int btf_release(struct inode *inode, struct file *filp)
  5930. {
  5931. btf_put(filp->private_data);
  5932. return 0;
  5933. }
  5934. const struct file_operations btf_fops = {
  5935. #ifdef CONFIG_PROC_FS
  5936. .show_fdinfo = bpf_btf_show_fdinfo,
  5937. #endif
  5938. .release = btf_release,
  5939. };
  5940. static int __btf_new_fd(struct btf *btf)
  5941. {
  5942. return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
  5943. }
  5944. int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
  5945. {
  5946. struct btf *btf;
  5947. int ret;
  5948. btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
  5949. attr->btf_size, attr->btf_log_level,
  5950. u64_to_user_ptr(attr->btf_log_buf),
  5951. attr->btf_log_size);
  5952. if (IS_ERR(btf))
  5953. return PTR_ERR(btf);
  5954. ret = btf_alloc_id(btf);
  5955. if (ret) {
  5956. btf_free(btf);
  5957. return ret;
  5958. }
  5959. /*
  5960. * The BTF ID is published to the userspace.
  5961. * All BTF free must go through call_rcu() from
  5962. * now on (i.e. free by calling btf_put()).
  5963. */
  5964. ret = __btf_new_fd(btf);
  5965. if (ret < 0)
  5966. btf_put(btf);
  5967. return ret;
  5968. }
  5969. struct btf *btf_get_by_fd(int fd)
  5970. {
  5971. struct btf *btf;
  5972. struct fd f;
  5973. f = fdget(fd);
  5974. if (!f.file)
  5975. return ERR_PTR(-EBADF);
  5976. if (f.file->f_op != &btf_fops) {
  5977. fdput(f);
  5978. return ERR_PTR(-EINVAL);
  5979. }
  5980. btf = f.file->private_data;
  5981. refcount_inc(&btf->refcnt);
  5982. fdput(f);
  5983. return btf;
  5984. }
  5985. int btf_get_info_by_fd(const struct btf *btf,
  5986. const union bpf_attr *attr,
  5987. union bpf_attr __user *uattr)
  5988. {
  5989. struct bpf_btf_info __user *uinfo;
  5990. struct bpf_btf_info info;
  5991. u32 info_copy, btf_copy;
  5992. void __user *ubtf;
  5993. char __user *uname;
  5994. u32 uinfo_len, uname_len, name_len;
  5995. int ret = 0;
  5996. uinfo = u64_to_user_ptr(attr->info.info);
  5997. uinfo_len = attr->info.info_len;
  5998. info_copy = min_t(u32, uinfo_len, sizeof(info));
  5999. memset(&info, 0, sizeof(info));
  6000. if (copy_from_user(&info, uinfo, info_copy))
  6001. return -EFAULT;
  6002. info.id = btf->id;
  6003. ubtf = u64_to_user_ptr(info.btf);
  6004. btf_copy = min_t(u32, btf->data_size, info.btf_size);
  6005. if (copy_to_user(ubtf, btf->data, btf_copy))
  6006. return -EFAULT;
  6007. info.btf_size = btf->data_size;
  6008. info.kernel_btf = btf->kernel_btf;
  6009. uname = u64_to_user_ptr(info.name);
  6010. uname_len = info.name_len;
  6011. if (!uname ^ !uname_len)
  6012. return -EINVAL;
  6013. name_len = strlen(btf->name);
  6014. info.name_len = name_len;
  6015. if (uname) {
  6016. if (uname_len >= name_len + 1) {
  6017. if (copy_to_user(uname, btf->name, name_len + 1))
  6018. return -EFAULT;
  6019. } else {
  6020. char zero = '\0';
  6021. if (copy_to_user(uname, btf->name, uname_len - 1))
  6022. return -EFAULT;
  6023. if (put_user(zero, uname + uname_len - 1))
  6024. return -EFAULT;
  6025. /* let user-space know about too short buffer */
  6026. ret = -ENOSPC;
  6027. }
  6028. }
  6029. if (copy_to_user(uinfo, &info, info_copy) ||
  6030. put_user(info_copy, &uattr->info.info_len))
  6031. return -EFAULT;
  6032. return ret;
  6033. }
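/* Usage sketch from user space (illustrative only, not part of the original
 * file): btf_size doubles as the input buffer size and the output "real"
 * size, so callers typically probe first and then fetch, e.g. with libbpf's
 * bpf_obj_get_info_by_fd():
 *
 *	struct bpf_btf_info info = {};
 *	__u32 len = sizeof(info), buf_sz;
 *	void *buf;
 *
 *	bpf_obj_get_info_by_fd(btf_fd, &info, &len);	(probe real btf_size)
 *	buf_sz = info.btf_size;
 *	buf = malloc(buf_sz);
 *	memset(&info, 0, sizeof(info));
 *	info.btf = (__u64)(unsigned long)buf;
 *	info.btf_size = buf_sz;
 *	bpf_obj_get_info_by_fd(btf_fd, &info, &len);	(raw BTF copied to buf)
 */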
  6034. int btf_get_fd_by_id(u32 id)
  6035. {
  6036. struct btf *btf;
  6037. int fd;
  6038. rcu_read_lock();
  6039. btf = idr_find(&btf_idr, id);
  6040. if (!btf || !refcount_inc_not_zero(&btf->refcnt))
  6041. btf = ERR_PTR(-ENOENT);
  6042. rcu_read_unlock();
  6043. if (IS_ERR(btf))
  6044. return PTR_ERR(btf);
  6045. fd = __btf_new_fd(btf);
  6046. if (fd < 0)
  6047. btf_put(btf);
  6048. return fd;
  6049. }
  6050. u32 btf_obj_id(const struct btf *btf)
  6051. {
  6052. return btf->id;
  6053. }
  6054. bool btf_is_kernel(const struct btf *btf)
  6055. {
  6056. return btf->kernel_btf;
  6057. }
  6058. bool btf_is_module(const struct btf *btf)
  6059. {
  6060. return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
  6061. }
  6062. static int btf_id_cmp_func(const void *a, const void *b)
  6063. {
  6064. const int *pa = a, *pb = b;
  6065. return *pa - *pb;
  6066. }
  6067. bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
  6068. {
  6069. return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
  6070. }
  6071. static void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id)
  6072. {
  6073. return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func);
  6074. }
  6075. enum {
  6076. BTF_MODULE_F_LIVE = (1 << 0),
  6077. };
  6078. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
  6079. struct btf_module {
  6080. struct list_head list;
  6081. struct module *module;
  6082. struct btf *btf;
  6083. struct bin_attribute *sysfs_attr;
  6084. int flags;
  6085. };
  6086. static LIST_HEAD(btf_modules);
  6087. static DEFINE_MUTEX(btf_module_mutex);
  6088. static ssize_t
  6089. btf_module_read(struct file *file, struct kobject *kobj,
  6090. struct bin_attribute *bin_attr,
  6091. char *buf, loff_t off, size_t len)
  6092. {
  6093. const struct btf *btf = bin_attr->private;
  6094. memcpy(buf, btf->data + off, len);
  6095. return len;
  6096. }
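/* The bin_attribute set up in the notifier below is registered under
 * btf_kobj, so a module's raw BTF appears as /sys/kernel/btf/<module>,
 * alongside /sys/kernel/btf/vmlinux, and can be inspected from user space,
 * e.g. (illustrative):
 *
 *	bpftool btf dump file /sys/kernel/btf/vmlinux format c
 *
 * (Module BTF is split BTF and may additionally need its base passed to
 * bpftool via -B /sys/kernel/btf/vmlinux.)
 */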
  6097. static void purge_cand_cache(struct btf *btf);
  6098. static int btf_module_notify(struct notifier_block *nb, unsigned long op,
  6099. void *module)
  6100. {
  6101. struct btf_module *btf_mod, *tmp;
  6102. struct module *mod = module;
  6103. struct btf *btf;
  6104. int err = 0;
  6105. if (mod->btf_data_size == 0 ||
  6106. (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
  6107. op != MODULE_STATE_GOING))
  6108. goto out;
  6109. switch (op) {
  6110. case MODULE_STATE_COMING:
  6111. btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
  6112. if (!btf_mod) {
  6113. err = -ENOMEM;
  6114. goto out;
  6115. }
  6116. btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
  6117. if (IS_ERR(btf)) {
  6118. kfree(btf_mod);
  6119. if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
  6120. pr_warn("failed to validate module [%s] BTF: %ld\n",
  6121. mod->name, PTR_ERR(btf));
  6122. err = PTR_ERR(btf);
  6123. } else {
  6124. pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
  6125. }
  6126. goto out;
  6127. }
  6128. err = btf_alloc_id(btf);
  6129. if (err) {
  6130. btf_free(btf);
  6131. kfree(btf_mod);
  6132. goto out;
  6133. }
  6134. purge_cand_cache(NULL);
  6135. mutex_lock(&btf_module_mutex);
  6136. btf_mod->module = module;
  6137. btf_mod->btf = btf;
  6138. list_add(&btf_mod->list, &btf_modules);
  6139. mutex_unlock(&btf_module_mutex);
  6140. if (IS_ENABLED(CONFIG_SYSFS)) {
  6141. struct bin_attribute *attr;
  6142. attr = kzalloc(sizeof(*attr), GFP_KERNEL);
  6143. if (!attr)
  6144. goto out;
  6145. sysfs_bin_attr_init(attr);
  6146. attr->attr.name = btf->name;
  6147. attr->attr.mode = 0444;
  6148. attr->size = btf->data_size;
  6149. attr->private = btf;
  6150. attr->read = btf_module_read;
  6151. err = sysfs_create_bin_file(btf_kobj, attr);
  6152. if (err) {
  6153. pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
  6154. mod->name, err);
  6155. kfree(attr);
  6156. err = 0;
  6157. goto out;
  6158. }
  6159. btf_mod->sysfs_attr = attr;
  6160. }
  6161. break;
  6162. case MODULE_STATE_LIVE:
  6163. mutex_lock(&btf_module_mutex);
  6164. list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
  6165. if (btf_mod->module != module)
  6166. continue;
  6167. btf_mod->flags |= BTF_MODULE_F_LIVE;
  6168. break;
  6169. }
  6170. mutex_unlock(&btf_module_mutex);
  6171. break;
  6172. case MODULE_STATE_GOING:
  6173. mutex_lock(&btf_module_mutex);
  6174. list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
  6175. if (btf_mod->module != module)
  6176. continue;
  6177. list_del(&btf_mod->list);
  6178. if (btf_mod->sysfs_attr)
  6179. sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
  6180. purge_cand_cache(btf_mod->btf);
  6181. btf_put(btf_mod->btf);
  6182. kfree(btf_mod->sysfs_attr);
  6183. kfree(btf_mod);
  6184. break;
  6185. }
  6186. mutex_unlock(&btf_module_mutex);
  6187. break;
  6188. }
  6189. out:
  6190. return notifier_from_errno(err);
  6191. }
  6192. static struct notifier_block btf_module_nb = {
  6193. .notifier_call = btf_module_notify,
  6194. };
  6195. static int __init btf_module_init(void)
  6196. {
  6197. register_module_notifier(&btf_module_nb);
  6198. return 0;
  6199. }
  6200. fs_initcall(btf_module_init);
  6201. #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
  6202. struct module *btf_try_get_module(const struct btf *btf)
  6203. {
  6204. struct module *res = NULL;
  6205. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
  6206. struct btf_module *btf_mod, *tmp;
  6207. mutex_lock(&btf_module_mutex);
  6208. list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
  6209. if (btf_mod->btf != btf)
  6210. continue;
6211. /* We must only consider modules whose __init routine has
  6212. * finished, hence we must check for BTF_MODULE_F_LIVE flag,
  6213. * which is set from the notifier callback for
  6214. * MODULE_STATE_LIVE.
  6215. */
  6216. if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
  6217. res = btf_mod->module;
  6218. break;
  6219. }
  6220. mutex_unlock(&btf_module_mutex);
  6221. #endif
  6222. return res;
  6223. }
  6224. /* Returns struct btf corresponding to the struct module.
  6225. * This function can return NULL or ERR_PTR.
  6226. */
  6227. static struct btf *btf_get_module_btf(const struct module *module)
  6228. {
  6229. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
  6230. struct btf_module *btf_mod, *tmp;
  6231. #endif
  6232. struct btf *btf = NULL;
  6233. if (!module) {
  6234. btf = bpf_get_btf_vmlinux();
  6235. if (!IS_ERR_OR_NULL(btf))
  6236. btf_get(btf);
  6237. return btf;
  6238. }
  6239. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
  6240. mutex_lock(&btf_module_mutex);
  6241. list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
  6242. if (btf_mod->module != module)
  6243. continue;
  6244. btf_get(btf_mod->btf);
  6245. btf = btf_mod->btf;
  6246. break;
  6247. }
  6248. mutex_unlock(&btf_module_mutex);
  6249. #endif
  6250. return btf;
  6251. }
  6252. BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
  6253. {
  6254. struct btf *btf = NULL;
  6255. int btf_obj_fd = 0;
  6256. long ret;
  6257. if (flags)
  6258. return -EINVAL;
  6259. if (name_sz <= 1 || name[name_sz - 1])
  6260. return -EINVAL;
  6261. ret = bpf_find_btf_id(name, kind, &btf);
  6262. if (ret > 0 && btf_is_module(btf)) {
  6263. btf_obj_fd = __btf_new_fd(btf);
  6264. if (btf_obj_fd < 0) {
  6265. btf_put(btf);
  6266. return btf_obj_fd;
  6267. }
  6268. return ret | (((u64)btf_obj_fd) << 32);
  6269. }
  6270. if (ret > 0)
  6271. btf_put(btf);
  6272. return ret;
  6273. }
  6274. const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
  6275. .func = bpf_btf_find_by_name_kind,
  6276. .gpl_only = false,
  6277. .ret_type = RET_INTEGER,
  6278. .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
  6279. .arg2_type = ARG_CONST_SIZE,
  6280. .arg3_type = ARG_ANYTHING,
  6281. .arg4_type = ARG_ANYTHING,
  6282. };
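/* Illustrative sketch (not part of the original file): the helper packs a
 * module BTF object fd into the upper 32 bits of its return value, so a
 * caller (e.g. a BPF_PROG_TYPE_SYSCALL loader program) would decode it as:
 *
 *	long res = bpf_btf_find_by_name_kind(name, sizeof(name),
 *					     BTF_KIND_STRUCT, 0);
 *	if (res < 0)
 *		return res;
 *	btf_id = (__u32)res;
 *	btf_obj_fd = res >> 32;	(0 when the type lives in vmlinux BTF)
 */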
  6283. BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
  6284. #define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
  6285. BTF_TRACING_TYPE_xxx
  6286. #undef BTF_TRACING_TYPE
  6287. /* Kernel Function (kfunc) BTF ID set registration API */
  6288. static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
  6289. struct btf_id_set8 *add_set)
  6290. {
  6291. bool vmlinux_set = !btf_is_module(btf);
  6292. struct btf_kfunc_set_tab *tab;
  6293. struct btf_id_set8 *set;
  6294. u32 set_cnt;
  6295. int ret;
  6296. if (hook >= BTF_KFUNC_HOOK_MAX) {
  6297. ret = -EINVAL;
  6298. goto end;
  6299. }
  6300. if (!add_set->cnt)
  6301. return 0;
  6302. tab = btf->kfunc_set_tab;
  6303. if (!tab) {
  6304. tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
  6305. if (!tab)
  6306. return -ENOMEM;
  6307. btf->kfunc_set_tab = tab;
  6308. }
  6309. set = tab->sets[hook];
  6310. /* Warn when register_btf_kfunc_id_set is called twice for the same hook
  6311. * for module sets.
  6312. */
  6313. if (WARN_ON_ONCE(set && !vmlinux_set)) {
  6314. ret = -EINVAL;
  6315. goto end;
  6316. }
  6317. /* We don't need to allocate, concatenate, and sort module sets, because
  6318. * only one is allowed per hook. Hence, we can directly assign the
  6319. * pointer and return.
  6320. */
  6321. if (!vmlinux_set) {
  6322. tab->sets[hook] = add_set;
  6323. return 0;
  6324. }
  6325. /* In case of vmlinux sets, there may be more than one set being
  6326. * registered per hook. To create a unified set, we allocate a new set
  6327. * and concatenate all individual sets being registered. While each set
  6328. * is individually sorted, they may become unsorted when concatenated,
  6329. * hence re-sorting the final set again is required to make binary
6330. * searching the set using the btf_id_set8_contains() function work.
  6331. */
  6332. set_cnt = set ? set->cnt : 0;
  6333. if (set_cnt > U32_MAX - add_set->cnt) {
  6334. ret = -EOVERFLOW;
  6335. goto end;
  6336. }
  6337. if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
  6338. ret = -E2BIG;
  6339. goto end;
  6340. }
  6341. /* Grow set */
  6342. set = krealloc(tab->sets[hook],
  6343. offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
  6344. GFP_KERNEL | __GFP_NOWARN);
  6345. if (!set) {
  6346. ret = -ENOMEM;
  6347. goto end;
  6348. }
  6349. /* For newly allocated set, initialize set->cnt to 0 */
  6350. if (!tab->sets[hook])
  6351. set->cnt = 0;
  6352. tab->sets[hook] = set;
  6353. /* Concatenate the two sets */
  6354. memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
  6355. set->cnt += add_set->cnt;
  6356. sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);
  6357. return 0;
  6358. end:
  6359. btf_free_kfunc_set_tab(btf);
  6360. return ret;
  6361. }
  6362. static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
  6363. enum btf_kfunc_hook hook,
  6364. u32 kfunc_btf_id)
  6365. {
  6366. struct btf_id_set8 *set;
  6367. u32 *id;
  6368. if (hook >= BTF_KFUNC_HOOK_MAX)
  6369. return NULL;
  6370. if (!btf->kfunc_set_tab)
  6371. return NULL;
  6372. set = btf->kfunc_set_tab->sets[hook];
  6373. if (!set)
  6374. return NULL;
  6375. id = btf_id_set8_contains(set, kfunc_btf_id);
  6376. if (!id)
  6377. return NULL;
  6378. /* The flags for BTF ID are located next to it */
  6379. return id + 1;
  6380. }
  6381. static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
  6382. {
  6383. switch (prog_type) {
  6384. case BPF_PROG_TYPE_XDP:
  6385. return BTF_KFUNC_HOOK_XDP;
  6386. case BPF_PROG_TYPE_SCHED_CLS:
  6387. return BTF_KFUNC_HOOK_TC;
  6388. case BPF_PROG_TYPE_STRUCT_OPS:
  6389. return BTF_KFUNC_HOOK_STRUCT_OPS;
  6390. case BPF_PROG_TYPE_TRACING:
  6391. case BPF_PROG_TYPE_LSM:
  6392. return BTF_KFUNC_HOOK_TRACING;
  6393. case BPF_PROG_TYPE_SYSCALL:
  6394. return BTF_KFUNC_HOOK_SYSCALL;
  6395. default:
  6396. return BTF_KFUNC_HOOK_MAX;
  6397. }
  6398. }
  6399. /* Caution:
  6400. * Reference to the module (obtained using btf_try_get_module) corresponding to
  6401. * the struct btf *MUST* be held when calling this function from verifier
  6402. * context. This is usually true as we stash references in prog's kfunc_btf_tab;
  6403. * keeping the reference for the duration of the call provides the necessary
  6404. * protection for looking up a well-formed btf->kfunc_set_tab.
  6405. */
  6406. u32 *btf_kfunc_id_set_contains(const struct btf *btf,
  6407. enum bpf_prog_type prog_type,
  6408. u32 kfunc_btf_id)
  6409. {
  6410. enum btf_kfunc_hook hook;
  6411. hook = bpf_prog_type_to_kfunc_hook(prog_type);
  6412. return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id);
  6413. }
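/* Illustrative sketch (not part of the original file): the verifier uses the
 * returned pointer to read the KF_* flags stored next to the BTF id in the
 * set8 pairs, roughly:
 *
 *	u32 *kfunc_flags;
 *
 *	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, prog_type, func_id);
 *	if (!kfunc_flags)
 *		return -EACCES;		(kfunc not allowed for this prog type)
 *	if (*kfunc_flags & KF_SLEEPABLE)
 *		...
 */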
  6414. /* This function must be invoked only from initcalls/module init functions */
  6415. int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
  6416. const struct btf_kfunc_id_set *kset)
  6417. {
  6418. enum btf_kfunc_hook hook;
  6419. struct btf *btf;
  6420. int ret;
  6421. btf = btf_get_module_btf(kset->owner);
  6422. if (!btf) {
  6423. if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
  6424. pr_err("missing vmlinux BTF, cannot register kfuncs\n");
  6425. return -ENOENT;
  6426. }
  6427. if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
  6428. pr_warn("missing module BTF, cannot register kfuncs\n");
  6429. return 0;
  6430. }
  6431. if (IS_ERR(btf))
  6432. return PTR_ERR(btf);
  6433. hook = bpf_prog_type_to_kfunc_hook(prog_type);
  6434. ret = btf_populate_kfunc_set(btf, hook, kset->set);
  6435. btf_put(btf);
  6436. return ret;
  6437. }
  6438. EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);
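/* Registration sketch (illustrative only; the kfunc names are made up): a
 * subsystem or module typically describes its kfuncs with the BTF_SET8
 * macros and registers them from an init function:
 *
 *	BTF_SET8_START(my_kfunc_ids)
 *	BTF_ID_FLAGS(func, my_obj_acquire, KF_ACQUIRE | KF_RET_NULL)
 *	BTF_ID_FLAGS(func, my_obj_release, KF_RELEASE)
 *	BTF_SET8_END(my_kfunc_ids)
 *
 *	static const struct btf_kfunc_id_set my_kfunc_set = {
 *		.owner = THIS_MODULE,
 *		.set   = &my_kfunc_ids,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
 *						 &my_kfunc_set);
 *	}
 */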
  6439. s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
  6440. {
  6441. struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
  6442. struct btf_id_dtor_kfunc *dtor;
  6443. if (!tab)
  6444. return -ENOENT;
  6445. /* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
  6446. * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
  6447. */
  6448. BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
  6449. dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
  6450. if (!dtor)
  6451. return -ENOENT;
  6452. return dtor->kfunc_btf_id;
  6453. }
  6454. static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
  6455. {
  6456. const struct btf_type *dtor_func, *dtor_func_proto, *t;
  6457. const struct btf_param *args;
  6458. s32 dtor_btf_id;
  6459. u32 nr_args, i;
  6460. for (i = 0; i < cnt; i++) {
  6461. dtor_btf_id = dtors[i].kfunc_btf_id;
  6462. dtor_func = btf_type_by_id(btf, dtor_btf_id);
  6463. if (!dtor_func || !btf_type_is_func(dtor_func))
  6464. return -EINVAL;
  6465. dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
  6466. if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
  6467. return -EINVAL;
  6468. /* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
  6469. t = btf_type_by_id(btf, dtor_func_proto->type);
  6470. if (!t || !btf_type_is_void(t))
  6471. return -EINVAL;
  6472. nr_args = btf_type_vlen(dtor_func_proto);
  6473. if (nr_args != 1)
  6474. return -EINVAL;
  6475. args = btf_params(dtor_func_proto);
  6476. t = btf_type_by_id(btf, args[0].type);
6477. /* Allow any pointer type, as pointer width on the targets Linux supports
6478. * will be the same for all pointer types (i.e. sizeof(void *))
  6479. */
  6480. if (!t || !btf_type_is_ptr(t))
  6481. return -EINVAL;
  6482. }
  6483. return 0;
  6484. }
  6485. /* This function must be invoked only from initcalls/module init functions */
  6486. int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
  6487. struct module *owner)
  6488. {
  6489. struct btf_id_dtor_kfunc_tab *tab;
  6490. struct btf *btf;
  6491. u32 tab_cnt;
  6492. int ret;
  6493. btf = btf_get_module_btf(owner);
  6494. if (!btf) {
  6495. if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
  6496. pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
  6497. return -ENOENT;
  6498. }
  6499. if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
  6500. pr_err("missing module BTF, cannot register dtor kfuncs\n");
  6501. return -ENOENT;
  6502. }
  6503. return 0;
  6504. }
  6505. if (IS_ERR(btf))
  6506. return PTR_ERR(btf);
  6507. if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
  6508. pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
  6509. ret = -E2BIG;
  6510. goto end;
  6511. }
  6512. /* Ensure that the prototype of dtor kfuncs being registered is sane */
  6513. ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
  6514. if (ret < 0)
  6515. goto end;
  6516. tab = btf->dtor_kfunc_tab;
  6517. /* Only one call allowed for modules */
  6518. if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
  6519. ret = -EINVAL;
  6520. goto end;
  6521. }
  6522. tab_cnt = tab ? tab->cnt : 0;
  6523. if (tab_cnt > U32_MAX - add_cnt) {
  6524. ret = -EOVERFLOW;
  6525. goto end;
  6526. }
  6527. if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
  6528. pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
  6529. ret = -E2BIG;
  6530. goto end;
  6531. }
  6532. tab = krealloc(btf->dtor_kfunc_tab,
  6533. offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
  6534. GFP_KERNEL | __GFP_NOWARN);
  6535. if (!tab) {
  6536. ret = -ENOMEM;
  6537. goto end;
  6538. }
  6539. if (!btf->dtor_kfunc_tab)
  6540. tab->cnt = 0;
  6541. btf->dtor_kfunc_tab = tab;
  6542. memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
  6543. tab->cnt += add_cnt;
  6544. sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);
  6545. end:
  6546. if (ret)
  6547. btf_free_dtor_kfunc_tab(btf);
  6548. btf_put(btf);
  6549. return ret;
  6550. }
  6551. EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
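/* Registration sketch (illustrative only; the type and kfunc names are made
 * up): a module whose objects may be stored as referenced kptrs in maps
 * pairs the object type with its destructor kfunc and registers the mapping
 * from its init function:
 *
 *	BTF_ID_LIST(my_dtor_ids)
 *	BTF_ID(struct, my_obj)
 *	BTF_ID(func, my_obj_release)
 *
 *	static int __init my_init(void)
 *	{
 *		const struct btf_id_dtor_kfunc dtors[] = {
 *			{
 *				.btf_id	      = my_dtor_ids[0],
 *				.kfunc_btf_id = my_dtor_ids[1],
 *			},
 *		};
 *
 *		return register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors),
 *						   THIS_MODULE);
 *	}
 */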
  6552. #define MAX_TYPES_ARE_COMPAT_DEPTH 2
  6553. /* Check local and target types for compatibility. This check is used for
  6554. * type-based CO-RE relocations and follow slightly different rules than
  6555. * field-based relocations. This function assumes that root types were already
  6556. * checked for name match. Beyond that initial root-level name check, names
  6557. * are completely ignored. Compatibility rules are as follows:
  6558. * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
  6559. * kind should match for local and target types (i.e., STRUCT is not
  6560. * compatible with UNION);
  6561. * - for ENUMs/ENUM64s, the size is ignored;
  6562. * - for INT, size and signedness are ignored;
  6563. * - for ARRAY, dimensionality is ignored, element types are checked for
  6564. * compatibility recursively;
  6565. * - CONST/VOLATILE/RESTRICT modifiers are ignored;
6566. * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
  6567. * - FUNC_PROTOs are compatible if they have compatible signature: same
  6568. * number of input args and compatible return and argument types.
  6569. * These rules are not set in stone and probably will be adjusted as we get
  6570. * more experience with using BPF CO-RE relocations.
  6571. */
  6572. int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
  6573. const struct btf *targ_btf, __u32 targ_id)
  6574. {
  6575. return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
  6576. MAX_TYPES_ARE_COMPAT_DEPTH);
  6577. }
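/* Illustrative example (not part of the original file): under the rules
 * above, a minimal local definition such as
 *
 *	struct task_struct { int pid; };
 *
 * is compatible with the kernel's full struct task_struct (same root name
 * and kind; members are not compared for type-based relocations), a local
 * union of the same name would not be (STRUCT vs UNION), and a local "int"
 * is compatible with a target "long" (INT size and signedness are ignored).
 */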
  6578. #define MAX_TYPES_MATCH_DEPTH 2
  6579. int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
  6580. const struct btf *targ_btf, u32 targ_id)
  6581. {
  6582. return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
  6583. MAX_TYPES_MATCH_DEPTH);
  6584. }
  6585. static bool bpf_core_is_flavor_sep(const char *s)
  6586. {
  6587. /* check X___Y name pattern, where X and Y are not underscores */
  6588. return s[0] != '_' && /* X */
  6589. s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
  6590. s[4] != '_'; /* Y */
  6591. }
  6592. size_t bpf_core_essential_name_len(const char *name)
  6593. {
  6594. size_t n = strlen(name);
  6595. int i;
  6596. for (i = n - 5; i >= 0; i--) {
  6597. if (bpf_core_is_flavor_sep(name + i))
  6598. return i + 1;
  6599. }
  6600. return n;
  6601. }
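/* Example (illustrative): for a "flavored" local type name such as
 * "task_struct___v2", the ___ separator starts the flavor suffix, so the
 * essential name is "task_struct" and the returned length is 11; names
 * without a ___ separator are returned at their full length.
 */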
  6602. struct bpf_cand_cache {
  6603. const char *name;
  6604. u32 name_len;
  6605. u16 kind;
  6606. u16 cnt;
  6607. struct {
  6608. const struct btf *btf;
  6609. u32 id;
  6610. } cands[];
  6611. };
  6612. static void bpf_free_cands(struct bpf_cand_cache *cands)
  6613. {
  6614. if (!cands->cnt)
  6615. /* empty candidate array was allocated on stack */
  6616. return;
  6617. kfree(cands);
  6618. }
  6619. static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
  6620. {
  6621. kfree(cands->name);
  6622. kfree(cands);
  6623. }
  6624. #define VMLINUX_CAND_CACHE_SIZE 31
  6625. static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];
  6626. #define MODULE_CAND_CACHE_SIZE 31
  6627. static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];
  6628. static DEFINE_MUTEX(cand_cache_mutex);
  6629. static void __print_cand_cache(struct bpf_verifier_log *log,
  6630. struct bpf_cand_cache **cache,
  6631. int cache_size)
  6632. {
  6633. struct bpf_cand_cache *cc;
  6634. int i, j;
  6635. for (i = 0; i < cache_size; i++) {
  6636. cc = cache[i];
  6637. if (!cc)
  6638. continue;
  6639. bpf_log(log, "[%d]%s(", i, cc->name);
  6640. for (j = 0; j < cc->cnt; j++) {
  6641. bpf_log(log, "%d", cc->cands[j].id);
  6642. if (j < cc->cnt - 1)
  6643. bpf_log(log, " ");
  6644. }
  6645. bpf_log(log, "), ");
  6646. }
  6647. }
  6648. static void print_cand_cache(struct bpf_verifier_log *log)
  6649. {
  6650. mutex_lock(&cand_cache_mutex);
  6651. bpf_log(log, "vmlinux_cand_cache:");
  6652. __print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
  6653. bpf_log(log, "\nmodule_cand_cache:");
  6654. __print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
  6655. bpf_log(log, "\n");
  6656. mutex_unlock(&cand_cache_mutex);
  6657. }
  6658. static u32 hash_cands(struct bpf_cand_cache *cands)
  6659. {
  6660. return jhash(cands->name, cands->name_len, 0);
  6661. }
  6662. static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
  6663. struct bpf_cand_cache **cache,
  6664. int cache_size)
  6665. {
  6666. struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];
  6667. if (cc && cc->name_len == cands->name_len &&
  6668. !strncmp(cc->name, cands->name, cands->name_len))
  6669. return cc;
  6670. return NULL;
  6671. }
  6672. static size_t sizeof_cands(int cnt)
  6673. {
  6674. return offsetof(struct bpf_cand_cache, cands[cnt]);
  6675. }
  6676. static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
  6677. struct bpf_cand_cache **cache,
  6678. int cache_size)
  6679. {
  6680. struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;
  6681. if (*cc) {
  6682. bpf_free_cands_from_cache(*cc);
  6683. *cc = NULL;
  6684. }
  6685. new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
  6686. if (!new_cands) {
  6687. bpf_free_cands(cands);
  6688. return ERR_PTR(-ENOMEM);
  6689. }
  6690. /* strdup the name, since it will stay in cache.
6691. * The cands->name points to strings in the prog's BTF and the prog can be unloaded.
  6692. */
  6693. new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
  6694. bpf_free_cands(cands);
  6695. if (!new_cands->name) {
  6696. kfree(new_cands);
  6697. return ERR_PTR(-ENOMEM);
  6698. }
  6699. *cc = new_cands;
  6700. return new_cands;
  6701. }
  6702. #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
  6703. static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
  6704. int cache_size)
  6705. {
  6706. struct bpf_cand_cache *cc;
  6707. int i, j;
  6708. for (i = 0; i < cache_size; i++) {
  6709. cc = cache[i];
  6710. if (!cc)
  6711. continue;
  6712. if (!btf) {
6713. /* when a new module is loaded, purge all of module_cand_cache,
6714. * since the new module might have candidates with names
6715. * that match cached cands.
  6716. */
  6717. bpf_free_cands_from_cache(cc);
  6718. cache[i] = NULL;
  6719. continue;
  6720. }
  6721. /* when module is unloaded purge cache entries
  6722. * that match module's btf
  6723. */
  6724. for (j = 0; j < cc->cnt; j++)
  6725. if (cc->cands[j].btf == btf) {
  6726. bpf_free_cands_from_cache(cc);
  6727. cache[i] = NULL;
  6728. break;
  6729. }
  6730. }
  6731. }
  6732. static void purge_cand_cache(struct btf *btf)
  6733. {
  6734. mutex_lock(&cand_cache_mutex);
  6735. __purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
  6736. mutex_unlock(&cand_cache_mutex);
  6737. }
  6738. #endif
  6739. static struct bpf_cand_cache *
  6740. bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
  6741. int targ_start_id)
  6742. {
  6743. struct bpf_cand_cache *new_cands;
  6744. const struct btf_type *t;
  6745. const char *targ_name;
  6746. size_t targ_essent_len;
  6747. int n, i;
  6748. n = btf_nr_types(targ_btf);
  6749. for (i = targ_start_id; i < n; i++) {
  6750. t = btf_type_by_id(targ_btf, i);
  6751. if (btf_kind(t) != cands->kind)
  6752. continue;
  6753. targ_name = btf_name_by_offset(targ_btf, t->name_off);
  6754. if (!targ_name)
  6755. continue;
6756. /* the resched point is before strncmp to make sure that a search
6757. * for a non-existing name will have a chance to schedule().
  6758. */
  6759. cond_resched();
  6760. if (strncmp(cands->name, targ_name, cands->name_len) != 0)
  6761. continue;
  6762. targ_essent_len = bpf_core_essential_name_len(targ_name);
  6763. if (targ_essent_len != cands->name_len)
  6764. continue;
  6765. /* most of the time there is only one candidate for a given kind+name pair */
  6766. new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
  6767. if (!new_cands) {
  6768. bpf_free_cands(cands);
  6769. return ERR_PTR(-ENOMEM);
  6770. }
  6771. memcpy(new_cands, cands, sizeof_cands(cands->cnt));
  6772. bpf_free_cands(cands);
  6773. cands = new_cands;
  6774. cands->cands[cands->cnt].btf = targ_btf;
  6775. cands->cands[cands->cnt].id = i;
  6776. cands->cnt++;
  6777. }
  6778. return cands;
  6779. }
  6780. static struct bpf_cand_cache *
  6781. bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
  6782. {
  6783. struct bpf_cand_cache *cands, *cc, local_cand = {};
  6784. const struct btf *local_btf = ctx->btf;
  6785. const struct btf_type *local_type;
  6786. const struct btf *main_btf;
  6787. size_t local_essent_len;
  6788. struct btf *mod_btf;
  6789. const char *name;
  6790. int id;
  6791. main_btf = bpf_get_btf_vmlinux();
  6792. if (IS_ERR(main_btf))
  6793. return ERR_CAST(main_btf);
  6794. if (!main_btf)
  6795. return ERR_PTR(-EINVAL);
  6796. local_type = btf_type_by_id(local_btf, local_type_id);
  6797. if (!local_type)
  6798. return ERR_PTR(-EINVAL);
  6799. name = btf_name_by_offset(local_btf, local_type->name_off);
  6800. if (str_is_empty(name))
  6801. return ERR_PTR(-EINVAL);
  6802. local_essent_len = bpf_core_essential_name_len(name);
  6803. cands = &local_cand;
  6804. cands->name = name;
  6805. cands->kind = btf_kind(local_type);
  6806. cands->name_len = local_essent_len;
  6807. cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
  6808. /* cands is a pointer to stack here */
  6809. if (cc) {
  6810. if (cc->cnt)
  6811. return cc;
  6812. goto check_modules;
  6813. }
  6814. /* Attempt to find target candidates in vmlinux BTF first */
  6815. cands = bpf_core_add_cands(cands, main_btf, 1);
  6816. if (IS_ERR(cands))
  6817. return ERR_CAST(cands);
  6818. /* cands is a pointer to kmalloced memory here if cands->cnt > 0 */
  6819. /* populate cache even when cands->cnt == 0 */
  6820. cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
  6821. if (IS_ERR(cc))
  6822. return ERR_CAST(cc);
  6823. /* if vmlinux BTF has any candidate, don't go for module BTFs */
  6824. if (cc->cnt)
  6825. return cc;
  6826. check_modules:
  6827. /* cands is a pointer to stack here and cands->cnt == 0 */
  6828. cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
  6829. if (cc)
  6830. /* if cache has it return it even if cc->cnt == 0 */
  6831. return cc;
  6832. /* If candidate is not found in vmlinux's BTF then search in module's BTFs */
  6833. spin_lock_bh(&btf_idr_lock);
  6834. idr_for_each_entry(&btf_idr, mod_btf, id) {
  6835. if (!btf_is_module(mod_btf))
  6836. continue;
6837. /* the linear search could be slow, hence unlock/relock
6838. * the IDR to avoid holding it for too long
  6839. */
  6840. btf_get(mod_btf);
  6841. spin_unlock_bh(&btf_idr_lock);
  6842. cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
  6843. btf_put(mod_btf);
  6844. if (IS_ERR(cands))
  6845. return ERR_CAST(cands);
  6846. spin_lock_bh(&btf_idr_lock);
  6847. }
  6848. spin_unlock_bh(&btf_idr_lock);
  6849. /* cands is a pointer to kmalloced memory here if cands->cnt > 0
6850. * or pointer to stack if cands->cnt == 0.
  6851. * Copy it into the cache even when cands->cnt == 0 and
  6852. * return the result.
  6853. */
  6854. return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
  6855. }
  6856. int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
  6857. int relo_idx, void *insn)
  6858. {
  6859. bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
  6860. struct bpf_core_cand_list cands = {};
  6861. struct bpf_core_relo_res targ_res;
  6862. struct bpf_core_spec *specs;
  6863. int err;
  6864. /* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
  6865. * into arrays of btf_ids of struct fields and array indices.
  6866. */
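/* (Added note, informal reading of such an access spec: each colon-separated
 * number is one access step; the first indexes the root value itself and
 * each following number selects a struct/union member or an array element,
 * so "0:1:0:5" means member #1 of the root, then member #0 of that, then
 * array element 5.)
 */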
  6867. specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
  6868. if (!specs)
  6869. return -ENOMEM;
  6870. if (need_cands) {
  6871. struct bpf_cand_cache *cc;
  6872. int i;
  6873. mutex_lock(&cand_cache_mutex);
  6874. cc = bpf_core_find_cands(ctx, relo->type_id);
  6875. if (IS_ERR(cc)) {
  6876. bpf_log(ctx->log, "target candidate search failed for %d\n",
  6877. relo->type_id);
  6878. err = PTR_ERR(cc);
  6879. goto out;
  6880. }
  6881. if (cc->cnt) {
  6882. cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
  6883. if (!cands.cands) {
  6884. err = -ENOMEM;
  6885. goto out;
  6886. }
  6887. }
  6888. for (i = 0; i < cc->cnt; i++) {
  6889. bpf_log(ctx->log,
  6890. "CO-RE relocating %s %s: found target candidate [%d]\n",
  6891. btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
  6892. cands.cands[i].btf = cc->cands[i].btf;
  6893. cands.cands[i].id = cc->cands[i].id;
  6894. }
  6895. cands.len = cc->cnt;
  6896. /* cand_cache_mutex needs to span the cache lookup and
  6897. * copy of btf pointer into bpf_core_cand_list,
  6898. * since module can be unloaded while bpf_core_calc_relo_insn
  6899. * is working with module's btf.
  6900. */
  6901. }
  6902. err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
  6903. &targ_res);
  6904. if (err)
  6905. goto out;
  6906. err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
  6907. &targ_res);
  6908. out:
  6909. kfree(specs);
  6910. if (need_cands) {
  6911. kfree(cands.cands);
  6912. mutex_unlock(&cand_cache_mutex);
  6913. if (ctx->log->level & BPF_LOG_LEVEL2)
  6914. print_cand_cache(ctx->log);
  6915. }
  6916. return err;
  6917. }